diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 258902db14bf1872c35faf2d6919e2020909d26c..8355e79350b79cf8789719d20e48f34497fee93b 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -378,6 +378,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/meltdown /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 540553c933b6197e51cd81367ffbc16fbd24273d..372b88f4e706250123ef30b8a851b8c8bd02b8d2 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -101,6 +101,7 @@ Date: February 2015 Contact: "Jaegeuk Kim" Description: Controls the trimming rate in batch mode. + What: /sys/fs/f2fs//cp_interval Date: October 2015 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a5478d737cbda3600e8009c0d4d814aa5c0ed038..51ec539c8ddbf56d14bdee77e88cc9f6b71d7d2b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2609,6 +2609,9 @@ allow data leaks with this option, which is equivalent to spectre_v2=off. + nospec_store_bypass_disable + [HW] Disable all mitigations for the Speculative Store Bypass vulnerability + noxsave [BUGS=X86] Disables x86 extended register state save and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. @@ -3939,6 +3942,48 @@ Not specifying this option is equivalent to spectre_v2=auto. 
+ spec_store_bypass_disable= + [HW] Control Speculative Store Bypass (SSB) Disable mitigation + (Speculative Store Bypass vulnerability) + + Certain CPUs are vulnerable to an exploit against a + common industry wide performance optimization known + as "Speculative Store Bypass" in which recent stores + to the same memory location may not be observed by + later loads during speculative execution. The idea + is that such stores are unlikely and that they can + be detected prior to instruction retirement at the + end of a particular speculation execution window. + + In vulnerable processors, the speculatively forwarded + store can be used in a cache side channel attack, for + example to read memory to which the attacker does not + directly have access (e.g. inside sandboxed code). + + This parameter controls whether the Speculative Store + Bypass optimization is used. + + on - Unconditionally disable Speculative Store Bypass + off - Unconditionally enable Speculative Store Bypass + auto - Kernel detects whether the CPU model contains an + implementation of Speculative Store Bypass and + picks the most appropriate mitigation. If the + CPU is not vulnerable, "off" is selected. If the + CPU is vulnerable the default mitigation is + architecture and Kconfig dependent. See below. + prctl - Control Speculative Store Bypass per thread + via prctl. Speculative Store Bypass is enabled + for a process by default. The state of the control + is inherited on fork. + seccomp - Same as "prctl" above, but all seccomp threads + will disable SSB unless they explicitly opt out. + + Not specifying this option is equivalent to + spec_store_bypass_disable=auto. 
+ + Default mitigations: + X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl" + spia_io_base= [HW,MTD] spia_fio_base= spia_pedr= diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt index 2989fbfe7972a56bf69539e79c30a5c0a757b942..440628d02630740cbac0a96dc97ed171b50dce39 100644 --- a/Documentation/devicetree/bindings/arm/msm/imem.txt +++ b/Documentation/devicetree/bindings/arm/msm/imem.txt @@ -73,6 +73,11 @@ USB Diag Cookies: Memory region used to store USB PID and serial numbers to be used by bootloader in download mode. +SSR Minidump Offset +------------------- +-Compatible: "qcom,msm-imem-minidump" +-reg: start address and size of ssr imem region + Required properties: -compatible: "qcom,msm-imem-diag-dload" -reg: start address and size of USB Diag download mode region in imem @@ -121,4 +126,9 @@ Example: compatible = "qcom,msm-imem-emergency_download_mode"; reg = <0xfe0 12>; }; + + ss_mdump@b88 { + compatible = "qcom,msm-imem-minidump"; + reg = <0xb88 28>; + }; }; diff --git a/Documentation/devicetree/bindings/arm/msm/memory-offline.txt b/Documentation/devicetree/bindings/arm/msm/memory-offline.txt new file mode 100644 index 0000000000000000000000000000000000000000..96c64d7517e3d41586481e457b1114561e1df707 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/memory-offline.txt @@ -0,0 +1,31 @@ +Memory offline driver +===================== + +The memory offline driver supports the onlining and offlining of DDR memory. +Through the mem-offline node you can configure how much of the DDR will +support being offlined/onlined. +By default all memory is onlined when the device has booted up. + +Note that offlinable memory can only support 'movable' memory allocations so +designating too much memory as offlinable can result in system performance and +stability issues. 
+ +For more information on how to request the onlining and offlining of memory +see the memory hotplug documentation (Documentation/memory-hotplug.txt). + +Required properties: +- compatible: "qcom,mem-offline" +- granule: The minimum granule size in mega-bytes for memory onlining/offlining. +- mem-percent: Percentage of the DDR which will support being onlined/offlined. + The system will round down the value to align with the minimum offlinable + granule size supported by DDR. +- mboxes: Reference to the mailbox used by the driver to make requests to + online/offline memory. + +Example: + mem-offline { + compatible = "qcom,mem-offline"; + granule = <512>; + mem-percent = "35"; + mboxes = <&qmp_aop 0>; + }; diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index 6b643f728383c023696a757f93001aa25e9ee98f..58c94abd503df94b68e0bdec4691e9e421fc2de2 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -53,11 +53,20 @@ SoCs: - QCS405 compatible = "qcom,qcs405" +- SDXPRAIRIE + compatible = "qcom,sdxprairie" + +- SDMMAGPIE + compatible = "qcom,sdmmagpie" + Generic board variants: - CDP device: compatible = "qcom,cdp" +- IDP device: + compatible = "qcom,idp" + - MTP device: compatible = "qcom,mtp" @@ -151,3 +160,9 @@ compatible = "qcom,qcs405-mtp" compatible = "qcom,qcs405-cdp" compatible = "qcom,sm8150-auto-adp-star" compatible = "qcom,auto-adp-star" +compatible = "qcom,sdxprairie-rumi" +compatible = "qcom,sdxprairie-mtp" +compatible = "qcom,sdxprairie-cdp" +compatible = "qcom,sdmmagpie-rumi" +compatible = "qcom,sdmmagpie-idp" +compatible = "qcom,sdmmagpie-qrd" diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt index a73b7d2aecf393a3a99c3dd5cbb5dc484a03a2b6..2c23b4b7d8e3d35cf674042af2c2e0a495621180 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt +++ 
b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt @@ -145,6 +145,8 @@ qcom,sbm-offset: The offset used to determine location of Sideband remove bandwidth votes. qcom,disable-ports: The ports to disable on the sideband manager when the requirement bandwidth affecting the node reduces to 0. +node-reg-names: Names of the regulator associated with bus node used + to grab the phandle of the regulator. Example: diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt index 57583eafb5b9c302b004beaefbb049668d67a136..3ea859613223c623dd63edd02257ad5dfe93ab4f 100644 --- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt +++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt @@ -31,6 +31,9 @@ Reports single and double bit errors in the data and tag ram of LLCC. 4. LLCC AMON Driver: Keeps track of the data progress within the internal channels of LLCC. +5. LLCC Performance Monitor +Used to monitor the events of LLCC sub blocks. + == llcc device == Require Properties: @@ -94,7 +97,8 @@ compatible devices: qcom,sdm670-llcc, qcom,sm8150-llcc, qcom,sdmshrike-llcc, - qcom,sm6150-llcc + qcom,sm6150-llcc, + qcom,sdmmagpie-llcc Example: @@ -118,6 +122,10 @@ Example: compatible = "qcom,llcc-amon"; qcom,fg-cnt = <0x7>; }; + + qcom,llcc-perfmon { + compatible = "qcom,llcc-perfmon"; + }; }; == Client == diff --git a/Documentation/devicetree/bindings/bus/mhi.txt b/Documentation/devicetree/bindings/bus/mhi.txt index b74fe736a11c5be46a0f8270eb040af666d34bb5..a204510a2cf5fdbed003fe27aac13d16fc5dfc35 100644 --- a/Documentation/devicetree/bindings/bus/mhi.txt +++ b/Documentation/devicetree/bindings/bus/mhi.txt @@ -14,77 +14,6 @@ Main node properties: Value type: Definition: Maximum number of channels supported by this controller -- mhi,chan-cfg - Usage: required - Value type: Array of - Definition: Array of tuples describe channel configuration. 
- 1st element: Physical channel number - 2nd element: Transfer ring length in elements - 3rd element: Event ring associated with this channel - 4th element: Channel direction as defined by enum dma_data_direction - 0 = Bidirectional data transfer - 1 = UL data transfer - 2 = DL data transfer - 3 = No direction, not a regular data transfer channel - 5th element: Channel doorbell mode configuration as defined by - enum MHI_BRSTMODE - 2 = burst mode disabled - 3 = burst mode enabled - 6th element: mhi doorbell configuration, valid only when burst mode - enabled. - 0 = Use default (device specific) polling configuration - For UL channels, value specifies the timer to poll MHI context - in milliseconds. - For DL channels, the threshold to poll the MHI context - in multiple of eight ring element. - 7th element: Channel execution enviornment as defined by enum MHI_EE - 1 = Bootloader stage - 2 = AMSS mode - 8th element: data transfer type accepted as defined by enum - MHI_XFER_TYPE - 0 = accept cpu address for buffer - 1 = accept skb - 2 = accept scatterlist - 3 = offload channel, does not accept any transfer type - 9th element: Bitwise configuration settings for the channel - Bit mask: - BIT(0) : LPM notify, this channel master requre lpm enter/exit - notifications. - BIT(1) : Offload channel, MHI host only involved in setting up - the data pipe. Not involved in active data transfer. - BIT(2) : Must switch to doorbell mode whenever MHI M0 state - transition happens. - BIT(3) : MHI bus driver pre-allocate buffer for this channel. - If set, clients not allowed to queue buffers. Valid only for DL - direction. - BIT(4) : MHI host driver to automatically start channels once - mhi device driver probe is complete. - -- mhi,chan-names - Usage: required - Value type: Array of - Definition: Channel names configured in mhi,chan-cfg. - -- mhi,ev-cfg - Usage: required - Value type: Array of - Definition: Array of tuples describe event configuration. 
- 1st element: Event ring length in elements - 2nd element: Interrupt moderation time in ms - 3rd element: MSI associated with this event ring - 4th element: Dedicated channel number, if it's a dedicated event ring - 5th element: Event ring priority, set to 1 for now - 6th element: Event doorbell mode configuration as defined by - enum MHI_BRSTMODE - 2 = burst mode disabled - 3 = burst mode enabled - 7th element: Bitwise configuration settings for the channel - Bit mask: - BIT(0) : Event ring associated with hardware channels - BIT(1) : Client manages the event ring (use by napi_poll) - BIT(2) : Event ring associated with offload channel - BIT(3) : Event ring dedicated to control events only - - mhi,timeout Usage: optional Value type: @@ -115,6 +44,187 @@ Main node properties: Value type: Definition: Size of each segment to allocate for BHIe vector table +- mhi,time-sync + Usage: optional + Value type: + Definition: Set true, if the external device support MHI get time + feature for time synchronization between host processor and + external modem. + +- mhi,use-bb + Usage: optional + Value type: + Definition: Set true, if PCIe controller does not have full access to host + DDR, and we're using a dedicated memory pool like cma, or + carveout pool. Pool must support atomic allocation. + +- mhi,buffer-len + Usage: optional + Value type: + Definition: MHI automatically pre-allocate buffers for some channel. + Set the length of buffer size to allocate. If not default + size MHI_MAX_MTU will be used. 
+ +============================ +mhi channel node properties: +============================ + +- reg + Usage: required + Value type: + Definition: physical channel number + +- label + Usage: required + Value type: + Definition: given name for the channel + +- mhi,num-elements + Usage: optional + Value type: + Definition: Number of elements transfer ring support + +- mhi,event-ring + Usage: required + Value type: + Definition: Event ring index associated with this channel + +- mhi,chan-dir + Usage: required + Value type: + Definition: Channel direction as defined by enum dma_data_direction + 0 = Bidirectional data transfer + 1 = UL data transfer + 2 = DL data transfer + 3 = No direction, not a regular data transfer channel + +- mhi,ee + Usage: required + Value type: + Definition: Channel execution environment as defined by enum MHI_EE + 1 = Bootloader stage + 2 = AMSS mode + +- mhi,pollcfg + Usage: optional + Value type: + Definition: MHI poll configuration, valid only when burst mode is enabled + 0 = Use default (device specific) polling configuration + For UL channels, value specifies the timer to poll MHI context in + milliseconds. + For DL channels, the threshold to poll the MHI context in multiple of + eight ring element. + +- mhi,data-type + Usage: required + Value type: + Definition: Data transfer type accepted as defined by enum MHI_XFER_TYPE + 0 = accept cpu address for buffer + 1 = accept skb + 2 = accept scatterlist + 3 = offload channel, does not accept any transfer type + +- mhi,doorbell-mode + Usage: required + Value type: + Definition: Channel doorbell mode configuration as defined by enum + MHI_BRSTMODE + 2 = burst mode disabled + 3 = burst mode enabled + +- mhi,lpm-notify + Usage: optional + Value type: + Definition: This channel master requires low power mode enter and exit + notifications from mhi bus master. 
+ +- mhi,offload-chan + Usage: optional + Value type: + Definition: Client managed channel, MHI host only involved in setting up + the data path, not involved in active data path. + +- mhi,db-mode-switch + Usage: optional + Value type: + Definition: Must switch to doorbell mode whenever MHI M0 state transition + happens. + +- mhi,auto-queue + Usage: optional + Value type: + Definition: MHI bus driver will pre-allocate buffers for this channel and + queue to hardware. If set, client not allowed to queue buffers. Valid + only for downlink direction. + +- mhi,auto-start + Usage: optional + Value type: + Definition: MHI host driver to automatically start channels once mhi device + driver probe is complete. This should be only set true if initial + handshake initiated by external modem. + +========================== +mhi event node properties: +========================== + +- mhi,num-elements + Usage: required + Value type: + Definition: Number of elements event ring support + +- mhi,intmod + Usage: required + Value type: + Definition: interrupt moderation time in ms + +- mhi,msi + Usage: required + Value type: + Definition: MSI associated with this event ring + +- mhi,chan + Usage: optional + Value type: + Definition: Dedicated channel number, if it's a dedicated event ring + +- mhi,priority + Usage: required + Value type: + Definition: Event ring priority, set to 1 for now + +- mhi,brstmode + Usage: required + Value type: + Definition: Event doorbell mode configuration as defined by + enum MHI_BRSTMODE + 2 = burst mode disabled + 3 = burst mode enabled + +- mhi,data-type + Usage: optional + Value type: + Definition: Type of data this event ring will process as defined + by enum mhi_er_data_type + 0 = process data packets (default) + 1 = process mhi control packets + +- mhi,hw-ev + Usage: optional + Value type: + Definition: Event ring associated with hardware channels + +- mhi,client-manage + Usage: optional + Value type: + Definition: Client manages the event ring 
(use by napi_poll) + +- mhi,offload + Usage: optional + Value type: + Definition: Event ring associated with offload channel + + Children node properties: MHI drivers that require DT can add driver specific information as a child node. @@ -129,12 +239,48 @@ Example: ======== mhi_controller { mhi,max-channels = <105>; - mhi,chan-cfg = <0 64 2 1 2 1 2 0 0>, <1 64 2 2 2 1 2 0 0>, - <2 64 1 1 2 1 1 0 0>, <3 64 1 2 2 1 1 0 0>; - mhi,chan-names = "LOOPBACK", "LOOPBACK", - "SAHARA", "SAHARA"; - mhi,ev-cfg = <64 1 1 0 1 2 8> - <64 1 2 0 1 2 0>; + + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,chan = <0>; + mhi,priority = <1>; + mhi,bstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,chan = <0>; + mhi,priority = <1>; + mhi,bstmode = <2>; + }; + mhi,fw-name = "sbl1.mbn"; mhi,timeout = <500>; diff --git a/Documentation/devicetree/bindings/bus/mhi_qcom.txt b/Documentation/devicetree/bindings/bus/mhi_qcom.txt index c0f8d8657ed0f83c45ffc1a4d9c097d1174ef70d..b2e575b77b8d9c835e226203ae042b234fd006b4 100644 --- a/Documentation/devicetree/bindings/bus/mhi_qcom.txt +++ b/Documentation/devicetree/bindings/bus/mhi_qcom.txt @@ -9,31 +9,11 @@ Node Structure Main node properties: -- compatible +- reg Usage: required - Value type: - Definition: "qcom,mhi" - -- qcom,pci-dev-id - Usage: optional - Value type: - Definition: PCIe device id of external modem to bind. If not set, any - device is compatible with this node. 
- -- qcom,pci-domain - Usage: required - Value type: - Definition: PCIe root complex external modem connected to - -- qcom,pci-bus - Usage: required - Value type: - Definition: PCIe bus external modem connected to - -- qcom,pci-slot - Usage: required - Value type: - Definition: PCIe slot as assigned by pci framework to external modem + Value type: Array (5-cell PCI resource) of + Definition: First cell is devfn, which is determined by pci bus topology. + Assign the other cells 0 since they are not used. - qcom,smmu-cfg Usage: required @@ -94,17 +74,24 @@ Main node properties: ======== Example: ======== -qcom,mhi { - compatible = "qcom,mhi"; - qcom,pci-domain = <0>; - qcom,pci-bus = <1>; - qcom,pci-slot = <0>; - qcom,smmu-cfg = <0x3d>; - qcom,addr-win = <0x0 0x20000000 0x0 0x3fffffff>; - qcom,msm-bus,name = "mhi"; - qcom,msm-bus,num-cases = <2>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = <45 512 0 0>, - <45 512 1200000000 650000000>; - + +/* pcie domain (root complex) modem connected to */ +&pcie1 { + /* pcie bus modem connected to */ + pci,bus@1 { + reg = <0 0 0 0 0>; + + qcom,mhi { + reg = <0 0 0 0 0>; + qcom,smmu-cfg = <0x3d>; + qcom,addr-win = <0x0 0x20000000 0x0 0x3fffffff>; + qcom,msm-bus,name = "mhi"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = <45 512 0 0>, + <45 512 1200000000 650000000>; + + + }; + }; }; diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt index 64eaed6077b2e5d142eb5775f96bc1278ad10e01..e8910581887c1655f8369d9a5ab84d91ee372123 100644 --- a/Documentation/devicetree/bindings/clock/qcom,camcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt @@ -2,7 +2,8 @@ Qualcomm Technologies, Inc. Camera Clock & Reset Controller Binding ------------------------------------------------------------------- Required properties : -- compatible : must contain "qcom,camcc-sm8150" or "qcom,camcc-sdmshrike". 
+- compatible : must contain "qcom,camcc-sm8150", "qcom,camcc-sm8150-v2" + or "qcom,camcc-sdmshrike". - reg : shall contain base register location and length. - reg-names: names of registers listed in the same order as in the reg property. diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt index 3b5758f94683a73f025761c68536a0972ea41744..62f84b9df32495e12a5c39e27284775f2f7a5bf2 100644 --- a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt @@ -2,7 +2,7 @@ Qualcomm Technologies, Inc. Display Clock & Reset Controller Binding -------------------------------------------------------------------- Required properties : -- compatible : Shall contain "qcom,dispcc-sm8150". +- compatible : Shall contain "qcom,dispcc-sm8150" or "qcom,dispcc-sm8150-v2". - reg : Shall contain base register location and length. - reg-names: Address name. Must be "cc_base". - vdd_mm-supply: phandle to the MM_CX rail that needs to be voted on behalf diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt index 4390120d11dbefb829d12edc21cfb78f85417180..d238475136be7bc94191a9d4ca36543264259df2 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt @@ -19,6 +19,7 @@ Required properties : "qcom,gcc-msm8996" "qcom,gcc-mdm9615" "qcom,gcc-sm8150" + "qcom,gcc-sm8150-v2" "qcom,gcc-sdmshrike" "qcom,gcc-qcs405" "qcom,gcc-mdss-qcs405" diff --git a/Documentation/devicetree/bindings/clock/qcom,npucc.txt b/Documentation/devicetree/bindings/clock/qcom,npucc.txt index a7e4aaadaa7bc93b15c012fac6e8768ee20b19ce..a5310ee9df231981911750665a2f463eda69493c 100644 --- a/Documentation/devicetree/bindings/clock/qcom,npucc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,npucc.txt @@ -2,7 +2,7 @@ Qualcomm Technologies, Inc. 
NPU Clock & Reset Controller Binding ---------------------------------------------------------------- Required properties : -- compatible : must contain "qcom,npucc-sm8150". +- compatible : must contain "qcom,npucc-sm8150" or "qcom,npucc-sm8150-v2". - reg : shall contain base register location and length. - reg-names: names of registers listed in the same order as in the reg property. diff --git a/Documentation/devicetree/bindings/clock/qcom,qcs405-cpucc.txt b/Documentation/devicetree/bindings/clock/qcom,qcs405-cpucc.txt new file mode 100644 index 0000000000000000000000000000000000000000..a5ecfa73ae401eb17b803e74fd49ffb53e09aebd --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,qcs405-cpucc.txt @@ -0,0 +1,48 @@ +Qualcomm Technologies, Inc. QCS405 CPU clock driver +--------------------------------------------------- + +It is the clock controller driver which provides higher frequency +clocks and allows CPU frequency scaling on qcs405 based platforms. + +Required properties: +- compatible: Shall contain following: + "qcom,cpu-qcs405" +- clocks: Phandle to the clock device. +- clock-names: Names of the used clocks. Shall contain following: + "xo_ao", "gpll0_ao" +- reg: Shall contain base register offset and size. +- reg-names: Names of the bases for the above registers. Shall contain following: + "apcs_cmd", "apcs_pll" +- vdd_dig_ao-supply: The regulator powering the APSS PLL. +- cpu-vdd-supply: The regulator powering the APSS RCG. +- qcom,speedX-bin-vZ: A table of CPU frequency (Hz) to regulator voltage (uV) mapping. + Format: + This represents the max frequency possible for each possible + power configuration for a CPU that's binned as speed bin X, + speed bin revision Z. Version can be between [0-3]. +- #clock-cells: Shall contain 1. + +Optional properties: +- reg-names: "efuse" +- qcom,cpucc-init-rate: Initial rate which needs to be set from cpu driver. 
+ +Example: + clock_cpu: qcom,clock-cpu@0b011050 { + compatible = "qcom,cpu-qcs405"; + clocks = <&clock_rpmcc RPM_SMD_XO_A_CLK_SRC>, + <&clock_gcc GPLL0_AO_OUT_MAIN>; + clock-names = "xo_ao", "gpll0_ao" ; + qcom,cpucc-init-rate = <960000000>; + reg = <0x0b011050 0x8>, + <0xb016000 0x34>; + reg-names = "apcs_cmd" , "apcs_pll"; + cpu-vdd-supply = <&apc_vreg_corner>; + vdd_dig_ao-supply = <&pmd9655_s1_level>; + qcom,speed0-bin-v0 = + < 0 0>, + < 960000000 1>, + < 1113600000 2>, + < 1267200000 3>, + < 1382400000 4>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt index 4f10cea75af0b522a2b1de50fe1f6a5da04478df..b758de540298cacfe0bc00d35c8a86ce8f391ef9 100644 --- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt @@ -1,7 +1,7 @@ Qualcomm Technologies, Inc. Video Clock & Reset Controller Bindings Required properties: -- compatible: shall contain "qcom,videocc-sm8150". +- compatible: shall contain "qcom,videocc-sm8150" or "qcom,videocc-sm8150-v2". - reg: shall contain base register location and length. - reg-names: names of registers listed in the same order as in the reg property. - vdd_mm-supply: the logic rail supply. 
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt index 7eda08eb8a1e336cf0756baa62a91f21fd58bce6..a2b6a8a565a79a90e654f02d811066a756ee783a 100644 --- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt +++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt @@ -20,6 +20,7 @@ Required properties : - "allwinner,sun50i-a64-ccu" - "allwinner,sun50i-a64-r-ccu" - "allwinner,sun50i-h5-ccu" + - "allwinner,sun50i-h6-ccu" - "nextthing,gr8-ccu" - reg: Must contain the registers base address and length @@ -31,6 +32,9 @@ Required properties : - #clock-cells : must contain 1 - #reset-cells : must contain 1 +For the main CCU on H6, one more clock is needed: +- "iosc": the SoC's internal frequency oscillator + For the PRCM CCUs on A83T/H3/A64, two more clocks are needed: - "pll-periph": the SoC's peripheral PLL from the main CCU - "iosc": the SoC's internal frequency oscillator diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt index 217a90eaabe7f87db72539a2c9046f5ebc892b7c..9c38bbe7e6d7d86993be1f24ad011ee27a8e1d59 100644 --- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt +++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt @@ -11,7 +11,11 @@ Required properties: interrupts. Optional properties: -- clocks: Optional reference to the clock used by the XOR engine. +- clocks: Optional reference to the clocks used by the XOR engine. 
+- clock-names: mandatory if there is a second clock, in this case the + name must be "core" for the first clock and "reg" for the second + one + Example: diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt index a9b05550c7c4f6209df8dc8d357bbf80407f5474..1d20423775be66c09c865d91eefc51903b448a92 100644 --- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt +++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt @@ -197,6 +197,15 @@ Optional properties: - qcom,mdss-dsi-te-using-wd: Boolean entry enables the watchdog timer support to generate the vsync signal for command mode panel. By default, panel TE will be used to generate the vsync. - qcom,mdss-dsi-te-using-te-pin: Boolean to specify whether using hardware vsync. +- qcom,mdss-dsi-qsync-min-refresh-rate: A u32 entry to specify minimum refresh rate supported by the panel to enable qsync feature. +- qcom,mdss-dsi-qsync-on-commands: String that specifies the commands to enable qsync feature. +- qcom,mdss-dsi-qsync-on-commands-state: String that specifies the ctrl state for sending qsync on commands. + "dsi_lp_mode" = DSI low power mode (default) + "dsi_hs_mode" = DSI high speed mode +- qcom,mdss-dsi-qsync-off-commands: String that specifies the commands to disable qsync feature. +- qcom,mdss-dsi-qsync-off-commands-state: String that specifies the ctrl state for sending qsync off commands. + "dsi_lp_mode" = DSI low power mode (default) + "dsi_hs_mode" = DSI high speed mode - qcom,mdss-dsi-te-pin-select: Specifies TE operating mode. 0 = TE through embedded dcs command 1 = TE through TE gpio pin. 
(default) @@ -599,6 +608,7 @@ Example: qcom,mdss-dsi-te-check-enable; qcom,mdss-dsi-te-using-wd; qcom,mdss-dsi-te-using-te-pin; + qcom,mdss-dsi-qsync-min-refresh-rate = <30>; qcom,mdss-dsi-te-dcs-command = <1>; qcom,mdss-dsi-wr-mem-continue = <0x3c>; qcom,mdss-dsi-wr-mem-start = <0x2c>; @@ -712,6 +722,10 @@ Example: 29 00 00 00 00 00 02 B0 04 29 00 00 00 00 00 02 F1 00]; qcom,mdss-dsi-timing-switch-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-qsync-on-commands = [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-on-commands-state = "dsi_hs_mode"; + qcom,mdss-dsi-qsync-off-commands = [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-off-commands-state = "dsi_hs_mode"; qcom,mdss-dsc-slice-height = <16>; qcom,mdss-dsc-slice-width = <360>; diff --git a/Documentation/devicetree/bindings/input/qti-haptics.txt b/Documentation/devicetree/bindings/input/qti-haptics.txt new file mode 100644 index 0000000000000000000000000000000000000000..8982fc733154b388fb557a9ea2b66a921b79dec5 --- /dev/null +++ b/Documentation/devicetree/bindings/input/qti-haptics.txt @@ -0,0 +1,188 @@ +Qualcomm Technologies, Inc. Haptics driver + +Haptics peripheral in QTI PMICs can support different type of actuators or +vibrators: + 1) Eccentric Rotation Mass (ERM); + 2) Linear Resonant Actuator (LRA). +This binding document describes the properties for this module. + +Properties: + +- compatible + Usage: required + Value type: + Definition: It can be one of the following: + "qcom,haptics", + "qcom,pm660-haptics", + "qcom,pm8150b-haptics". + +- reg + Usage: required + Value type: + Definition: Base address of haptics peripheral. + +- interrupts + Usage: required + Value type: + Definition: Peripheral interrupt specifier. + +- interrupt-names + Usage: required + Value type: + Definition: Interrupt names. This list must match up 1-to-1 with the + interrupts specified in the 'interrupts' property. Following + interrupts are required: "hap_play_irq", "hap_sc_irq". 
+ +- qcom,actuator-type + Usage: optional + Value type: + Definition: Specifies the type of the actuator connected on the output of + haptics module. Allowed values: "erm", "lra". If this is + not specified, then LRA type will be used by default. + +- qcom,vmax-mv + Usage: optional + Value type: + Definition: Specifies the maximum allowed output voltage in millivolts + for the actuator. Value specified here will be rounded + off to the closest multiple of 116 mV. Allowed values: + 0 to 3596. If this is not specified, then 1800 mV will be + used by default. + +- qcom,ilim-ma + Usage: optional + Value type: + Definition: Specifies the maximum allowed output current in mA for the + actuator. Allowed values: 400 or 800. If this is not + specified, 400 mA will be used by default. + +- qcom,play-rate-us + Usage: optional + Value type: + Definition: Specifies the period at which each sample of the 8-byte waveform + registers is played. For ERM, this period is flexible and it + can be chosen based on the desired shape of the pattern. + For LRA, it should be set equal to the resonance period + specified in the LRA actuator datasheet. Allowed values are: + 0 to 20475. If this is not specified, 5715us play rate is used. + +- qcom,brake-pattern + Usage: optional + Value type: + Definition: Specifies the brake pattern with 4 elements used to enable the + internal reverse braking. Allowed values for each element are: + 0: no brake; + 1: brake with (Vmax / 2) strength; + 2: brake with Vmax strength; + 3: brake with (2 * Vmax) strength; + If this property is specified with an array of non-zero values, + then the brake pattern is applied at the end of the playing + waveform. + +- qcom,external-waveform-source + Usage: optional + Value type: + Definition: The haptics module supports to play with internal constant + Vmax strength or play with patterns specified in its internal + 8-bytes waveform buffer. 
It can also play with the audio
+ LINE-IN signal or PWM waveform coming from LINE-IN/PWM pin.
+ This property specifies the kind of the waveform resources
+ on the LINE-IN/PWM pins. Allowed values are: "audio", "pwm".
+ If this is not specified, internal signals (Vmax or buffer)
+ will be selected according to the requirement of the playing
+ waveforms.
+
+- vdd-supply
+ Usage: optional
+ Value type:
+ Definition: Specifies the phandle of the regulator device which supplies
+ haptics module through VDD_HAP pin. This is only needed if VDD_HAP
+ is supplied from an external boost regulator instead of VPH_PWR.
+
+Following properties are specific only when LRA actuator is used:
+
+- qcom,lra-resonance-sig-shape
+ Usage: optional
+ Value type:
+ Definition: Specifies the shape of the LRA resonance drive signal. Allowed
+ values: "sine", "square". If this is not specified, sinusoid
+ resonance driver signal is used.
+
+- qcom,lra-auto-resonance-en
+ Usage: optional
+ Value type:
+ Definition: If specified, the hardware feature of LRA auto resonance detection
+ is enabled for correcting the resonance frequency variation.
+
+- qcom,lra-auto-resonance-mode
+ Usage: optional
+ Value type:
+ Definition: Specifies the auto resonance technique for LRA. Allowed values are:
+ "zxd": zero crossing based discontinuous method;
+ "qwd": quarter wave drive method;
+
+Following properties could be specified in child nodes for defining vibrating
+waveforms/effects:
+
+- qcom,effect-id
+ Usage: required
+ Value type:
+ Definition: Specifies the effect ID that the client can request to play the
+ corresponding waveform defined in this child node. The ID is
+ normally defined and sent from userspace for certain user
+ notification event.
+
+- qcom,wf-pattern
+ Usage: required
+ Value type:
+ Definition: Specifies the waveform pattern in a byte array that will be
+ played for the effect-id. Allowed values for each element
+ are: 0x00 to 0x1F.
+
+- qcom,wf-play-rate-us
+ Usage: optional
+ Value type:
+ Definition: Specifies the play period in microseconds for each byte pattern.
+ For LRA, it should be set equal to the resonance
+ period specified in the LRA actuator datasheet. Allowed values
+ are: 0 to 20475.
+
+- qcom,wf-repeat-count
+ Usage: optional
+ Value type:
+ Definition: Specifies the repeat times for the waveform pattern. Allowed
+ values are: 1, 2, 4, 8, 16, 32, 64, 128.
+
+- qcom,wf-s-repeat-count
+ Usage: optional
+ Value type:
+ Definition: Specifies the repeat times for each sample defined in
+ qcom,wf-pattern. Allowed values are: 1, 2, 4, 8.
+
+Example:
+ qcom,haptics@c000 {
+ compatible = "qcom,haptics";
+ reg = <0xc000 0x100>;
+ interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+ <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "hap_sc_irq", "hap_play_irq";
+ qcom,actuator-type = "lra";
+ qcom,vmax-mv = <1800>;
+ qcom,ilim-ma = <400>;
+ qcom,play-rate-us = <4255>;
+ qcom,brake-pattern = [03 02 01 00];
+ qcom,lra-resonance-sig-shape = "sine";
+ qcom,lra-auto-resonance-mode;
+
+ wf_0 {
+ /* CLICK effect */
+ qcom,effect-id = <0>;
+ qcom,wf-pattern = [0a 14 1f 1f 1f 1f 14 0a];
+ };
+
+ wf_5 {
+ /* HEAVY_CLICK effect */
+ qcom,effect-id = <5>;
+ qcom,wf-pattern = [08 0a 1a 1f 1f 1a 0a 08];
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt index 12ced5f7685be8b2f4f1fb4f212439828d21dfa5..717b8901fb6833c3a641190cd0ba455e4f9b45cb 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt @@ -8,13 +8,16 @@ replays it to the subsystem interrupt controller after it becomes operational. Platform interrupt controller MPM is next in hierarchy, followed by others.
+This defines 2 interrupt controllers to monitor the interrupts when the system is asleep: + +One to monitor the wakeup capable gic interrupts called wakegic. + Properties: - compatible: Usage: required Value type: - Definition: Should contain "qcom,mpm" for mpm pin data - and the respective target compatible flag. + Definition: Should contain "qcom,mpm-gic" and the respective target compatible flag. - interrupts: Usage: required @@ -48,18 +51,42 @@ Properties: Example: -mpm: mpm@7781b8 { - compatible = "qcom,mpm"; +wakegic: wake-gic@7781b8 { + compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8953", "qcom,mpm-gic-msm8937"; interrupts = ; - reg = <0x7781b8 0x1000>, - <0x17911008 0x4>; /* MSM_APCS_GCC_BASE 4K */ + reg = <0x601d4 0x1000>, + <0xb011008 0x4>; /* MSM_APCS_GCC_BASE 4K */ reg-names = "vmpm", "ipc"; - qcom,num-mpm-irqs = <96>; - - wakegic: wake-gic { - compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8953"; - interrupt-controller; - #interrupt-cells = <3>; - interrupt-parent = <&intc>; - }; + interrupt-controller; + interrupt-parent = <&intc>; + #interrupt-cells = <3>; +}; + + +One to monitor the wakeup capable gpio interrupts called wakegpio. + +properties: + +- compatible: + Usage: required + Value type: + Definition: Should contain "qcom,mpm-gpio" and the respective target compatible flag. + +- interrupt-parent: + Usage: required + Value type: + Definition: Specifies the interrupt parent necessary for hierarchical domain to operate. + +- interrupt-controller: + Usage: required + Value type: + Definition: Identifies the node as an interrupt controller. 
+ +Example: + +wakegpio: wake-gpio { + compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8953", "qcom,mpm-gpio-msm8937"; + interrupt-controller; + interrupt-parent = <&tlmm>; + #interrupt-cells = <2>; }; diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt index c61442e1d37905423084edf0f8d5b6423c066722..04faf3cf51eabd1240e44364e4774a10f8795f81 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt @@ -113,6 +113,10 @@ conditions. supported as we are directly comparing client SID with ID bits of SMR registers. +-qcom,disable-atos: + Some hardware may not have full support for atos debugging + in tandem with other features like power collapse. + - qcom,deferred-regulator-disable-delay : The time delay for deferred regulator disable in ms. In case of unmap call, regulator is enabled/disabled. This may introduce additional delay. For diff --git a/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt b/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt index 72554ba33d09c9202a0ece7d4b8a8ecc24f2ebfb..c4192683007ac345ba6de088909e34b18982d124 100644 --- a/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt +++ b/Documentation/devicetree/bindings/leds/backlight/qcom-spmi-wled.txt @@ -23,6 +23,22 @@ platforms. The PMIC is connected to the host processor via SPMI bus. Definition: Names associated with base addresses. should be "wled-ctrl-base", "wled-sink-base". +- interrupts + Usage: optional + Value type: + Definition: Interrupts associated with WLED. Interrupts can be + specified as per the encoding listed under + Documentation/devicetree/bindings/spmi/ + qcom,spmi-pmic-arb.txt. + +- interrupt-names + Usage: optional + Value type: + Definition: Interrupt names associated with the interrupts. + Currently supported interrupts are "sc-irq", "ovp-irq", + "pre-flash-irq" and "flash-irq". 
Pre_flash and flash + interrupts can be specified only for PMICs that has WLED5. + - label Usage: required Value type: @@ -95,20 +111,6 @@ platforms. The PMIC is connected to the host processor via SPMI bus. Definition: Specify if external PFET control for short circuit protection is needed. This is not applicable for PM8150L. -- interrupts - Usage: optional - Value type: - Definition: Interrupts associated with WLED. Interrupts can be - specified as per the encoding listed under - Documentation/devicetree/bindings/spmi/ - qcom,spmi-pmic-arb.txt. - -- interrupt-names - Usage: optional - Value type: - Definition: Interrupt names associated with the interrupts. - Currently supported interrupts are "sc-irq" and "ovp-irq". - - qcom,auto-calibration Usage: optional Value type: @@ -140,6 +142,95 @@ platforms. The PMIC is connected to the host processor via SPMI bus. Value type: Definition: If specified, can be used to get PMIC revision information. +Following properties are for child subnodes that are needed for WLED preflash +(or torch), flash and switch. These child subnodes can be specified only for +PMICs that has WLED5 (e.g. PM8150L). + +For wled_torch child subnode, + +- label + Usage: required + Value type: + Definition: Should be "torch". + +- qcom,default-led-trigger + Usage: optional + Value type: + Definition: Name for LED trigger. If unspecified, "wled_torch" is used. + +- qcom,wled-torch-fsc + Usage: optional + Value type: + Definition: WLED torch full scale current in mA. This configures the + maximum current allowed for torch device. Allowed values + are from 5 to 60 mA with a step of 5 mA. If not specified, + default value is set to 30 mA. + +- qcom,wled-torch-step + Usage: optional + Value type: + Definition: WLED torch step delay in us. This configures the step delay + when the output is ramped up to the desired target current. + Allowed values are from 50 to 400 us with a step of 50 us. + If not specified, default value is set to 200 us. 
+ +- qcom,wled-torch-timer + Usage: optional + Value type: + Definition: WLED torch safety timer in ms. This configures the safety + timer to turn off torch automatically after timer expiry. + Allowed values are: 50, 100, 200, 400, 600, 800, 1000 and + 1200. If not specified, default value is set to 1200 ms. + +For wled_flash child subnode, + +- label + Usage: required + Value type: + Definition: Should be "flash". + +- qcom,default-led-trigger + Usage: optional + Value type: + Definition: Name for LED trigger. If unspecified, "wled_flash" is used. + +- qcom,wled-flash-fsc + Usage: optional + Value type: + Definition: WLED flash full scale current in mA. This configures the + maximum current allowed for flash device. Allowed values + are from 5 to 60 mA with a step of 5 mA. If not specified, + default value is set to 40 mA. + +- qcom,wled-flash-step + Usage: optional + Value type: + Definition: WLED flash step delay in us. This configures the step delay + when the output is ramped up to the desired target current. + Allowed values are from 50 to 400 us with a step of 50 us. + If not specified, default value is set to 200 us. + +- qcom,wled-flash-timer + Usage: optional + Value type: + Definition: WLED flash safety timer in ms. This configures the safety + timer to turn off flash automatically after timer expiry. + Allowed values are: 50, 100, 200, 400, 600, 800, 1000 and + 1200. If not specified, default value is set to 100 ms. + +For wled_switch child subnode, + +- label + Usage: required + Value type: + Definition: Should be "switch". + +- qcom,default-led-trigger + Usage: optional + Value type: + Definition: Name for LED trigger. If unspecified, "wled_switch" is + used. 
+ Example: qcom-wled@d800 { @@ -171,4 +262,22 @@ qcom-wled@d800 { interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>; interrupt-names = "ovp-irq"; qcom,string-cfg = <7>; + + wled_torch: qcom,wled-torch { + label = "torch"; + qcom,wled-torch-fsc = <40>; + qcom,wled-torch-step = <300>; + qcom,wled-torch-timer = <600>; + }; + + wled_flash: qcom,wled-flash { + label = "flash"; + qcom,wled-flash-fsc = <60>; + qcom,wled-flash-step = <100>; + qcom,wled-flash-timer = <200>; + }; + + wled_switch: qcom,wled-switch { + label = "switch"; + }; }; diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt index 4fb042154270b6880e1ca56209f0b628199bae0a..91a6815c3925d2f55c715fe62ced9c4297578808 100644 --- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt +++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt @@ -7,6 +7,7 @@ Required properties: - compatible : one of: - "qcom,msm-vidc" - "qcom,sm8150-vidc" : Invokes driver specific data for SM8150. + - "qcom,sm6150-vidc" : Invokes driver specific data for SM6150. - "qcom,sdm845-vidc" : Invokes driver specific data for SDM845. - "qcom,sdm670-vidc" : Invokes driver specific data for SDM670. @@ -143,6 +144,7 @@ Memory Heaps Required properties: - compatible : one of: - "qcom,msm-vidc,mem-adsp" + - "qcom,msm-vidc,mem-cdsp" - memory-region : phandle to the memory heap/region. Example: diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt new file mode 100644 index 0000000000000000000000000000000000000000..a98e4aef7add0a86f25549142ff56f3b28525b04 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt @@ -0,0 +1,77 @@ +Qualcomm Technologies, Inc. Parallel Interface controller (QPIC) for NAND devices + +Required properties: +- compatible : "qcom,msm-nand". +- reg : should specify QPIC NANDc and BAM physical address range. 
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should specify QPIC/BAM interrupt numbers.
+- interrupt-names : should specify relevant names to each interrupts property
+ defined.
+- qcom,reg-adjustment-offset : Specify the base adjustment offset value for the
+ version registers
+- qcom,qpic-clk-rpmh: Indicates whether QPIC clock is RPMH controlled clock or
+ not.
+
+MTD flash partition layout for NAND devices -
+
+Each partition is represented as a sub-node of the qcom,mtd-partitions device.
+Each node's name represents the name of the corresponding partition.
+
+This is now completely optional as the partition information is available from
+bootloader.
+
+Optional properties:
+- reg : boot_cfg. This is needed only on the targets where both NAND and eMMC
+ devices are supported. On eMMC based builds, NAND cannot be enabled by
+ default due to the absence of some of its required resources.
+- reg : The partition offset and size
+- label : The label / name for this partition.
+- read-only: This parameter, if present, indicates that this partition
+ should only be mounted read-only.
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below optional properties:
+ - qcom,msm-bus,name
+ - qcom,msm-bus,num-cases
+ - qcom,msm-bus,active-only
+ - qcom,msm-bus,num-paths
+ - qcom,msm-bus,vectors-KBps
+
+Examples:
+
+ qcom,nand@f9af0000 {
+ compatible = "qcom,msm-nand";
+ reg = <0xf9af0000 0x1000>,
+ <0xf9ac4000 0x8000>,
+ <0x5e02c 0x4>;
+ reg-names = "nand_phys",
+ "bam_phys",
+ "boot_cfg";
+ qcom,reg-adjustment-offset = <0x4000>;
+
+ interrupts = <0 279 0>;
+ interrupt-names = "bam_irq";
+
+ qcom,msm-bus,name = "qpic_nand";
+ qcom,msm-bus,num-cases = <1>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <91 512 0 0>;
+ qcom,qpic-clk-rpmh;
+ };
+
+ qcom,mtd-partitions {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "boot";
+ reg = <0x0 0x1000>;
+ read-only;
+ };
+ partition@20000 {
+ label = "userdata";
+ reg = <0x20000 0x1000>;
+ };
+ partition@40000 {
+ label = "system";
+ reg = <0x40000 0x1000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/can/qti-can.txt b/Documentation/devicetree/bindings/net/can/qti-can.txt new file mode 100644 index 0000000000000000000000000000000000000000..c494cc846d971ed408cb8bc81ae3e1520b47a9b0 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/qti-can.txt @@ -0,0 +1,48 @@
+* QTI CAN driver *
+
+Required properties:
+ - compatible: Should be "qcom,renesas,rh850" or "qcom,nxp,mpc5746c".
+ - reg: Should contain SPI chip select.
+ - interrupt-parent: Should specify interrupt controller for the interrupt.
+ - interrupts: Should contain IRQ line for the CAN controller.
+ - spi-max-frequency: Should contain maximum spi clock frequency for slave device
+ - qcom,clk-freq-mhz : The frequency at which the CAN clock should be configured.
+ - qcom,max-can-channels : Maximum number of CAN channels supported by the controller.
+
+Optional properties:
+ - qcom,reset-gpio: Reference to the GPIO connected to the reset input.
+ - pinctrl-names : Names corresponding to the numbered pinctrl states. + - pinctrl-0 : This explains the active state of the GPIO line. + - pinctrl-1 : This explains the suspend state of the GPIO line. + - qcom,bits-per-word: Indicate how many bits are in a SPI frame. e.g.: 8, 16, 32. + Default to 16. + - qcom,reset-delay-msec: Delay in milliseconds to be applied after resetting the chip. + This is applicable only if the reset-gpio is specified. Default value is 1 ms. + - qcom,support-can-fd: Whether CAN FD mode is supported or not. + - qcom,can-fw-cmd-timeout-req: Whether a timeout is required if we don't get a response from + the firmware after flash write. + - qcom,can-fw-cmd-timeout-ms: The duration after which timeout will happen if we don't get a + response from the firmware. + - qcom,rem-all-buffering-timeout-ms: The duration after which timeout will happen if we don't get a + response from the firmware while trying to remove all the buffered frames IDs from flash. + +Example: + +can-controller@0 { + compatible = "qcom,nxp,mpc5746c"; + reg = <0>; + spi-max-frequency = <9600000>; + interrupt-parent = <&tlmm_pinmux>; + interrupts = <87 0>; + qcom,reset-gpio = <&tlmm_pinmux 89 0x1>; + qcom,clk-freq-mhz = <20000000>; + qcom,max-can-channels = <2>; + qcom,bits-per-word = <8>; + qcom,reset-delay-msec = <150>; + qcom,can-fw-cmd-timeout-req; + qcom,can-fw-cmd-timeout-ms = <400>; + qcom,rem-all-buffering-timeout-ms = <2000>; + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&can_rst_on>; + pinctrl-1 = <&can_rst_off>; +}; diff --git a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt index 47284f85ec804fd038a9bd931cbcb867fa5fe793..c3f9826692bcfd2052ef220e79542487a3cca52f 100644 --- a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt @@ -20,7 +20,8 @@ Required subnode-properties: gpio: 
cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0, i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0, spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0, - uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0 + uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0, + uart5nocts cpuclkout: cpuclkoutgrp0 udlclkout: udlclkoutgrp0 i2c1: i2c1grp0 @@ -37,7 +38,7 @@ Required subnode-properties: uart2: uart2grp0, uart2grp1 uart3: uart3grp0 uart4: uart4grp0 - uart5: uart5grp0 + uart5: uart5grp0, uart5nocts nand: nandgrp0 sdio0: sdio0grp0 sdio1: sdio1grp0 diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdmmagpie-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,sdmmagpie-pinctrl.txt new file mode 100644 index 0000000000000000000000000000000000000000..9f629b8ab82372969da8137a5bb446585564efe2 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdmmagpie-pinctrl.txt @@ -0,0 +1,186 @@ +Qualcomm Technologies, Inc. SDMMAGPIE TLMM block + +This binding describes the Top Level Mode Multiplexer block found in the +SDMMAGPIE platform. + +- compatible: + Usage: required + Value type: + Definition: must be "qcom,sdmmagpie-pinctrl" + +- reg: + Usage: required + Value type: + Definition: the base address and size of the TLMM register space. + +- interrupts: + Usage: required + Value type: + Definition: should specify the TLMM summary IRQ. + +- interrupt-controller: + Usage: required + Value type: + Definition: identifies this node as an interrupt controller + +- #interrupt-cells: + Usage: required + Value type: + Definition: must be 2. Specifying the pin number and flags, as defined + in + +- gpio-controller: + Usage: required + Value type: + Definition: identifies this node as a gpio controller + +- #gpio-cells: + Usage: required + Value type: + Definition: must be 2. 
Specifying the pin number and flags, as defined + in + +Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for +a general description of GPIO and interrupt bindings. + +Please refer to pinctrl-bindings.txt in this directory for details of the +common pinctrl bindings used by client devices, including the meaning of the +phrase "pin configuration node". + +The pin configuration nodes act as a container for an arbitrary number of +subnodes. Each of these subnodes represents some desired configuration for a +pin, a group, or a list of pins or groups. This configuration can include the +mux function to select on those pin(s)/group(s), and various pin configuration +parameters, such as pull-up, drive strength, etc. + + +PIN CONFIGURATION NODES: + +The name of each subnode is not important; all subnodes should be enumerated +and processed purely based on their content. + +Each subnode only affects those parameters that are explicitly listed. In +other words, a subnode that lists a mux function but no pin configuration +parameters implies no information about any pin configuration parameters. +Similarly, a pin subnode that describes a pullup parameter implies no +information about e.g. the mux function. + + +The following generic properties as defined in pinctrl-bindings.txt are valid +to specify in a pin configuration subnode: + +- pins: + Usage: required + Value type: + Definition: List of gpio pins affected by the properties specified in + this subnode. + + Valid pins are: + gpio0-gpio149 + Supports mux, bias and drive-strength + + sdc1_clk, sdc1_cmd, sdc1_data sdc2_clk, sdc2_cmd, + sdc2_data sdc1_rclk + Supports bias and drive-strength + +- function: + Usage: required + Value type: + Definition: Specify the alternative function to be configured for the + specified pins. Functions are only valid for gpio pins. 
+ Valid values are: + + blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens, + bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8, + qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b, + dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10, + blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12, + mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11, + atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char, + cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b, + pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c, + qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4, + qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5, + atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6, + atest_usb20, atest_char0, dac_calib10, qdss_stm10, + qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6, + blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11, + qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1, + qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11, + dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6, + qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14, + dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem, + dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto, + dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0, + dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25, + sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2, + qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3, + uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9, + blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7, + qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11, + blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0, + cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4, + blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4, + qdss_stm14, dac_calib26, 
spkr_i2s, audio_ref, lpass_slimbus,
+ isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s,
+ qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b,
+ sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b,
+ gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12,
+ qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29,
+ tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27,
+ qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk,
+ sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b,
+ sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b,
+ ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b,
+ blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt,
+ pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11,
+ qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx,
+ qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3,
+ gpio
+
+- bias-disable:
+ Usage: optional
+ Value type:
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type:
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type:
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type:
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type:
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type:
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16 + +Example: + + tlmm: pinctrl@03400000 { + compatible = "qcom,sdmmagpie-pinctrl"; + reg = <0x03400000 0xdc2000>; + interrupts = <0 208 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdxprairie-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdxprairie-pinctrl new file mode 100644 index 0000000000000000000000000000000000000000..9de6959b0aa67fc62ce15e8bb9e0fe7b4f2d632f --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdxprairie-pinctrl @@ -0,0 +1,186 @@ +Qualcomm Technologies, Inc. SDXPRAIRIE TLMM block + +This binding describes the Top Level Mode Multiplexer block found in the +SDXPRAIRIE platform. + +- compatible: + Usage: required + Value type: + Definition: must be "qcom,sdxprairie-pinctrl" + +- reg: + Usage: required + Value type: + Definition: the base address and size of the TLMM register space. + +- interrupts: + Usage: required + Value type: + Definition: should specify the TLMM summary IRQ. + +- interrupt-controller: + Usage: required + Value type: + Definition: identifies this node as an interrupt controller + +- #interrupt-cells: + Usage: required + Value type: + Definition: must be 2. Specifying the pin number and flags, as defined + in + +- gpio-controller: + Usage: required + Value type: + Definition: identifies this node as a gpio controller + +- #gpio-cells: + Usage: required + Value type: + Definition: must be 2. Specifying the pin number and flags, as defined + in + +Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for +a general description of GPIO and interrupt bindings. + +Please refer to pinctrl-bindings.txt in this directory for details of the +common pinctrl bindings used by client devices, including the meaning of the +phrase "pin configuration node". 
+ +The pin configuration nodes act as a container for an arbitrary number of +subnodes. Each of these subnodes represents some desired configuration for a +pin, a group, or a list of pins or groups. This configuration can include the +mux function to select on those pin(s)/group(s), and various pin configuration +parameters, such as pull-up, drive strength, etc. + + +PIN CONFIGURATION NODES: + +The name of each subnode is not important; all subnodes should be enumerated +and processed purely based on their content. + +Each subnode only affects those parameters that are explicitly listed. In +other words, a subnode that lists a mux function but no pin configuration +parameters implies no information about any pin configuration parameters. +Similarly, a pin subnode that describes a pullup parameter implies no +information about e.g. the mux function. + + +The following generic properties as defined in pinctrl-bindings.txt are valid +to specify in a pin configuration subnode: + +- pins: + Usage: required + Value type: + Definition: List of gpio pins affected by the properties specified in + this subnode. + + Valid pins are: + gpio0-gpio149 + Supports mux, bias and drive-strength + + sdc1_clk, sdc1_cmd, sdc1_data sdc2_clk, sdc2_cmd, + sdc2_data sdc1_rclk + Supports bias and drive-strength + +- function: + Usage: required + Value type: + Definition: Specify the alternative function to be configured for the + specified pins. Functions are only valid for gpio pins. 
+ Valid values are: + + blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens, + bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8, + qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b, + dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10, + blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12, + mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11, + atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char, + cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b, + pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c, + qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4, + qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5, + atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6, + atest_usb20, atest_char0, dac_calib10, qdss_stm10, + qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6, + blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11, + qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1, + qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11, + dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6, + qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14, + dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem, + dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto, + dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0, + dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25, + sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2, + qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3, + uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9, + blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7, + qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11, + blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0, + cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4, + blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4, + qdss_stm14, dac_calib26, 
spkr_i2s, audio_ref, lpass_slimbus,
+ isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s,
+ qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b,
+ sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b,
+ gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12,
+ qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29,
+ tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27,
+ qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk,
+ sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b,
+ sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b,
+ ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b,
+ blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt,
+ pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11,
+ qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx,
+ qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3,
+ gpio
+
+- bias-disable:
+ Usage: optional
+ Value type:
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type:
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type:
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type:
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type:
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type:
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16 + +Example: + + tlmm: pinctrl@03900000 { + compatible = "qcom,sdxprairie-pinctrl"; + reg = <0x03900000 0x300000>; + interrupts = <0 212 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt index 15611971426db68acc56224d077f0f1e6304f83d..6983ad330f03d295733310c639e4e610827b4f5c 100644 --- a/Documentation/devicetree/bindings/platform/msm/ipa.txt +++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt @@ -87,6 +87,8 @@ memory allocation over a PCIe bridge need to be unlocked by TZ. - qcom,ipa-uc-monitor-holb: Boolean context flag to indicate whether monitoring of holb via IPA uc is required. +-qcom,ipa-fltrt-not-hashable: Boolean context flag to indicate filter/route rules + hashing not supported. IPA pipe sub nodes (A2 static pipes configurations): diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt new file mode 100644 index 0000000000000000000000000000000000000000..afeb65dc86d0b585f9d0d43dce87cc590b5b2ac6 --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt @@ -0,0 +1,319 @@ +Qualcomm Technologies, Inc. QPNP PMIC QGAUGE (QG) Device + +QPNP PMIC QGAUGE device provides the ability to gauge the State-of-Charge +of the battery. It provides an interface to the clients to read various +battery related parameters. + +======================= +Required Node Structure +======================= + +Qgauge device must be described in two levels of nodes. The first level +describes the properties of the Qgauge device and the second level +describes the peripherals managed/used by the module. 
+ +==================================== +First Level Node - QGAUGE device +==================================== + +- compatible + Usage: required + Value type: + Definition: Should be "qcom,qpnp-qg". + +- qcom,pmic-revid + Usage: required + Value type: + Definition: Should specify the phandle of PMIC revid module. This is + used to identify the PMIC subtype. + +- qcom,qg-vadc + Usage: required + Value type: + Definition: Phandle for the VADC node, it is used for BATT_ID and + BATT_THERM readings. + +- qcom,vbatt-empty-mv + Usage: optional + Value type: + Definition: The battery voltage threshold (in mV) at which the + vbatt-empty interrupt fires. The SOC is forced to 0 + when this interrupt fires. If not specified, the + default value is 3200 mV. + +- qcom,vbatt-empty-cold-mv + Usage: optional + Value type: + Definition: The battery voltage threshold (in mV) at which the + vbatt-empty interrupt fires. This threshold is only + applied at cold temperature specified by + 'qcom,cold-temp-threshold'. The SOC is forced to 0 + when this interrupt fires. If not specified, the + default value is 3000 mV. + +- qcom,vbatt-cutoff-mv + Usage: optional + Value type: + Definition: The battery voltage threshold (in mV) at which the + Qgauge algorithm converges to 0 SOC. If not specified + the default value is 3400 mV. + +- qcom,vbatt-low-mv + Usage: optional + Value type: + Definition: The battery voltage threshold (in mV) at which the + VBAT_LOW interrupt fires. Software can take the necessary + action when this interrupt fires. If not specified + the default value is 3500 mV. + +- qcom,vbatt-low-cold-mv + Usage: optional + Value type: + Definition: The battery voltage threshold (in mV) at which the + VBAT_LOW interrupt fires. The threshold is only + applied at cold temperature specified by + 'qcom,cold-temp-threshold'. Software can take the necessary + action when this interrupt fires. If not specified + the default value is 3800 mV. 
+ +- qcom,qg-iterm-ma + Usage: optional + Value type: + Definition: The battery current (in mA) at which the QG algorithm + converges the SOC to 100% during charging and can be used to + terminate charging. If not specified, the default value is + 100mA. + +- qcom,delta-soc + Usage: optional + Value type: + Definition: The SOC percentage increase at which the SOC is + periodically reported to the userspace. If not specified, + the value defaults to 1%. + +- qcom,s2-fifo-length + Usage: optional + Value type: + Definition: The total number of FIFO samples which need to be filled up + in S2 state of QG to fire the FIFO DONE interrupt. + Minimum value = 1 Maximum Value = 8. If not specified, + the default value is 5. + +- qcom,s2-acc-length + Usage: optional + Value type: + Definition: The number of distinct V & I samples to be accumulated + in each FIFO in the S2 state of QG. + Minimum Value = 0 Maximum Value = 256. If not specified, + the default value is 128. + +- qcom,s2-acc-interval-ms + Usage: optional + Value type: + Definition: The time (in ms) between each of the V & I samples being + accumulated in FIFO. + Minimum Value = 0 ms Maximum Value = 2550 ms. If not + specified the default value is 100 ms. + +- qcom,ocv-timer-expiry-min + Usage: optional + Value type: + Definition: The maximum time (in minutes) for the QG to transition from + S3 to S2 state. + Minimum Value = 2 min Maximum Value = 30 min. If not + specified the hardware default is set to 14 min. + +- qcom,ocv-tol-threshold-uv + Usage: optional + Value type: + Definition: The OCV detection error tolerance (in uV). The maximum + voltage allowed between 2 VBATT readings in the S3 state + to qualify for a valid OCV. + Minimum Value = 0 uV Maximum Value = 12262 uV Step = 195 uV + +- qcom,s3-entry-fifo-length + Usage: optional + Value type: + Definition: The minimum number of FIFO samples which have to qualify the + S3 IBAT entry threshold (qcom,s3-entry-ibat-ua) for QG + to enter into S3 state. 
+ Minimum Value = 1 Maximum Value = 8. The hardware default + is configured to 3. + +- qcom,s3-entry-ibat-ua + Usage: optional + Value type: + Definition: The battery current (in uA) for the QG to enter into the S3 + state. The QG algorithm enters into S3 if the battery + current is lower than this threshold consecutively for + the FIFO length specified in 'qcom,s3-entry-fifo-length'. + Minimum Value = 0 uA Maximum Value = 155550 uA + Step = 610 uA. + +- qcom,s3-exit-ibat-ua + Usage: optional + Value type: + Definition: The battery current (in uA) for the QG to exit S3 state. + If the battery current is higher than this threshold QG + exits S3 state. + Minimum Value = 0 uA Maximum Value = 155550 uA + Step = 610 uA. + +- qcom,rbat-conn-mohm + Usage: optional + Value type: + Definition: Resistance of the battery connectors in mOhms. + +- qcom,ignore-shutdown-soc-secs + Usage: optional + Value type: + Definition: Time in seconds beyond which shutdown SOC is ignored. + If not specified the default value is 360 secs. + +- qcom,hold-soc-while-full + Usage: optional + Value type: + Definition: A boolean property that when defined holds SOC at 100% when + the battery is full until recharge starts. + +- qcom,linearize-soc + Usage: optional + Value type: + Definition: A boolean property that when defined linearizes SOC when + the SOC drops after charge termination monotonically to + improve the user experience. This is applicable only if + "qcom,hold-soc-while-full" is specified. + +- qcom,cold-temp-threshold + Usage: optional + Value type: + Definition: Temperature threshold in decidegree at which the low + temperature specific configuration is applied. If not + specified, the default value is 0 degree centigrade. + +- qcom,cl-disable + Usage: optional + Value type: + Definition: A boolean property to disable the battery capacity + learning when charging. 
+ +- qcom,cl-feedback-on + Usage: optional + Value type: + Definition: A boolean property to feed back the learned capacity into + the capacity learning algorithm. This has to be used only if the + property "qcom,cl-disable" is not specified. + +- qcom,cl-max-start-soc + Usage: optional + Value type: + Definition: Battery SOC has to be below or equal to this value at the + start of a charge cycle to start the capacity learning. + If this is not specified, then the default value used + will be 15. Unit is in percentage. + +- qcom,cl-min-start-soc + Usage: optional + Value type: + Definition: Battery SOC has to be above or equal to this value at the + start of a charge cycle to start the capacity learning. + If this is not specified, then the default value used + will be 10. Unit is in percentage. + +- qcom,cl-min-temp + Usage: optional + Value type: + Definition: Lower limit of battery temperature to start the capacity + learning. If this is not specified, then the default value + used will be 150 (15 C). Unit is in decidegC. + +- qcom,cl-max-temp + Usage: optional + Value type: + Definition: Upper limit of battery temperature to start the capacity + learning. If this is not specified, then the default value + used will be 500 (50 C). Unit is in decidegC. + +- qcom,cl-max-increment + Usage: optional + Value type: + Definition: Maximum capacity increment allowed per capacity learning + cycle. If this is not specified, then the default value + used will be 5 (0.5%). Unit is in decipercentage. + +- qcom,cl-max-decrement + Usage: optional + Value type: + Definition: Maximum capacity decrement allowed per capacity learning + cycle. If this is not specified, then the default value + used will be 100 (10%). Unit is in decipercentage. + +- qcom,cl-min-limit + Usage: optional + Value type: + Definition: Minimum limit that the capacity cannot go below in a + capacity learning cycle. If this is not specified, then + the default value is 0. Unit is in decipercentage. 
+ +- qcom,cl-max-limit + Usage: optional + Value type: + Definition: Maximum limit that the capacity cannot go above in a + capacity learning cycle. If this is not specified, then + the default value is 0. Unit is in decipercentage. + +========================================================== +Second Level Nodes - Peripherals managed by QGAUGE driver +========================================================== +- reg + Usage: required + Value type: + Definition: Addresses and sizes for the specified peripheral + +- interrupts + Usage: optional + Value type: + Definition: Interrupt mapping as per the interrupt encoding + +- interrupt-names + Usage: optional + Value type: + Definition: Interrupt names. This list must match up 1-to-1 with the + interrupts specified in the 'interrupts' property. + +======== +Example +======== + +pmi632_qg: qpnp,qg { + compatible = "qcom,qpnp-qg"; + qcom,pmic-revid = <&pmi632_revid>; + qcom,qg-vadc = <&pmi632_vadc>; + qcom,vbatt-empty-mv = <3200>; + qcom,vbatt-low-mv = <3500>; + qcom,vbatt-cutoff-mv = <3400>; + qcom,qg-iterm-ma = <100>; + + qcom,qgauge@4800 { + status = "okay"; + reg = <0x4800 0x100>; + interrupts = <0x2 0x48 0x0 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x48 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x48 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x48 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x48 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x48 0x6 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "qg-batt-missing", + "qg-vbat-low", + "qg-vbat-empty", + "qg-fifo-done", + "qg-good-ocv", + "qg-fsm-state-chg", + "qg-event"; + }; + + qcom,qg-sdam@b000 { + status = "okay"; + reg = <0xb000 0x100>; + }; +}; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt index 4eb09a348aa98a5a90ac1b2926c8a1d842c78529..f50211139a317804ccb1bc608da3555b60b0b023 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt @@ 
-124,6 +124,27 @@ Charger specific properties: then charge inhibit will be disabled by default. Allowed values are: 50, 100, 200, 300. +- qcom,chg-term-src + Usage: optional + Value type: + Definition: Specify either the ADC or analog comparators to be used in order + to set threshold values for charge termination current. + 0 - Unspecified + 1 - Select ADC comparator + 2 - Select ANALOG comparator + +- qcom,chg-term-current-ma + Usage: optional + Value type: + Definition: When ADC comparator is selected as qcom,chg-term-src, this + parameter should be set to the desired upper threshold. + +- qcom,chg-term-base-current-ma + Usage: optional + Value type: + Definition: When ADC comparator is selected as qcom,chg-term-src, this + parameter should be set to the desired lower threshold. + - qcom,auto-recharge-soc Usage: optional Value type: diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt index 5e104f92b5f59a812525e8ccc9b2bec54319aab0..ff5bb515b4c05beb43f083607ae44662319f2e91 100644 --- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt +++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt @@ -86,9 +86,11 @@ Remote Heap: Required properties: - compatible : Must be "qcom,msm-adsprpc-mem-region" - memory-region : CMA region which is owned by this device +- restrict-access : Blocking vote for hyp_assign_phys function call Example: qcom,adsprpc-mem { compatible = "qcom,msm-adsprpc-mem-region"; memory-region = <&adsp_mem>; + restrict-access; }; diff --git a/Documentation/devicetree/bindings/regulator/cpr-regulator.txt b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt index a9e44a83fe4f232c776cfe654da2cb56c237b544..81746f3d7d871bfee9914c39a68371dc8e0064db 100644 --- a/Documentation/devicetree/bindings/regulator/cpr-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/cpr-regulator.txt @@ -675,6 +675,14 @@ Optional properties: The number of quadruples should be 
equal to the number of values specified in the qcom,cpr-aging-sensor-id property. This property is required if the qcom,cpr-aging-sensor-id property has been specified. +- qcom,cpr-vdd-mode-map: Array of boolean values which define the mapping + of the VDD operating mode for each APC virtual + corner. A element value 0 indicates the VDD to + be configured to AUTO mode and value 1 indicates + the VDD to be configured to PWM mode for the + corresponding virtual corner. The elements in + the array are ordered from lowest voltage corner + to highest voltage corner. Example: apc_vreg_corner: regulator@f9018000 { status = "okay"; @@ -727,6 +735,7 @@ Example: qcom,cpr-voltage-ceiling = <1050000 1150000 1280000>; qcom,cpr-voltage-floor = <1050000 1050000 1100000>; vdd-apc-supply = <&pm8226_s2>; + qcom,cpr-vdd-mode-map = <0 0 0 0 0 0 1 1 1 1 1 1>; vdd-apc-optional-prim-supply = <&ncp6335d>; vdd-apc-optional-sec-supply = <&fan53555>; vdd-mx-supply = <&pm8226_l3_ao>; diff --git a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt index d982f372a9a8cfe94d142e6d75e2921c1fc30eb6..9716f150bf9798022d868519416549ffb423c8ac 100644 --- a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt +++ b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt @@ -59,16 +59,6 @@ no-map (optional) - empty property of the region as part of its standard mapping of system memory, nor permit speculative access to it under any circumstances other than under the control of the device driver using the region. -no-map-fixup (optional) - empty property - - Indicates the operating system must reserve the memory region and keep - virtual mapping. Upon first allocation the actual allocated region is - removed for any virtual mapping and behaves as "no-map" while the - remaining memory is returned back to the system for normal use. 
One would - like to use this property where he is not sure about how much region size - must be reserved, so he gives it a max size which then is shrink once - (first) allocation is done. This property is for some specific use cases, - if unsure please don't use it. This property cannot be used together with - "no-map" attribute. reusable (optional) - empty property - The operating system can use the memory in this region with the limitation that the device driver(s) owning the region need to be diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt index b31dc21ae3d3736e7d8e89e9c232d1544ebda9b8..f1273d30b081a3176375af5ef0e7494d778c21c8 100644 --- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt +++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt @@ -300,6 +300,11 @@ Required properties: - compatible : "qcom,msm-audio-apr" This device is added to represent APR module. + - qcom,subsys-name: This value provides the subsystem name where codec + is present. It can be "apr_modem" or "apr_adsp". This + property enable apr driver to receive subsystem up/down + notification from modem/adsp. + * msm-ocmem-audio Required properties: @@ -650,6 +655,7 @@ Example: audio_apr: qcom,msm-audio-apr { compatible = "qcom,msm-audio-apr"; + qcom,subsys-name = "apr_adsp"; }; qcom,msm-ocmem-audio { diff --git a/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt b/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt index ef77c0335d9c4b227134690ef685a5c9cb95b58b..95a5977fe66b3b6c1618ed49ddc3e103c49467ee 100644 --- a/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt +++ b/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt @@ -108,6 +108,8 @@ Optional properties: - qcom,core-voltage-level: This property must be a list of three integer values (no, min, max) where each value represents either a voltage in microvolts or a value corresponding to voltage corner. 
+ - qcom,link-training-reset: This property indicates to start link training + timer to reset the elastic buffer based on rx equalization value. Example: ssphy0: ssphy@f9b38000 { diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 4b6cf4c5e06168023db327035f149b8f5ca584a6..37d8698ca2d656e62f970b342ad84528b914a3c1 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -182,13 +182,15 @@ whint_mode=%s Control which write hints are passed down to block passes down hints with its policy. alloc_mode=%s Adjust block allocation policy, which supports "reuse" and "default". -fsync_mode=%s Control the policy of fsync. Currently supports "posix" - and "strict". In "posix" mode, which is default, fsync - will follow POSIX semantics and does a light operation - to improve the filesystem performance. In "strict" mode, - fsync will be heavy and behaves in line with xfs, ext4 - and btrfs, where xfstest generic/342 will pass, but the - performance will regress. +fsync_mode=%s Control the policy of fsync. Currently supports "posix", + "strict", and "nobarrier". In "posix" mode, which is + default, fsync will follow POSIX semantics and does a + light operation to improve the filesystem performance. + In "strict" mode, fsync will be heavy and behaves in line + with xfs, ext4 and btrfs, where xfstest generic/342 will + pass, but the performance will regress. "nobarrier" is + based on "posix", but doesn't issue flush command for + non-atomic files likewise "nobarrier" mount option. test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt context. The fake fscrypt context is used by xfstests. diff --git a/Documentation/mhi.txt b/Documentation/mhi.txt index 1c501f18f1568ad366d0ef5c2dc6a788e53acfa8..928789936b37523aeab9e96dc95f7126aec77b6f 100644 --- a/Documentation/mhi.txt +++ b/Documentation/mhi.txt @@ -137,6 +137,47 @@ Example Operation for data transfer: 8. 
Host wakes up and check event ring for completion event 9. Host update the Event[i].ctxt.WP to indicate processed of completion event. +Time sync +--------- +To synchronize two applications between host and external modem, MHI provides +native support to get the external modem's free running timer value in a fast +reliable method. MHI clients do not need to create client specific methods to +get modem time. + +When client requests modem time, MHI host will automatically capture host time +at that moment so clients are able to do accurate drift adjustment. + +Example: + +Client request time @ time T1 + +Host Time: Tx +Modem Time: Ty + +Client request time @ time T2 +Host Time: Txx +Modem Time: Tyy + +Then drift is: +Tyy - Ty + == Txx - Tx + +Clients are free to implement their own drift algorithms, what MHI host provides +is a way to accurately correlate host time with external modem time. + +To avoid link level latencies, controller must support capabilities to disable +any link level latency. + +During Time capture host will: + 1. Capture host time + 2. Trigger doorbell to capture modem time + +It's important that the time between Step 1 and Step 2 is as deterministic as possible. +Therefore, MHI host will: + 1. Disable any MHI related low power modes. + 2. Disable preemption + 3. Request bus master to disable any link level latencies. Controller + should disable all low power modes such as L0s, L1, L1ss. 
+ MHI States ---------- diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 9baf66a9ef4e7e57f24388d3da3130da83503195..b60e950d3a6a07509fe6015c9235cb3270bd5bb0 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -30,6 +30,7 @@ Currently, these files are in /proc/sys/vm: - dirty_writeback_centisecs - drop_caches - extfrag_threshold +- extra_free_kbytes - hugepages_treat_as_movable - hugetlb_shm_group - laptop_mode @@ -49,6 +50,7 @@ Currently, these files are in /proc/sys/vm: - nr_trim_pages (only if CONFIG_MMU=n) - numa_zonelist_order - oom_dump_tasks +- reap_mem_on_sigkill - oom_kill_allocating_task - overcommit_kbytes - overcommit_memory @@ -260,6 +262,21 @@ any throttling. ============================================================== +extra_free_kbytes + +This parameter tells the VM to keep extra free memory between the threshold +where background reclaim (kswapd) kicks in, and the threshold where direct +reclaim (by allocating processes) kicks in. + +This is useful for workloads that require low latency memory allocations +and have a bounded burstiness in memory allocations, for example a +realtime application that receives and transmits network traffic +(causing in-kernel memory allocations) with a maximum total message burst +size of 200MB may need 200MB of extra free memory to avoid direct reclaim +related latencies. + +============================================================== + hugepages_treat_as_movable This parameter controls whether we can allocate hugepages from ZONE_MOVABLE @@ -640,6 +657,24 @@ The default value is 1 (enabled). ============================================================== +reap_mem_on_sigkill + +This enables or disables memory reaping for a process that has received +SIGKILL, where the sending process must have the CAP_KILL capability. 
+ +If this is set to 1, when a process receives SIGKILL from a process +that has the capability, CAP_KILL, the process is added into the oom_reaper +queue which can be picked up by the oom_reaper thread to reap the memory of +that process. This reaps for the process which received SIGKILL through +either sys_kill from user or kill_pid from kernel. + +If this is set to 0, we do not reap the memory of a process that received +SIGKILL through either sys_kill from user or kill_pid from kernel. + +The default value is 0 (disabled). + +============================================================== + oom_kill_allocating_task This enables or disables killing the OOM-triggering task in diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index 7b2eb1b7d4cab3f68b2a7569977a5fa8b7a39f23..a3233da7fa88ed94bf73aebceaf2b12a6a1169fc 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst @@ -19,6 +19,7 @@ place where this information is gathered. no_new_privs seccomp_filter unshare + spec_ctrl .. only:: subproject and html diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst new file mode 100644 index 0000000000000000000000000000000000000000..32f3d55c54b75e1c6642a3d328a1dc404dfd4dc9 --- /dev/null +++ b/Documentation/userspace-api/spec_ctrl.rst @@ -0,0 +1,94 @@ +=================== +Speculation Control +=================== + +Quite some CPUs have speculation-related misfeatures which are in +fact vulnerabilities causing data leaks in various forms even across +privilege domains. + +The kernel provides mitigation for such vulnerabilities in various +forms. Some of these mitigations are compile-time configurable and some +can be supplied on the kernel command line. + +There is also a class of mitigations which are very expensive, but they can +be restricted to a certain set of processes or tasks in controlled +environments. 
The mechanism to control these mitigations is via +:manpage:`prctl(2)`. + +There are two prctl options which are related to this: + + * PR_GET_SPECULATION_CTRL + + * PR_SET_SPECULATION_CTRL + +PR_GET_SPECULATION_CTRL +----------------------- + +PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature +which is selected with arg2 of prctl(2). The return value uses bits 0-3 with +the following meaning: + +==== ===================== =================================================== +Bit Define Description +==== ===================== =================================================== +0 PR_SPEC_PRCTL Mitigation can be controlled per task by + PR_SET_SPECULATION_CTRL. +1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is + disabled. +2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is + enabled. +3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A + subsequent prctl(..., PR_SPEC_ENABLE) will fail. +==== ===================== =================================================== + +If all bits are 0 the CPU is not affected by the speculation misfeature. + +If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is +available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation +misfeature will fail. + +PR_SET_SPECULATION_CTRL +----------------------- + +PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which +is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand +in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or +PR_SPEC_FORCE_DISABLE. + +Common error codes +------------------ +======= ================================================================= +Value Meaning +======= ================================================================= +EINVAL The prctl is not implemented by the architecture or unused + prctl(2) arguments are not 0. + +ENODEV arg2 is selecting a not supported speculation misfeature. 
+======= ================================================================= + +PR_SET_SPECULATION_CTRL error codes +----------------------------------- +======= ================================================================= +Value Meaning +======= ================================================================= +0 Success + +ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor + PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE. + +ENXIO Control of the selected speculation misfeature is not possible. + See PR_GET_SPECULATION_CTRL. + +EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller + tried to enable it again. +======= ================================================================= + +Speculation misfeature controls +------------------------------- +- PR_SPEC_STORE_BYPASS: Speculative Store Bypass + + Invocations: + * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); diff --git a/Makefile b/Makefile index 1980262f3061ddeb1d94e899f39ea537d590acd0..d946b1ed4ed738d45a825b50a79c304aa80e2ff1 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 41 +SUBLEVEL = 48 EXTRAVERSION = NAME = Petit Gorille @@ -769,7 +769,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) # These warnings generated too much noise in a regular build. 
# Use make W=1 to enable them (see scripts/Makefile.extrawarn) KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) endif KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h index 68dfb3cb71454384dd187edfd0dded0fe4b65117..02a7c2fa610635ac3431f995fd625e8c09020635 100644 --- a/arch/alpha/include/asm/xchg.h +++ b/arch/alpha/include/asm/xchg.h @@ -12,6 +12,10 @@ * Atomic exchange. * Since it can be used to implement critical sections * it must clobber "memory" (also for interrupts in UP). + * + * The leading and the trailing memory barriers guarantee that these + * operations are fully ordered. + * */ static inline unsigned long @@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val) { unsigned long ret, tmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %4,7,%3\n" " insbl %1,%4,%1\n" @@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val) { unsigned long ret, tmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %4,7,%3\n" " inswl %1,%4,%1\n" @@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val) { unsigned long dummy; + smp_mb(); __asm__ __volatile__( "1: ldl_l %0,%4\n" " bis $31,%3,%1\n" @@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val) { unsigned long dummy; + smp_mb(); __asm__ __volatile__( "1: ldq_l %0,%4\n" " bis $31,%3,%1\n" @@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size) * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. * - * The memory barrier should be placed in SMP only when we actually - * make the change. If we don't change anything (so if the returned - * prev is equal to old) then we aren't acquiring anything new and - * we don't need any memory barrier as far I can tell. 
+ * The leading and the trailing memory barriers guarantee that these + * operations are fully ordered. + * + * The trailing memory barrier is placed in SMP unconditionally, in + * order to guarantee that dependency ordering is preserved when a + * dependency is headed by an unsuccessful operation. */ static inline unsigned long @@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) { unsigned long prev, tmp, cmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %5,7,%4\n" " insbl %1,%5,%1\n" @@ -150,8 +161,8 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) " or %1,%2,%2\n" " stq_c %2,0(%4)\n" " beq %2,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" @@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) { unsigned long prev, tmp, cmp, addr64; + smp_mb(); __asm__ __volatile__( " andnot %5,7,%4\n" " inswl %1,%5,%1\n" @@ -177,8 +189,8 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) " or %1,%2,%2\n" " stq_c %2,0(%4)\n" " beq %2,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" @@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new) { unsigned long prev, cmp; + smp_mb(); __asm__ __volatile__( "1: ldl_l %0,%5\n" " cmpeq %0,%3,%1\n" @@ -200,8 +213,8 @@ ____cmpxchg(_u32, volatile int *m, int old, int new) " mov %4,%1\n" " stl_c %1,%2\n" " beq %1,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" @@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) { unsigned long prev, cmp; + smp_mb(); __asm__ __volatile__( "1: ldq_l %0,%5\n" " cmpeq %0,%3,%1\n" @@ -223,8 +237,8 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) " mov %4,%1\n" " stq_c %1,%2\n" " beq %1,3f\n" - __ASM__MB "2:\n" + __ASM__MB ".subsection 2\n" "3: br 1b\n" ".previous" diff --git a/arch/arc/Kconfig 
b/arch/arc/Kconfig index c84e67fdea095cbed225edac052f4ce07007abb9..4383313b064a0439a74a8b8184d210d0d9160a12 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -487,7 +487,6 @@ config ARC_CURR_IN_REG config ARC_EMUL_UNALIGNED bool "Emulate unaligned memory access (userspace only)" - default N select SYSCTL_ARCH_UNALIGN_NO_WARN select SYSCTL_ARCH_UNALIGN_ALLOW depends on ISA_ARCOMPACT diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h index ea022d47896cef2761bbb8a147844c506f358621..21ec82466d62c89922566ec3bf32551c2dce003b 100644 --- a/arch/arc/include/asm/bug.h +++ b/arch/arc/include/asm/bug.h @@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address); #define BUG() do { \ pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ - dump_stack(); \ + barrier_before_unreachable(); \ + __builtin_trap(); \ } while (0) #define HAVE_ARCH_BUG diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index f61a52b01625b106143b089570b327b585e5a254..5fe84e481654ebe76a483bf81c2a11ffe870322d 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock); static char smp_cpuinfo_buf[128]; +/* + * Set mask to halt GFRC if any online core in SMP cluster is halted. + * Only works for ARC HS v3.0+, on earlier versions has no effect. + */ +static void mcip_update_gfrc_halt_mask(int cpu) +{ + struct bcr_generic gfrc; + unsigned long flags; + u32 gfrc_halt_mask; + + READ_BCR(ARC_REG_GFRC_BUILD, gfrc); + + /* + * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in + * GFRC 0x3 version. 
+ */ + if (gfrc.ver < 0x3) + return; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + __mcip_cmd(CMD_GFRC_READ_CORE, 0); + gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK); + gfrc_halt_mask |= BIT(cpu); + __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + +static void mcip_update_debug_halt_mask(int cpu) +{ + u32 mcip_mask = 0; + unsigned long flags; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + /* + * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK + * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK + * and CMD_DEBUG_READ_SELECT. + */ + __mcip_cmd(CMD_DEBUG_READ_SELECT, 0); + mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK); + + mcip_mask |= BIT(cpu); + + __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask); + /* + * Parameter specified halt cause: + * STATUS32[H]/actionpoint/breakpoint/self-halt + * We choose all of them (0xF). + */ + __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + static void mcip_setup_per_cpu(int cpu) { + struct mcip_bcr mp; + + READ_BCR(ARC_REG_MCIP_BCR, mp); + smp_ipi_irq_setup(cpu, IPI_IRQ); smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ); + + /* Update GFRC halt mask as new CPU came online */ + if (mp.gfrc) + mcip_update_gfrc_halt_mask(cpu); + + /* Update MCIP debug mask as new CPU came online */ + if (mp.dbg) + mcip_update_debug_halt_mask(cpu); } static void mcip_ipi_send(int cpu) @@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void) IS_AVAIL1(mp.gfrc, "GFRC")); cpuinfo_arc700[0].extn.gfrc = mp.gfrc; - - if (mp.dbg) { - __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf); - __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); - } } struct plat_smp_ops plat_smp_ops = { diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 6df9d94a953763eca43b20f02f1897308ab1ee7a..115eecc0d9a4a640b972796ef5f3a706d53917a4 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -24,6 +24,7 @@ 
#include #include #include +#include #include #include @@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void) { } +static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask) +{ + unsigned long dt_root = of_get_flat_dt_root(); + const char *buf; + + buf = of_get_flat_dt_prop(dt_root, name, NULL); + if (!buf) + return -EINVAL; + + if (cpulist_parse(buf, cpumask)) + return -EINVAL; + + return 0; +} + +/* + * Read from DeviceTree and setup cpu possible mask. If there is no + * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist. + */ +static void __init arc_init_cpu_possible(void) +{ + struct cpumask cpumask; + + if (arc_get_cpu_map("possible-cpus", &cpumask)) { + pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n", + NR_CPUS); + + cpumask_setall(&cpumask); + } + + if (!cpumask_test_cpu(0, &cpumask)) + panic("Master cpu (cpu[0]) is missed in cpu possible mask!"); + + init_cpu_possible(&cpumask); +} + /* * Called from setup_arch() before calling setup_processor() * @@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void) */ void __init smp_init_cpus(void) { - unsigned int i; - - for (i = 0; i < NR_CPUS; i++) - set_cpu_possible(i, true); + arc_init_cpu_possible(); if (plat_smp_ops.init_early_smp) plat_smp_ops.init_early_smp(); @@ -70,16 +104,12 @@ void __init smp_init_cpus(void) /* called from init ( ) => process 1 */ void __init smp_prepare_cpus(unsigned int max_cpus) { - int i; - /* * if platform didn't set the present map already, do it now * boot cpu is set to present already by init/main.c */ - if (num_present_cpus() <= 1) { - for (i = 0; i < max_cpus; i++) - set_cpu_present(i, true); - } + if (num_present_cpus() <= 1) + init_cpu_present(cpu_possible_mask); } void __init smp_cpus_done(unsigned int max_cpus) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index aecb098dc54ed9083f8e504b31e0fe0649bbd434..a4a62a217ed3613edecda83f32ff3d5df17307cb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ 
-241,6 +241,9 @@ config NEED_RET_TO_USER config ARCH_MTD_XIP bool +config ARCH_WANT_KMAP_ATOMIC_FLUSH + bool + config VECTORS_BASE hex default 0xffff0000 if MMU || CPU_HIGH_VECTOR @@ -559,6 +562,7 @@ config ARCH_QCOM select CLKDEV_LOOKUP select GENERIC_CLOCKEVENTS select GENERIC_ALLOCATOR + select ARM_GIC select ARM_PATCH_PHYS_VIRT select ARM_HAS_SG_CHAIN select ARCH_HAS_OPP @@ -568,6 +572,7 @@ config ARCH_QCOM select SPARSE_IRQ select USE_OF select PINCTRL + select ARCH_WANT_KMAP_ATOMIC_FLUSH help Support for Qualcomm MSM/QSD based systems. This runs on the apps processor of the MSM/QSD and depends on a shared memory diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts index 5f29010cdbd8129d57bd2141164f4fdd4e58cc5b..4ef80a703eda38472a5cb8a3b01c35e7068e598e 100644 --- a/arch/arm/boot/dts/at91-tse850-3.dts +++ b/arch/arm/boot/dts/at91-tse850-3.dts @@ -245,7 +245,7 @@ }; eeprom@50 { - compatible = "nxp,24c02", "atmel,24c02"; + compatible = "nxp,se97b", "atmel,24c02"; reg = <0x50>; pagesize = <16>; }; diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi index 61e1580035097017f37b9b8e660e2f3b829f9d4b..168c002f0ca0191c41ee4d1b26dd72c3ab88e48b 100644 --- a/arch/arm/boot/dts/bcm2836.dtsi +++ b/arch/arm/boot/dts/bcm2836.dtsi @@ -9,7 +9,7 @@ <0x40000000 0x40000000 0x00001000>; dma-ranges = <0xc0000000 0x00000000 0x3f000000>; - local_intc: local_intc { + local_intc: local_intc@40000000 { compatible = "brcm,bcm2836-l1-intc"; reg = <0x40000000 0x100>; interrupt-controller; diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi index bc1cca5cf43c41b8dad3902ce1a627b625883cb3..d5d058a568c3c34fb65195f13d04707ce4d8ffb9 100644 --- a/arch/arm/boot/dts/bcm2837.dtsi +++ b/arch/arm/boot/dts/bcm2837.dtsi @@ -8,7 +8,7 @@ <0x40000000 0x40000000 0x00001000>; dma-ranges = <0xc0000000 0x00000000 0x3f000000>; - local_intc: local_intc { + local_intc: local_intc@40000000 { compatible = "brcm,bcm2836-l1-intc"; reg = 
<0x40000000 0x100>; interrupt-controller; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 013431e3d7c3140d3a0645bdf4f130e9a860f984..4745e3c7806bc199f1d2ad218a7eff7d308b0001 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -251,7 +251,7 @@ jtag_gpio4: jtag_gpio4 { brcm,pins = <4 5 6 12 13>; - brcm,function = ; + brcm,function = ; }; jtag_gpio22: jtag_gpio22 { brcm,pins = <22 23 24 25 26 27>; @@ -396,8 +396,8 @@ i2s: i2s@7e203000 { compatible = "brcm,bcm2835-i2s"; - reg = <0x7e203000 0x20>, - <0x7e101098 0x02>; + reg = <0x7e203000 0x24>; + clocks = <&clocks BCM2835_CLOCK_PCM>; dmas = <&dma 2>, <&dma 3>; diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index 6a44b8021702176c63d09e55925ffd3d7e02994e..f0e2008f7490146a22ae06fbc310dbe5cb18c4d5 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -49,7 +49,7 @@ memory { device_type = "memory"; - reg = <0x60000000 0x80000000>; + reg = <0x60000000 0x20000000>; }; gpio-restart { diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts index 41c9132eb550d07dd686a85eda0296bbfad0b6f7..64363f75c01ad507ce40c32bd2e4a756190a608e 100644 --- a/arch/arm/boot/dts/dra71-evm.dts +++ b/arch/arm/boot/dts/dra71-evm.dts @@ -24,13 +24,13 @@ regulator-name = "vddshv8"; regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <3000000>; + regulator-max-microvolt = <3300000>; regulator-boot-on; vin-supply = <&evm_5v0>; gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; states = <1800000 0x0 - 3000000 0x1>; + 3300000 0x1>; }; evm_1v8_sw: fixedregulator-evm_1v8 { diff --git a/arch/arm/boot/dts/imx6dl-icore-rqs.dts b/arch/arm/boot/dts/imx6dl-icore-rqs.dts index cf42c2f5cdc7f9d13409efbd8db6d18d969f2511..1281bc39b7ab87a430b5edaaacf187b82526186d 100644 --- a/arch/arm/boot/dts/imx6dl-icore-rqs.dts +++ b/arch/arm/boot/dts/imx6dl-icore-rqs.dts @@ -42,7 +42,7 @@ /dts-v1/; -#include "imx6q.dtsi" 
+#include "imx6dl.dtsi" #include "imx6qdl-icore-rqs.dtsi" / { diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts index ae45af1ad062acd7df10c0f2a772b81ccae91c76..3cc1fb9ce44186c61ae0c713e63ad7ffdf677d6e 100644 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts @@ -213,37 +213,37 @@ &iomuxc { pinctrl_enet1: enet1grp { fsl,pins = < - MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3 - MX7D_PAD_SD2_WP__ENET1_MDC 0x3 - MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1 - MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1 - MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1 - MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1 - MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1 - MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1 - MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1 - MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1 - MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1 - MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1 - MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1 - MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1 + MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x30 + MX7D_PAD_SD2_WP__ENET1_MDC 0x30 + MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x11 + MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x11 + MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x11 + MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x11 + MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x11 + MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x11 + MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x11 + MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x11 + MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x11 + MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x11 + MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x11 + MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x11 >; }; pinctrl_enet2: enet2grp { fsl,pins = < - MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1 - MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1 - MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1 - MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1 - MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 
0x1 - MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1 - MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1 - MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1 - MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1 - MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1 - MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1 - MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1 + MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x11 + MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x11 + MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x11 + MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x11 + MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x11 + MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x11 + MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x11 + MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x11 + MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x11 + MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x11 + MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x11 + MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x11 >; }; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 44637cabcc566d402847992607984f9736db5a27..255e64ba32e2c7a163d670a7f5a670dbfbebbdea 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -82,7 +82,7 @@ enable-active-high; }; - reg_usb_otg2_vbus: regulator-usb-otg1-vbus { + reg_usb_otg2_vbus: regulator-usb-otg2-vbus { compatible = "regulator-fixed"; regulator-name = "usb_otg2_vbus"; regulator-min-microvolt = <5000000>; diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts index 95da5cb9d37ab8e50fab028bca63782dd422bf7c..b6ebe79261c61adb44fb5f2305e76a617e3942a4 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts @@ -427,7 +427,7 @@ "dclkin.0", "dclkin.1"; ports { - port@1 { + port@0 { endpoint { remote-endpoint = <&adv7511_in>; }; diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index 4916c65e0ace7ca0b9ec68cddaca34b55c39d932..5c0a76493d22aedc28422d793519a0cbbc06ab61 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -261,7 +261,7 @@ 
max-frequency = <37500000>; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; resets = <&cru SRST_SDIO>; @@ -279,7 +279,7 @@ max-frequency = <37500000>; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; default-sample-phase = <158>; disable-wp; dmas = <&pdma 12>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 06814421eed2ef9c41fd5db803bc60f91ee85bed..f59f7cc62be690e8750991111ad9fd03d892b5a5 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -600,7 +600,7 @@ interrupts = ; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; pinctrl-names = "default"; pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>; @@ -613,7 +613,7 @@ interrupts = ; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; pinctrl-names = "default"; pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>; @@ -628,7 +628,7 @@ max-frequency = <37500000>; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; bus-width = <8>; default-sample-phase = <158>; fifo-depth = <0x100>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 
356ed1e624525224c8e797da0990f83a75823008..f7a951afd28118947249a53df726b4a9245b348f 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -927,6 +927,7 @@ i2s: i2s@ff890000 { compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s"; reg = <0x0 0xff890000 0x0 0x10000>; + #sound-dai-cells = <0>; interrupts = ; #address-cells = <1>; #size-cells = <0>; @@ -1122,6 +1123,7 @@ compatible = "rockchip,rk3288-dw-hdmi"; reg = <0x0 0xff980000 0x0 0x20000>; reg-io-width = <4>; + #sound-dai-cells = <0>; rockchip,grf = <&grf>; interrupts = ; clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 7e24dc8e82d4f0834274f6f01deec5700e4e0ebb..8d9f42a422cbe3c630bc4bca70ebb9a155bcb286 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -827,7 +827,7 @@ timer@fffec600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xfffec600 0x100>; - interrupts = <1 13 0xf04>; + interrupts = <1 13 0xf01>; clocks = <&mpu_periph_clk>; }; diff --git a/arch/arm/configs/qcs405-perf_defconfig b/arch/arm/configs/qcs405-perf_defconfig deleted file mode 100644 index a3ebc47bd0c0dde9e88bd8d490d363e64eac8ddb..0000000000000000000000000000000000000000 --- a/arch/arm/configs/qcs405-perf_defconfig +++ /dev/null @@ -1,419 +0,0 @@ -CONFIG_AUDIT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y 
-CONFIG_PARTITION_ADVANCED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_QCS405=y -# CONFIG_VDSO is not set -CONFIG_ARM_PSCI=y -CONFIG_PREEMPT=y -CONFIG_CLEANCACHE=y -CONFIG_CMA=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_VFP=y -CONFIG_NEON=y -CONFIG_KERNEL_MODE_NEON=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y 
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y 
-CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_BT=y -CONFIG_BT_RFCOMM=y -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=y -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=y -CONFIG_CFG80211=y -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_RFKILL=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_DMA_CMA=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_UID_SYS_STATS=y -CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_KS8851=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y -CONFIG_USB_NET_SMSC75XX=y -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_INPUT_EVDEV=y -CONFIG_INPUT_EVBUG=m -CONFIG_INPUT_KEYRESET=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -CONFIG_JOYSTICK_XPAD=y -CONFIG_INPUT_TABLET=y -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ATMEL_MXT=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_KEYCHORD=y -CONFIG_INPUT_UINPUT=y -CONFIG_INPUT_GPIO=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVMEM is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_I2C_CHARDEV=y -CONFIG_SPI=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_PINCTRL_QCS405=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_GPIOLIB=y -CONFIG_THERMAL=y -CONFIG_THERMAL_GOV_USER_SPACE=y 
-CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_CPR=y -CONFIG_REGULATOR_MEM_ACC=y -CONFIG_REGULATOR_RPM_SMD=y -CONFIG_REGULATOR_SPM=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SOC_CAMERA=y -CONFIG_SOC_CAMERA_PLATFORM=y -CONFIG_FB=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_SOC=y -CONFIG_HIDRAW=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_USB_HIDDEV=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_ACM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_STORAGE_DATAFAB=y -CONFIG_USB_STORAGE_FREECOM=y -CONFIG_USB_STORAGE_ISD200=y -CONFIG_USB_STORAGE_USBAT=y -CONFIG_USB_STORAGE_SDDR09=y -CONFIG_USB_STORAGE_SDDR55=y -CONFIG_USB_STORAGE_JUMPSHOT=y -CONFIG_USB_STORAGE_ALAUDA=y -CONFIG_USB_STORAGE_KARMA=y -CONFIG_USB_STORAGE_CYPRESS_ATACB=y -CONFIG_USB_DWC3=y -CONFIG_USB_SERIAL=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_VBUS_DRAW=500 -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=m -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_UIO=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QPNP_REVID=y 
-CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_COMMON_CLK_QCOM=y -CONFIG_QCOM_CLK_SMD_RPM=y -CONFIG_MDM_GCC_QCS405=y -CONFIG_MDM_DEBUGCC_QCS405=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_MAILBOX=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_RPM=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_SMD=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD_RPM=y -CONFIG_MSM_SPM=y -CONFIG_MSM_L2_SPM=y -CONFIG_QCOM_SCM=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -CONFIG_IIO=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_PAGE_OWNER=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y -CONFIG_UFS_FAULT_INJECTION=y -CONFIG_FAULT_INJECTION_DEBUG_FS=y -CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_IPC_LOGGING=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_LKDTM=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_LSM_MMAP_MIN_ADDR=4096 -CONFIG_HARDENED_USERCOPY=y -CONFIG_SECURITY_SELINUX=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y diff --git a/arch/arm/configs/qcs405_defconfig b/arch/arm/configs/qcs405_defconfig 
index f756b2b493d147f510a12caa7f1ec7590f2cc193..b6ee98ca78ef9be69f8e98c3e72485fb46704f02 100644 --- a/arch/arm/configs/qcs405_defconfig +++ b/arch/arm/configs/qcs405_defconfig @@ -42,7 +42,6 @@ CONFIG_ZSMALLOC=y CONFIG_SECCOMP=y CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y CONFIG_VFP=y CONFIG_NEON=y CONFIG_KERNEL_MODE_NEON=y @@ -196,6 +195,12 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_MSM_QPIC_NAND=y +CONFIG_MTD_NAND=y +CONFIG_MTD_UBI=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y @@ -266,6 +271,7 @@ CONFIG_PINCTRL_QCS405=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_GPIOLIB=y CONFIG_POWER_SUPPLY=y +CONFIG_SMB1351_USB_CHARGER=y CONFIG_THERMAL=y CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_THERMAL_GOV_LOW_LIMITS=y @@ -294,6 +300,11 @@ CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_SOC_CAMERA=y CONFIG_SOC_CAMERA_PLATFORM=y CONFIG_FB=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_SOUND=y CONFIG_SND=y @@ -366,8 +377,11 @@ CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y CONFIG_QCOM_APCS_IPC=y +CONFIG_ARM_SMMU=y CONFIG_QCOM_LAZY_MAPPING=y CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y CONFIG_QCOM_IOMMU=y CONFIG_RPMSG_CHAR=y CONFIG_RPMSG_QCOM_GLINK_RPM=y @@ -380,16 +394,24 @@ CONFIG_MSM_SPM=y CONFIG_MSM_L2_SPM=y CONFIG_QCOM_SCM=y CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_GLINK=y CONFIG_QCOM_GLINK_PKT=y # CONFIG_MSM_JTAGV8 is not set CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y 
CONFIG_PWM=y CONFIG_PWM_QTI_LPG=y +CONFIG_QTI_MPM=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT2_FS=y @@ -405,6 +427,8 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y @@ -448,6 +472,7 @@ CONFIG_CORESIGHT_CTI=y CONFIG_CORESIGHT_TPDA=y CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index bc8d4bbd82e27719a990c7972fd77bfca9dc7aef..9342904cccca67ac3cfba4e0b75bcbc17893ade0 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -536,4 +536,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm +#ifdef CONFIG_KPROBES +#define _ASM_NOKPROBE(entry) \ + .pushsection "_kprobe_blacklist", "aw" ; \ + .balign 4 ; \ + .long entry; \ + .popsection +#else +#define _ASM_NOKPROBE(entry) +#endif + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index eb46fc81a440c3384ff55efe1ab26c43cd6c86db..08cd720eae0110e354d7055b8a8841ffc81a7075 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -221,6 +221,22 @@ static inline unsigned int kvm_get_vmid_bits(void) return 8; } +/* + * We are not in the kvm->srcu critical section most of the time, so we take + * the SRCU read lock here. Since we copy the data from the user page, we + * can immediately drop the lock again. 
+ */ +static inline int kvm_read_guest_lock(struct kvm *kvm, + gpa_t gpa, void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_read_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + static inline void *kvm_get_hyp_vector(void) { return kvm_ksym_ref(__kvm_hyp_vector); diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h index 9c99e817535ecd4cfa6013e8dffba974e94a2bae..5b85889f82eeb422400c039e8b5e8a2a6d4553b3 100644 --- a/arch/arm/include/asm/vdso.h +++ b/arch/arm/include/asm/vdso.h @@ -12,8 +12,6 @@ struct mm_struct; void arm_install_vdso(struct mm_struct *mm, unsigned long addr); -extern char vdso_start, vdso_end; - extern unsigned int vdso_total_pages; #else /* CONFIG_VDSO */ diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 29286fbc211abdf4c37f6785b22f29128c41dad8..a319a7877c60f2df6447e26c4c2e9fc4d23f991e 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c @@ -112,6 +112,11 @@ int psci_cpu_kill(unsigned int cpu) return 0; } +bool psci_cpu_can_disable(unsigned int cpu) +{ + return true; +} + #endif bool __init psci_smp_available(void) @@ -126,5 +131,6 @@ const struct smp_operations psci_smp_ops __initconst = { .cpu_disable = psci_cpu_disable, .cpu_die = psci_cpu_die, .cpu_kill = psci_cpu_kill, + .cpu_can_disable = psci_cpu_can_disable, #endif }; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 01cf563c9780bac7484041c7524e70065fd623f6..4efa719b90bb703c854c1dd7c38d21db829d2d36 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -418,7 +419,8 @@ void unregister_undef_hook(struct undef_hook *hook) raw_spin_unlock_irqrestore(&undef_lock, flags); } -static int call_undef_hook(struct pt_regs *regs, unsigned int instr) +static nokprobe_inline +int call_undef_hook(struct pt_regs *regs, unsigned int instr) { 
struct undef_hook *hook; unsigned long flags; @@ -491,6 +493,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); } +NOKPROBE_SYMBOL(do_undefinstr) /* * Handle FIQ similarly to NMI on x86 systems. diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index a4d6dc0f24273e15d18b53b23dbf1481f6b7cfc7..f4dd7f9663c10a704d3858de6beb538e5023cf78 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -39,6 +39,8 @@ static struct page **vdso_text_pagelist; +extern char vdso_start[], vdso_end[]; + /* Total number of pages needed for the data and text portions of the VDSO. */ unsigned int vdso_total_pages __ro_after_init; @@ -197,13 +199,13 @@ static int __init vdso_init(void) unsigned int text_pages; int i; - if (memcmp(&vdso_start, "\177ELF", 4)) { + if (memcmp(vdso_start, "\177ELF", 4)) { pr_err("VDSO is not a valid ELF object!\n"); return -ENOEXEC; } - text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; - pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start); + text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; + pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start); /* Allocate the VDSO text pagelist */ vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *), @@ -218,7 +220,7 @@ static int __init vdso_init(void) for (i = 0; i < text_pages; i++) { struct page *page; - page = virt_to_page(&vdso_start + i * PAGE_SIZE); + page = virt_to_page(vdso_start + i * PAGE_SIZE); vdso_text_pagelist[i] = page; } @@ -229,7 +231,7 @@ static int __init vdso_init(void) cntvct_ok = cntvct_functional(); - patch_vdso(&vdso_start); + patch_vdso(vdso_start); return 0; } diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index df73914e81c8344feccac5df8d5791dcbe92ed60..746e7801dcdf70fed9e339c2d6800b3f275c49b7 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -38,6 +38,7 @@ ENTRY(__get_user_1) mov r0, #0 ret lr 
ENDPROC(__get_user_1) +_ASM_NOKPROBE(__get_user_1) ENTRY(__get_user_2) check_uaccess r0, 2, r1, r2, __get_user_bad @@ -58,6 +59,7 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_2) +_ASM_NOKPROBE(__get_user_2) ENTRY(__get_user_4) check_uaccess r0, 4, r1, r2, __get_user_bad @@ -65,6 +67,7 @@ ENTRY(__get_user_4) mov r0, #0 ret lr ENDPROC(__get_user_4) +_ASM_NOKPROBE(__get_user_4) ENTRY(__get_user_8) check_uaccess r0, 8, r1, r2, __get_user_bad8 @@ -78,6 +81,7 @@ ENTRY(__get_user_8) mov r0, #0 ret lr ENDPROC(__get_user_8) +_ASM_NOKPROBE(__get_user_8) #ifdef __ARMEB__ ENTRY(__get_user_32t_8) @@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8) mov r0, #0 ret lr ENDPROC(__get_user_32t_8) +_ASM_NOKPROBE(__get_user_32t_8) ENTRY(__get_user_64t_1) check_uaccess r0, 1, r1, r2, __get_user_bad8 @@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1) mov r0, #0 ret lr ENDPROC(__get_user_64t_1) +_ASM_NOKPROBE(__get_user_64t_1) ENTRY(__get_user_64t_2) check_uaccess r0, 2, r1, r2, __get_user_bad8 @@ -114,6 +120,7 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_64t_2) +_ASM_NOKPROBE(__get_user_64t_2) ENTRY(__get_user_64t_4) check_uaccess r0, 4, r1, r2, __get_user_bad8 @@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4) mov r0, #0 ret lr ENDPROC(__get_user_64t_4) +_ASM_NOKPROBE(__get_user_64t_4) #endif __get_user_bad8: @@ -131,6 +139,8 @@ __get_user_bad: ret lr ENDPROC(__get_user_bad) ENDPROC(__get_user_bad8) +_ASM_NOKPROBE(__get_user_bad) +_ASM_NOKPROBE(__get_user_bad8) .pushsection __ex_table, "a" .long 1b, __get_user_bad diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index a3e78074be701a3a6abec03ff7e5586f79acfcbe..62eb7d6688900f244f546bd88da951e4ef934ac0 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/ - 
GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c index 43e3e188f521341884d0a6e280d5724ef606e13f..fa512413a47172212483ebec6811bc5547aa729b 100644 --- a/arch/arm/mach-omap1/clock.c +++ b/arch/arm/mach-omap1/clock.c @@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c) return -ENOMEM; c->dent = d; - d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount); + d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount); if (!d) { err = -ENOMEM; goto err_out; } - d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate); + d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate); if (!d) { err = -ENOMEM; goto err_out; } - d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags); + d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags); if (!d) { err = -ENOMEM; goto err_out; diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 4bb6751864a50e046e74c0952ad75571e1d979d0..fc5fb776a7101234bd64da673815d10a0b75f0f2 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -299,8 +299,6 @@ static void irq_save_context(void) if (soc_is_dra7xx()) return; - if (!sar_base) - sar_base = omap4_get_sar_ram_base(); if (wakeupgen_ops && wakeupgen_ops->save_context) wakeupgen_ops->save_context(); } @@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node, irq_hotplug_init(); irq_pm_init(); + sar_base = omap4_get_sar_ram_base(); + return 0; } IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init); diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 366158a54fcd8beae9ff50d712e1b5c63f87d456..6f68576e56956a635acae35af565d09bb2ff2d0f 
100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -186,7 +186,7 @@ static void omap_pm_end(void) cpu_idle_poll_ctrl(false); } -static void omap_pm_finish(void) +static void omap_pm_wake(void) { if (soc_is_omap34xx()) omap_prcm_irq_complete(); @@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = { .begin = omap_pm_begin, .end = omap_pm_end, .enter = omap_pm_enter, - .finish = omap_pm_finish, + .wake = omap_pm_wake, .valid = suspend_valid_only_mem, }; diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index ece09c9461f78d9b3908095615a688522b69e9b3..d61fbd7a2840a4980205c16d1c675a957c6292c8 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = { .tick_resume = omap2_gp_timer_shutdown, }; -static struct property device_disabled = { - .name = "status", - .length = sizeof("disabled"), - .value = "disabled", -}; - static const struct of_device_id omap_timer_match[] __initconst = { { .compatible = "ti,omap2420-timer", }, { .compatible = "ti,omap3430-timer", }, @@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id * of_get_property(np, "ti,timer-secure", NULL))) continue; - if (!of_device_is_compatible(np, "ti,omap-counter32k")) - of_add_property(np, &device_disabled); + if (!of_device_is_compatible(np, "ti,omap-counter32k")) { + struct property *prop; + + prop = kzalloc(sizeof(*prop), GFP_KERNEL); + if (!prop) + return NULL; + prop->name = "status"; + prop->value = "disabled"; + prop->length = strlen(prop->value); + of_add_property(np, prop); + } return np; } diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig index 2a7bb6ccdcb7eb219f515c6e0f1ba2bfe573a349..a810f4dd34b1e266a001f20a920f421ba7dcf3e8 100644 --- a/arch/arm/mach-orion5x/Kconfig +++ b/arch/arm/mach-orion5x/Kconfig @@ -58,7 +58,6 @@ config MACH_KUROBOX_PRO config MACH_DNS323 bool "D-Link 
DNS-323" - select GENERIC_NET_UTILS select I2C_BOARDINFO if I2C help Say 'Y' here if you want your kernel to support the @@ -66,7 +65,6 @@ config MACH_DNS323 config MACH_TS209 bool "QNAP TS-109/TS-209" - select GENERIC_NET_UTILS help Say 'Y' here if you want your kernel to support the QNAP TS-109/TS-209 platform. @@ -101,7 +99,6 @@ config MACH_LINKSTATION_LS_HGL config MACH_TS409 bool "QNAP TS-409" - select GENERIC_NET_UTILS help Say 'Y' here if you want your kernel to support the QNAP TS-409 platform. diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index cd483bfb5ca82cd3d6289a47e6cc0a56f7787e1f..d13344b2ddcd4ef0ad6c8b8554733a0fc6063610 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -173,10 +173,42 @@ static struct mv643xx_eth_platform_data dns323_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; +/* dns323_parse_hex_*() taken from tsx09-common.c; should a common copy of these + * functions be kept somewhere? + */ +static int __init dns323_parse_hex_nibble(char n) +{ + if (n >= '0' && n <= '9') + return n - '0'; + + if (n >= 'A' && n <= 'F') + return n - 'A' + 10; + + if (n >= 'a' && n <= 'f') + return n - 'a' + 10; + + return -1; +} + +static int __init dns323_parse_hex_byte(const char *b) +{ + int hi; + int lo; + + hi = dns323_parse_hex_nibble(b[0]); + lo = dns323_parse_hex_nibble(b[1]); + + if (hi < 0 || lo < 0) + return -1; + + return (hi << 4) | lo; +} + static int __init dns323_read_mac_addr(void) { u_int8_t addr[6]; - void __iomem *mac_page; + int i; + char *mac_page; /* MAC address is stored as a regular ol' string in /dev/mtdblock4 * (0x007d0000-0x00800000) starting at offset 196480 (0x2ff80). 
@@ -185,8 +217,23 @@ static int __init dns323_read_mac_addr(void) if (!mac_page) return -ENOMEM; - if (!mac_pton((__force const char *) mac_page, addr)) - goto error_fail; + /* Sanity check the string we're looking at */ + for (i = 0; i < 5; i++) { + if (*(mac_page + (i * 3) + 2) != ':') { + goto error_fail; + } + } + + for (i = 0; i < 6; i++) { + int byte; + + byte = dns323_parse_hex_byte(mac_page + (i * 3)); + if (byte < 0) { + goto error_fail; + } + + addr[i] = byte; + } iounmap(mac_page); printk("DNS-323: Found ethernet MAC address: %pM\n", addr); diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c index 89774985d3803fbc8c84a7eb993a7d3e18bf0d75..905d4f2dd0b827938862f1a089e18651eea2757f 100644 --- a/arch/arm/mach-orion5x/tsx09-common.c +++ b/arch/arm/mach-orion5x/tsx09-common.c @@ -53,12 +53,53 @@ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; +static int __init qnap_tsx09_parse_hex_nibble(char n) +{ + if (n >= '0' && n <= '9') + return n - '0'; + + if (n >= 'A' && n <= 'F') + return n - 'A' + 10; + + if (n >= 'a' && n <= 'f') + return n - 'a' + 10; + + return -1; +} + +static int __init qnap_tsx09_parse_hex_byte(const char *b) +{ + int hi; + int lo; + + hi = qnap_tsx09_parse_hex_nibble(b[0]); + lo = qnap_tsx09_parse_hex_nibble(b[1]); + + if (hi < 0 || lo < 0) + return -1; + + return (hi << 4) | lo; +} + static int __init qnap_tsx09_check_mac_addr(const char *addr_str) { u_int8_t addr[6]; + int i; - if (!mac_pton(addr_str, addr)) - return -1; + for (i = 0; i < 6; i++) { + int byte; + + /* + * Enforce "xx:xx:xx:xx:xx:xx\n" format. + */ + if (addr_str[(i * 3) + 2] != ((i < 5) ? 
':' : '\n')) + return -1; + + byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3)); + if (byte < 0) + return -1; + addr[i] = byte; + } printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr); @@ -77,12 +118,12 @@ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) unsigned long addr; for (addr = mem_base; addr < (mem_base + size); addr += 1024) { - void __iomem *nor_page; + char *nor_page; int ret = 0; nor_page = ioremap(addr, 1024); if (nor_page != NULL) { - ret = qnap_tsx09_check_mac_addr((__force const char *)nor_page); + ret = qnap_tsx09_check_mac_addr(nor_page); iounmap(nor_page); } diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig index 2fec9f2fef2761468b4ec05a9b68d6ca79c831a6..669123485dc3ab44520410bff51686226768f57d 100644 --- a/arch/arm/mach-qcom/Kconfig +++ b/arch/arm/mach-qcom/Kconfig @@ -78,5 +78,18 @@ config ARCH_MDM9615 select CLKSRC_OF select COMMON_CLK +config ARCH_SDXPRAIRIE + bool "Enable support for SDXPRAIRIE" + select CPU_V7 + select HAVE_ARM_ARCH_TIMER + select MSM_CORTEX_A7 + select PINCTRL + select PCI + select QCOM_SCM if SMP + select MSM_JTAG_MM if CORESIGHT_ETM + select PM_DEVFREQ + select COMMON_CLK + select COMMON_CLK_QCOM + select QCOM_GDSC endmenu endif diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile index caacb5f27b3000dfa85e13ea9cde174e26b9d315..23af561084b8abc58369834d3f7dbd914ff0042d 100644 --- a/arch/arm/mach-qcom/Makefile +++ b/arch/arm/mach-qcom/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_USE_OF) += board-dt.o obj-$(CONFIG_SMP) += platsmp.o obj-$(CONFIG_ARCH_QCS405) += board-qcs405.o +obj-$(CONFIG_ARCH_SDXPRAIRIE) += board-sdxprairie.o diff --git a/arch/arm/mach-qcom/board-sdxprairie.c b/arch/arm/mach-qcom/board-sdxprairie.c new file mode 100644 index 0000000000000000000000000000000000000000..4adac212953b7af96434ef66e7f5c17ceb02e971 --- /dev/null +++ b/arch/arm/mach-qcom/board-sdxprairie.c @@ -0,0 +1,32 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "board-dt.h" +#include +#include + +static const char *sdxprairie_dt_match[] __initconst = { + "qcom,sdxprairie", + NULL +}; + +static void __init sdxprairie_init(void) +{ + board_dt_populate(NULL); +} + +DT_MACHINE_START(SDXPRAIRIE_DT, + "Qualcomm Technologies, Inc. SDXPRAIRIE (Flattened Device Tree)") + .init_machine = sdxprairie_init, + .dt_compat = sdxprairie_dt_match, +MACHINE_END diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0b499a7773ca801717e80edf9cfe2ac1cbae8222..600e4ac82485a2ad9552f75520d399c7b2f4093f 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -112,6 +112,21 @@ static void __dma_page_cpu_to_dev(struct page *, unsigned long, static void __dma_page_dev_to_cpu(struct page *, unsigned long, size_t, enum dma_data_direction); +static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, + const void *caller); + +static void __dma_free_remap(void *cpu_addr, size_t size, bool warn); + +static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot); + +static void *arm_dma_remap(struct device *dev, void *cpu_addr, + dma_addr_t handle, size_t size, + unsigned long attrs); + +static void arm_dma_unremap(struct device *dev, void *remapped_addr, + size_t size); + /** * arm_dma_map_page - map a portion of a page for streaming DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -200,6 +215,8 @@ const struct dma_map_ops arm_dma_ops = { .sync_sg_for_device = 
arm_dma_sync_sg_for_device, .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, + .remap = arm_dma_remap, + .unremap = arm_dma_unremap, }; EXPORT_SYMBOL(arm_dma_ops); @@ -375,10 +392,10 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, prot, caller); } -static void __dma_free_remap(void *cpu_addr, size_t size) +static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn) { dma_common_free_remap(cpu_addr, size, - VM_ARM_DMA_CONSISTENT | VM_USERMAP, false); + VM_ARM_DMA_CONSISTENT | VM_USERMAP, no_warn); } #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K @@ -541,21 +558,39 @@ static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, return 0; } -static void __dma_remap(struct page *page, size_t size, pgprot_t prot) +static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr, + void *data) +{ + pte_clear(&init_mm, addr, pte); + return 0; +} + +static void __dma_remap(struct page *page, size_t size, pgprot_t prot, + bool want_vaddr) { unsigned long start = (unsigned long) page_address(page); unsigned end = start + size; + int (*func)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); + + if (!want_vaddr) + func = __dma_clear_pte; + else + func = __dma_update_pte; - apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); + apply_to_page_range(&init_mm, start, size, func, &prot); + mb(); /*Ensure pte's are updated */ flush_tlb_kernel_range(start, end); } + +#define NO_KERNEL_MAPPING_DUMMY 0x2222 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, pgprot_t prot, struct page **ret_page, const void *caller, bool want_vaddr) { struct page *page; - void *ptr = NULL; + void *ptr = (void *)NO_KERNEL_MAPPING_DUMMY; /* * __alloc_remap_buffer is only called when the device is * non-coherent @@ -629,21 +664,31 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size, __dma_clear_buffer(page, size, coherent_flag); - if 
(!want_vaddr) - goto out; - if (PageHighMem(page)) { - ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); - if (!ptr) { - dma_release_from_contiguous(dev, page, count); - return NULL; + if (!want_vaddr) { + /* + * Something non-NULL needs to be returned here. Give + * back a dummy address that is unmapped to catch + * clients trying to use the address incorrectly + */ + ptr = (void *)NO_KERNEL_MAPPING_DUMMY; + + /* also flush out the stale highmem mappings */ + kmap_flush_unused(); + kmap_atomic_flush_unused(); + } else { + ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, + caller); + if (!ptr) { + dma_release_from_contiguous(dev, page, count); + return NULL; + } } } else { - __dma_remap(page, size, prot); + __dma_remap(page, size, prot, want_vaddr); ptr = page_address(page); } - out: *ret_page = page; return ptr; } @@ -651,12 +696,10 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size, static void __free_from_contiguous(struct device *dev, struct page *page, void *cpu_addr, size_t size, bool want_vaddr) { - if (want_vaddr) { - if (PageHighMem(page)) - __dma_free_remap(cpu_addr, size); - else - __dma_remap(page, size, PAGE_KERNEL); - } + if (PageHighMem(page)) + __dma_free_remap(cpu_addr, size, true); + else + __dma_remap(page, size, PAGE_KERNEL, true); dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); } @@ -750,7 +793,7 @@ static void *remap_allocator_alloc(struct arm_dma_alloc_args *args, static void remap_allocator_free(struct arm_dma_free_args *args) { if (args->want_vaddr) - __dma_free_remap(args->cpu_addr, args->size); + __dma_free_remap(args->cpu_addr, args->size, false); __dma_free_buffer(args->page, args->size); } @@ -837,7 +880,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, kfree(buf); } - return args.want_vaddr ? 
addr : page; + return addr; } /* @@ -883,6 +926,41 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, return ret; } +static void *arm_dma_remap(struct device *dev, void *cpu_addr, + dma_addr_t handle, size_t size, + unsigned long attrs) +{ + void *ptr; + struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); + pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); + unsigned long offset = handle & ~PAGE_MASK; + + size = PAGE_ALIGN(size + offset); + ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, + __builtin_return_address(0)); + return ptr ? ptr + offset : ptr; +} + +static void arm_dma_unremap(struct device *dev, void *remapped_addr, + size_t size) +{ + unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; + struct vm_struct *area; + + size = PAGE_ALIGN(size); + remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK); + + area = find_vm_area(remapped_addr); + if (!area || (area->flags & flags) != flags) { + WARN(1, "trying to free invalid coherent area: %p\n", + remapped_addr); + return; + } + + vunmap(remapped_addr); + flush_tlb_kernel_range((unsigned long)remapped_addr, + (unsigned long)(remapped_addr + size)); +} /* * Create userspace mapping for the DMA-coherent memory. */ @@ -917,9 +995,10 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, .page = page, .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), }; + void *addr = (args.want_vaddr) ? 
cpu_addr : page; - buf = arm_dma_buffer_find(cpu_addr); - if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr)) + buf = arm_dma_buffer_find(addr); + if (WARN(!buf, "Freeing invalid buffer %pK\n", addr)) return; buf->allocator->free(&args); @@ -1320,8 +1399,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, int coherent_flag) { struct page **pages; - int count = size >> PAGE_SHIFT; - int array_size = count * sizeof(struct page *); + size_t count = size >> PAGE_SHIFT; + size_t array_size = count * sizeof(struct page *); int i = 0; int order_idx = 0; @@ -1981,7 +2060,11 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p { struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t dma_addr; - int ret, prot, len = PAGE_ALIGN(size + offset); + int ret, prot, len, start_offset, map_offset; + + map_offset = offset & ~PAGE_MASK; + start_offset = offset & PAGE_MASK; + len = PAGE_ALIGN(map_offset + size); dma_addr = __alloc_iova(mapping, len); if (dma_addr == ARM_MAPPING_ERROR) @@ -1989,11 +2072,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p prot = __dma_info_to_prot(dir, attrs); - ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); + ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) + + start_offset, len, prot); if (ret < 0) goto fail; - return dma_addr + offset; + return dma_addr + map_offset; fail: __free_iova(mapping, dma_addr, len); return ARM_MAPPING_ERROR; @@ -2386,22 +2470,34 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu) { struct dma_iommu_mapping *mapping; + struct iommu_domain *domain; if (!iommu) return false; - mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); - if (IS_ERR(mapping)) { - pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", - size, dev_name(dev)); + /* + * Adding this to not to attach to smmu device by 
default as any way + * clients call the arm_iommu_create_mapping() in their use cases. + */ + domain = iommu_get_domain_for_dev(dev); + + if (!domain) return false; - } - if (__arm_iommu_attach_device(dev, mapping)) { - pr_warn("Failed to attached device %s to IOMMU_mapping\n", + if (domain->type == IOMMU_DOMAIN_DMA) { + mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); + if (IS_ERR(mapping)) { + pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", + size, dev_name(dev)); + return false; + } + + if (__arm_iommu_attach_device(dev, mapping)) { + pr_warn("Failed to attached device %s to IOMMU_mapping\n", dev_name(dev)); - arm_iommu_release_mapping(mapping); - return false; + arm_iommu_release_mapping(mapping); + return false; + } } return true; diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index d02f8187b1ccedb0df0520d2d75e6d93c838b360..5d73327f849175e9a6842f28ce6b0d9071860e9c 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. 
*/ +#include #include #include #include @@ -147,3 +148,58 @@ void *kmap_atomic_pfn(unsigned long pfn) return (void *)vaddr; } + +#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH +static void kmap_remove_unused_cpu(int cpu) +{ + int start_idx, idx, type; + + pagefault_disable(); + type = kmap_atomic_idx(); + start_idx = type + 1 + KM_TYPE_NR * cpu; + + for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) { + unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + pte_t ptep; + + ptep = get_top_pte(vaddr); + if (ptep) + set_top_pte(vaddr, __pte(0)); + } + pagefault_enable(); +} + +static void kmap_remove_unused(void *unused) +{ + kmap_remove_unused_cpu(smp_processor_id()); +} + +void kmap_atomic_flush_unused(void) +{ + on_each_cpu(kmap_remove_unused, NULL, 1); +} + +static int hotplug_kmap_atomic_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + switch (action & (~CPU_TASKS_FROZEN)) { + case CPU_DYING: + kmap_remove_unused_cpu((int)hcpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block hotplug_kmap_atomic_notifier = { + .notifier_call = hotplug_kmap_atomic_callback, +}; + +static int __init init_kmap_atomic(void) +{ + return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier); +} +early_initcall(init_kmap_atomic); +#endif diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index 7a327bd32521c91699e9a923f96b257d4a2cba35..ebef8aacea83ac097914fbe7ecab7aa587644ce6 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -890,11 +890,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev) timer->irq = irq->start; timer->pdev = pdev; - /* Skip pm_runtime_enable for OMAP1 */ - if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) { - pm_runtime_enable(dev); - pm_runtime_irq_safe(dev); - } + pm_runtime_enable(dev); + pm_runtime_irq_safe(dev); if (!timer->reserved) { ret = pm_runtime_get_sync(dev); diff --git 
a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index bcdecc25461bcaa51f6df405807935bb481de2c5..b2aa9b32bff2b5e9d2e6d102a4cd58f6cf8c5676 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { unsigned long flags; struct kprobe *p = &op->kp; - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + struct kprobe_ctlblk *kcb; /* Save skipped registers */ regs->ARM_pc = (unsigned long)op->kp.addr; regs->ARM_ORIG_r0 = ~0UL; local_irq_save(flags); + kcb = get_kprobe_ctlblk(); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); @@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) local_irq_restore(flags); } +NOKPROBE_SYMBOL(optimized_callback) int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig) { diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 50decc5850d8c1782864ef83e4828ae890ed7f90..c05aa8ad7bdf8c22d949a95970f5c5e0a560bd05 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -22,7 +22,24 @@ config ARM64 select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA + select ARCH_INLINE_READ_LOCK if !PREEMPT + select ARCH_INLINE_READ_LOCK_BH if !PREEMPT + select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT + select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT + select ARCH_INLINE_READ_UNLOCK if !PREEMPT + select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT + select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_WRITE_LOCK if !PREEMPT + select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT + select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT + select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT + select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT + select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT + select 
ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_QUEUED_RWLOCKS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_LTO_CLANG select ARCH_SUPPORTS_ATOMIC_RMW @@ -228,6 +245,18 @@ config NEED_SG_DMA_LENGTH config SMP def_bool y +config HOTPLUG_SIZE_BITS + int "Memory hotplug block size(29 => 512MB 30 => 1GB)" + depends on SPARSEMEM + depends on MEMORY_HOTPLUG + depends on QCOM_MEM_OFFLINE + default 30 + help + Selects granularity of hotplug memory. Block + size for memory hotplug is represent as a power + of 2. + If unsure, stick with default value. + config SWIOTLB def_bool y diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index b5f07163814826c11c389a079469ddff8bbe1526..1fd95d45199f5d1ab8c60fe2b90e49b6fc9b546b 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -177,6 +177,14 @@ config ARCH_QCS405 If you do not wish to build a kernel that runs on this chipset, say 'N' here. +config ARCH_SDMMAGPIE + bool "Enable Support for Qualcomm Technologies, Inc. SDMMAGPIE" + depends on ARCH_QCOM + select COMMON_CLK_QCOM + help + This enables support for the SDMMAGPIE chipset. If you do not + wish to build a kernel that runs on this chipset, say 'N' here. 
+ config ARCH_REALTEK bool "Realtek Platforms" help diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi index 4220fbdcb24a7f18c5e3ab66574ba22c8c92c873..ff5c4c47b22bfecfa36f0090dc8be5c85b171271 100644 --- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi @@ -98,7 +98,7 @@ clock-output-names = "clk125mhz"; }; - pci { + pcie@30000000 { compatible = "pci-host-ecam-generic"; device_type = "pci"; #interrupt-cells = <1>; @@ -118,6 +118,7 @@ ranges = <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>; + bus-range = <0 0xff>; interrupt-map-mask = <0 0 0 7>; interrupt-map = /* addr pin ic icaddr icintr */ diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 2e7e6a670dbb7bf9fdcb33793e3fc866088cee3f..b27f1d9ab513827649d3c2bfa41d2d4dd43b0a19 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -21,7 +21,9 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) sm8150p-mtp-overlay.dtbo \ sm8150p-qrd-overlay.dtbo \ sm8150-sdx50m-cdp-overlay.dtbo \ - sm8150-sdx50m-mtp-overlay.dtbo + sm8150-sdx50m-mtp-overlay.dtbo \ + sm8150-sdx50m-mtp-2.5k-panel-overlay.dtbo \ + sm8150-sdx50m-qrd-overlay.dtbo sm8150-cdp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-mtp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb @@ -30,6 +32,8 @@ sm8150-qrd-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-auto-adp-star-overlay.dtbo-base := sm8150-auto.dtb sm8150-sdx50m-cdp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-sdx50m-mtp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb +sm8150-sdx50m-mtp-2.5k-panel-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb +sm8150-sdx50m-qrd-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150p-mtp-overlay.dtbo-base := sm8150p.dtb sm8150p-v2.dtb sm8150p-qrd-overlay.dtbo-base := sm8150p.dtb sm8150p-v2.dtb else @@ -84,6 +88,22 @@ 
dtb-$(CONFIG_ARCH_SM6150) += sm6150-rumi.dtb \ sm6150-idp.dtb endif +ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) + dtbo-$(CONFIG_ARCH_SDMMAGPIE) += \ + sdmmagpie-idp-overlay.dtbo \ + sdmmagpie-rumi-overlay.dtbo \ + sdmmagpie-qrd-overlay.dtbo \ + +sdmmagpie-idp-overlay.dtbo-base := sdmmagpie.dtb +sdmmagpie-rumi-overlay.dtbo-base := sdmmagpie.dtb +sdmmagpie-qrd-overlay.dtbo-base := sdmmagpie.dtb +else +dtb-$(CONFIG_ARCH_SDMMAGPIE) += sdmmagpie-rumi.dtb \ + sdmmagpie-idp.dtb \ + sdmmagpie-qrd.dtb +endif + + ifeq ($(CONFIG_ARM64),y) always := $(dtb-y) subdir-y := $(dts-dirs) diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi index 870e2b3d80720b50e07fd4dbc83aec9b75d266c0..0b0b087994a0befbe2829535e70c73bc796640a6 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi @@ -43,6 +43,7 @@ qcom,mdss-dsi-te-using-wd; qcom,mdss-dsi-te-using-te-pin; qcom,panel-ack-disabled; + qcom,mdss-dsi-qsync-min-refresh-rate = <45>; qcom,mdss-dsi-display-timings { timing@0{ @@ -69,6 +70,14 @@ [05 01 00 00 00 00 02 28 00 05 01 00 00 00 00 02 10 00]; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-qsync-on-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-on-commands-state = + "dsi_hs_mode"; + qcom,mdss-dsi-qsync-off-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-off-commands-state = + "dsi_hs_mode"; }; timing@1{ qcom,mdss-dsi-panel-width = <1280>; @@ -94,6 +103,14 @@ [05 01 00 00 00 00 02 28 00 05 01 00 00 00 00 02 10 00]; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-qsync-on-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-on-commands-state = + "dsi_hs_mode"; + qcom,mdss-dsi-qsync-off-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-off-commands-state = + "dsi_hs_mode"; }; timing@2{ qcom,mdss-dsi-panel-width = <1080>; @@ -115,6 +132,14 @@ 
[05 01 00 00 00 00 02 28 00 05 01 00 00 00 00 02 10 00]; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-qsync-on-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-on-commands-state = + "dsi_hs_mode"; + qcom,mdss-dsi-qsync-off-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-off-commands-state = + "dsi_hs_mode"; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi index dbfedb92865adc5e0dad289febb40371d5b6a76c..f06e26b30e09a7ca6668e4e384b6b302be587e75 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,6 +32,7 @@ qcom,mdss-dsi-mdp-trigger = "none"; qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>; qcom,panel-ack-disabled; + qcom,mdss-dsi-qsync-min-refresh-rate = <45>; qcom,mdss-dsi-display-timings { timing@0{ @@ -55,6 +56,14 @@ [05 01 00 00 32 00 02 28 00 05 01 00 00 78 00 02 10 00]; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-qsync-on-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-on-commands-state = + "dsi_hs_mode"; + qcom,mdss-dsi-qsync-off-commands = + [15 01 00 00 00 00 02 51 00]; + qcom,mdss-dsi-qsync-off-commands-state = + "dsi_hs_mode"; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..0779eddcb0bbba84a5ebf09f6621f19524a6596e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018, The Linux 
Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + gfx_iommu: qcom,iommu@1f00000 { + status = "disabled"; + compatible = "qcom,qsmmu-v500"; + reg = <0x1f00000 0x10000>, + <0x1ee2000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,tz-device-id = "GPU"; + qcom,skip-init; + qcom,dynamic; + qcom,use-3-lvl-tables; + #global-interrupts = <0>; + #size-cells = <1>; + #address-cells = <1>; + ranges; + interrupts = , + , + , + ; + clocks = <&clock_gcc GCC_SMMU_CFG_CLK>, + <&clock_gcc GCC_GFX_TCU_CLK>; + clock-names = "iface_clk", "core_clk"; + }; + + apps_iommu: qcom,iommu@1e00000 { + status = "okay"; + compatible = "qcom,qsmmu-v500"; + reg = <0x1e00000 0x40000>, + <0x1ee2000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,tz-device-id = "APPS"; + qcom,skip-init; + qcom,enable-static-cb; + qcom,use-3-lvl-tables; + #global-interrupts = <0>; + #size-cells = <1>; + #address-cells = <1>; + ranges; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + clocks = <&clock_gcc GCC_SMMU_CFG_CLK>, + <&clock_gcc GCC_APSS_TCU_CLK>; + clock-names = "iface_clk", "core_clk"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi index d24312ad8bfced7aee691af68fc13f32b68865ba..fca527cf14cf5efdb5489b387f26eec20fa44599 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi @@ 
-18,7 +18,7 @@ reg = <0x50a0000 0x10000>, <0x50c2000 0x20>; reg-names = "base", "tcu-base"; - #iommu-cells = <1>; + #iommu-cells = <2>; qcom,skip-init; qcom,use-3-lvl-tables; #global-interrupts = <1>; @@ -194,21 +194,23 @@ kgsl_iommu_test_device { compatible = "iommu-debug-test"; - /* - * 0x7 isn't a valid sid, but should pass the sid sanity check. - * We just need _something_ here to get this node recognized by - * the SMMU driver. Our test uses ATOS, which doesn't use SIDs - * anyways, so using a dummy value is ok. - */ - iommus = <&kgsl_smmu 0x7>; + iommus = <&kgsl_smmu 0x7 0>; + }; + + kgsl_iommu_coherent_test_device { + compatible = "iommu-debug-test"; + iommus = <&kgsl_smmu 0x9 0>; + dma-coherent; }; apps_iommu_test_device { compatible = "iommu-debug-test"; - /* - * This SID belongs to TSIF. We can't use a fake SID for - * the apps_smmu device. - */ - iommus = <&apps_smmu 0x20 0>; + iommus = <&apps_smmu 0x21 0>; + }; + + apps_iommu_coherent_test_device { + compatible = "iommu-debug-test"; + iommus = <&apps_smmu 0x23 0>; + dma-coherent; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi index 279c141e33a6a7f13e1667de0943d2c6220f97d8..688fac432a73378957c587e5071ad3d0d2ab56a2 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150.dtsi @@ -24,6 +24,7 @@ qcom,skip-init; qcom,use-3-lvl-tables; qcom,no-asid-retention; + qcom,disable-atos; #global-interrupts = <1>; qcom,regulator-names = "vdd"; vdd-supply = <&gpu_cx_gdsc>; @@ -72,6 +73,7 @@ qcom,skip-init; qcom,use-3-lvl-tables; qcom,no-asid-retention; + qcom,disable-atos; #global-interrupts = <1>; #size-cells = <1>; #address-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi index 1e6d31361f55ed12279adc193f05aff9cd60b38c..ce2c509521e513f683ef8e8c488f6e225dacc0ba 100644 --- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi 
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi @@ -298,6 +298,7 @@ audio_apr: qcom,msm-audio-apr { compatible = "qcom,msm-audio-apr"; + qcom,subsys-name = "apr_adsp"; }; dai_pri_auxpcm: qcom,msm-pri-auxpcm { diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 887b61c872dd15b697e4d1969b26be5010a18d4e..ab00be277c6fcbc87d57ecd166ec01b84e4e7efe 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -484,8 +484,8 @@ blsp2_spi5: spi@075ba000{ compatible = "qcom,spi-qup-v2.2.1"; reg = <0x075ba000 0x600>; - interrupts = ; - clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>, + interrupts = ; + clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; clock-names = "core", "iface"; pinctrl-names = "default", "sleep"; diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi index ff70579dd561160a261ec49feee34edace70310f..1b9020eebd7873c555f625c65adf652008212144 100644 --- a/arch/arm64/boot/dts/qcom/pm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi @@ -57,6 +57,174 @@ reg = <0x900 0x100>; }; + pm6150_charger: qcom,qpnp-smb5 { + compatible = "qcom,qpnp-smb5"; + #address-cells = <1>; + #size-cells = <1>; + #cooling-cells = <2>; + + qcom,pmic-revid = <&pm6150_revid>; + qcom,auto-recharge-soc = <98>; + + qcom,chgr@1000 { + reg = <0x1000 0x100>; + interrupts = + <0x2 0x10 0x0 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x1 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x5 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x6 IRQ_TYPE_EDGE_RISING>, + <0x2 0x10 0x7 IRQ_TYPE_EDGE_RISING>; + + interrupt-names = "chgr-error", + "chg-state-change", + "step-chg-state-change", + "step-chg-soc-update-fail", + "step-chg-soc-update-req", + "fg-fvcal-qualified", + "vph-alarm", + "vph-drop-prechg"; + }; + + qcom,dcdc@1100 { + reg = <0x1100 0x100>; + interrupts = + <0x2 0x11 0x0 
IRQ_TYPE_EDGE_RISING>, + <0x2 0x11 0x1 IRQ_TYPE_EDGE_RISING>, + <0x2 0x11 0x2 IRQ_TYPE_EDGE_RISING>, + <0x2 0x11 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x11 0x6 IRQ_TYPE_EDGE_RISING>, + <0x2 0x11 0x7 IRQ_TYPE_EDGE_BOTH>; + + interrupt-names = "otg-fail", + "otg-oc-disable-sw", + "otg-oc-hiccup", + "bsm-active", + "high-duty-cycle", + "input-current-limiting", + "concurrent-mode-disable", + "switcher-power-ok"; + }; + + qcom,batif@1200 { + reg = <0x1200 0x100>; + interrupts = + <0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>, + <0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x12 0x7 IRQ_TYPE_EDGE_BOTH>; + + interrupt-names = "bat-temp", + "bat-ov", + "bat-low", + "bat-therm-or-id-missing", + "bat-terminal-missing", + "buck-oc", + "vph-ov"; + }; + + qcom,usb@1300 { + reg = <0x1300 0x100>; + interrupts = + <0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x13 0x5 IRQ_TYPE_EDGE_RISING>, + <0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>, + <0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>; + + interrupt-names = "usbin-collapse", + "usbin-vashdn", + "usbin-uv", + "usbin-ov", + "usbin-plugin", + "usbin-revi-change", + "usbin-src-change", + "usbin-icl-change"; + }; + + qcom,dc@1400 { + reg = <0x1400 0x100>; + interrupts = + <0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x2 0x14 0x5 IRQ_TYPE_EDGE_RISING>, + <0x2 0x14 0x6 IRQ_TYPE_EDGE_RISING>, + <0x2 0x14 0x7 IRQ_TYPE_EDGE_RISING>; + + interrupt-names = "dcin-vashdn", + "dcin-uv", + "dcin-ov", + "dcin-plugin", + "dcin-revi", + "dcin-pon", + "dcin-en"; + }; + + qcom,typec@1500 { + reg = <0x1500 0x100>; + interrupts = 
+ <0x2 0x15 0x0 IRQ_TYPE_EDGE_RISING>, + <0x2 0x15 0x1 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x15 0x2 IRQ_TYPE_EDGE_RISING>, + <0x2 0x15 0x3 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x15 0x4 IRQ_TYPE_EDGE_RISING>, + <0x2 0x15 0x5 IRQ_TYPE_EDGE_RISING>, + <0x2 0x15 0x6 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x15 0x7 IRQ_TYPE_EDGE_RISING>; + + interrupt-names = "typec-or-rid-detect-change", + "typec-vpd-detect", + "typec-cc-state-change", + "typec-vconn-oc", + "typec-vbus-change", + "typec-attach-detach", + "typec-legacy-cable-detect", + "typec-try-snk-src-detect"; + }; + + qcom,misc@1600 { + reg = <0x1600 0x100>; + interrupts = + <0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>, + <0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>, + <0x2 0x16 0x2 IRQ_TYPE_EDGE_RISING>, + <0x2 0x16 0x3 IRQ_TYPE_EDGE_RISING>, + <0x2 0x16 0x4 IRQ_TYPE_LEVEL_HIGH>, + <0x2 0x16 0x5 IRQ_TYPE_EDGE_RISING>, + <0x2 0x16 0x6 IRQ_TYPE_EDGE_RISING>, + <0x2 0x16 0x7 IRQ_TYPE_EDGE_RISING>; + + interrupt-names = "wdog-snarl", + "wdog-bark", + "aicl-fail", + "aicl-done", + "smb-en", + "imp-trigger", + "temp-change", + "temp-change-smb"; + }; + + smb5_vbus: qcom,smb5-vbus { + regulator-name = "smb5-vbus"; + }; + + smb5_vconn: qcom,smb5-vconn { + regulator-name = "smb5-vconn"; + }; + }; + pm6150_tz: qcom,temp-alarm@2400 { compatible = "qcom,spmi-temp-alarm"; reg = <0x2400 0x100>; diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi index 074b798867d34984930620bc758492ec76c9ba0a..fdee85928eec3e10aa16586dfe2208f267dac501 100644 --- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi @@ -287,11 +287,30 @@ reg = <0xd800 0x100>, <0xd900 0x100>; reg-names = "wled-ctrl-base", "wled-sink-base"; label = "backlight"; - interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>; - interrupt-names = "ovp-irq"; + interrupts = <0x5 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, + <0x5 0xd8 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x5 0xd8 0x5 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "ovp-irq", "pre-flash-irq", + "flash-irq"; 
qcom,pmic-revid = <&pm8150l_revid>; qcom,auto-calibration; status = "disabled"; + + wled_flash: qcom,wled-flash { + label = "flash"; + qcom,default-led-trigger = "wled_flash"; + }; + + wled_torch: qcom,wled-torch { + label = "torch"; + qcom,default-led-trigger = "wled_torch"; + qcom,wled-torch-timer = <1200>; + }; + + wled_switch: qcom,wled-switch { + label = "switch"; + qcom,default-led-trigger = "wled_switch"; + }; }; pm8150l_lpg: qcom,pwms@b100 { diff --git a/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi b/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ca659e0dc2ce023270152874d735ee0ee7b9bef4 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi @@ -0,0 +1,799 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&soc { + ad_hoc_bus: ad-hoc-bus@580000 { + compatible = "qcom,msm-bus-device"; + reg = <0x580000 0x23080>, + <0x400000 0x80000>, + <0x500000 0x15080>; + reg-names = "snoc-base", "bimc-base", "pcnoc-base"; + + /*Buses*/ + fab_bimc: fab-bimc { + cell-id = ; + label = "fab-bimc"; + qcom,fab-dev; + qcom,base-name = "bimc-base"; + qcom,bus-type = <2>; + qcom,bypass-qos-prg; + qcom,util-fact = <153>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&clock_rpmcc BIMC_MSMBUS_CLK>, + <&clock_rpmcc BIMC_MSMBUS_A_CLK>; + }; + + fab_pcnoc: fab-pcnoc { + cell-id = ; + label = "fab-pcnoc"; + qcom,fab-dev; + qcom,base-name = "pcnoc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&clock_rpmcc PNOC_MSMBUS_CLK>, + <&clock_rpmcc PNOC_MSMBUS_A_CLK>; + }; + + fab_snoc: fab-snoc { + cell-id = ; + label = "fab-snoc"; + qcom,fab-dev; + qcom,base-name = "snoc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&clock_rpmcc SNOC_MSMBUS_CLK>, + <&clock_rpmcc SNOC_MSMBUS_A_CLK>; + }; + + /*BIMC Masters*/ + mas_apps_proc: mas-apps-proc { + cell-id = ; + label = "mas-apps-proc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_ebi &slv_bimc_snoc>; + qcom,bus-dev = <&fab_bimc>; + qcom,mas-rpm-id = ; + }; + + mas_oxili: mas-oxili { + cell-id = ; + label = "mas-oxili"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <2>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_ebi &slv_bimc_snoc>; + qcom,prio-lvl = <0>; + qcom,prio-rd = <0>; + qcom,prio-wr = <0>; + qcom,bus-dev = <&fab_bimc>; + qcom,mas-rpm-id = ; + }; + + mas_mdp: mas-mdp { + cell-id = ; + label = "mas-mdp"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <4>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_ebi &slv_bimc_snoc>; + qcom,prio-lvl = <1>; + qcom,prio-rd = <1>; + qcom,prio-wr = <1>; + qcom,bus-dev = 
<&fab_bimc>; + qcom,mas-rpm-id = ; + }; + + mas_snoc_bimc_1: mas-snoc-bimc-1 { + cell-id = ; + label = "mas-snoc-bimc-1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_ebi>; + qcom,bus-dev = <&fab_bimc>; + qcom,mas-rpm-id = ; + }; + + mas_tcu_0: mas-tcu-0 { + cell-id = ; + label = "mas-tcu-0"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <6>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_ebi &slv_bimc_snoc>; + qcom,prio-lvl = <2>; + qcom,prio-rd = <2>; + qcom,prio-wr = <2>; + qcom,bus-dev = <&fab_bimc>; + qcom,mas-rpm-id = ; + }; + + /*PCNOC Masters*/ + mas_spdm: mas-spdm { + cell-id = ; + label = "mas-spdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <5>; + qcom,qos-mode = "fixed"; + qcom,connections = <&pcnoc_int_3>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_blsp_1: mas-blsp-1 { + cell-id = ; + label = "mas-blsp-1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_int_3>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_blsp_2: mas-blsp-2 { + cell-id = ; + label = "mas-blsp-2"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_int_3>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_xi_usb_hs1: mas-xi-usb-hs1 { + cell-id = ; + label = "mas-xi-usb-hs1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_int_0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 + &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + }; + + mas_crypto: mas-crypto { + cell-id = ; + label = "mas-crypto"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,connections = <&slv_pcnoc_snoc &pcnoc_int_2>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 + 
&pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + }; + + mas_sdcc_1: mas-sdcc-1 { + cell-id = ; + label = "mas-sdcc-1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_int_0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 + &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + }; + + mas_sdcc_2: mas-sdcc-2 { + cell-id = ; + label = "mas-sdcc-2"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_int_0>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 + &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + }; + + mas_snoc_pcnoc: mas-snoc-pcnoc { + cell-id = ; + label = "mas-snoc-pcnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_int_2>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + }; + + mas_qpic: mas-qpic { + cell-id = ; + label = "mas-qpic"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <14>; + qcom,qos-mode = "fixed"; + qcom,connections = <&pcnoc_int_0>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 + &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + }; + + /*SNOC Masters*/ + mas_qdss_bam: mas-qdss-bam { + cell-id = ; + label = "mas-qdss-bam"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <11>; + qcom,qos-mode = "fixed"; + qcom,connections = <&qdss_int>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + mas_bimc_snoc: mas-bimc-snoc { + cell-id = ; + label = "mas-bimc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_cats_1 &slv_cats_0 + &snoc_int_1 &snoc_int_0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + 
mas_pcnoc_snoc: mas-pcnoc-snoc { + cell-id = ; + label = "mas-pcnoc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_snoc_bimc_1 + &snoc_int_2 &snoc_int_0>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + mas_qdss_etr: mas-qdss-etr { + cell-id = ; + label = "mas-qdss-etr"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <10>; + qcom,qos-mode = "fixed"; + qcom,connections = <&qdss_int>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + /*Internal nodes*/ + pcnoc_int_0: pcnoc-int-0 { + cell-id = ; + label = "pcnoc-int-0"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_pcnoc_snoc &pcnoc_int_2>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_int_2: pcnoc-int-2 { + cell-id = ; + label = "pcnoc-int-2"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&pcnoc_s_10 &slv_tcu &pcnoc_s_2 + &pcnoc_s_3 &pcnoc_s_0 + &pcnoc_s_1 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_4 + &pcnoc_s_8 &pcnoc_s_9>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_int_3: pcnoc-int-3 { + cell-id = ; + label = "pcnoc-int-3"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_pcnoc_snoc>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_0: pcnoc-s-0 { + cell-id = ; + label = "pcnoc-s-0"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_prng &slv_spdm &slv_pdm>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_1: pcnoc-s-1 { + cell-id = ; + label = "pcnoc-s-1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_tcsr>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_2: pcnoc-s-2 { + cell-id = ; + label = "pcnoc-s-2"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; 
+ qcom,ap-owned; + qcom,connections = <&slv_gpu_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_3: pcnoc-s-3 { + cell-id = ; + label = "pcnoc-s-3"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_message_ram>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_4: pcnoc-s-4 { + cell-id = ; + label = "pcnoc-s-4"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_snoc_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_6: pcnoc-s-6 { + cell-id = ; + label = "pcnoc-s-6"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_blsp_1 &slv_tlmm_north>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_7: pcnoc-s-7 { + cell-id = ; + label = "pcnoc-s-7"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_tlmm_south &slv_disp_ss_cfg + &slv_sdcc_1 &slv_pcie &slv_sdcc_2>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_8: pcnoc-s-8 { + cell-id = ; + label = "pcnoc-s-8"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_crypto_0_cfg>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_9: pcnoc-s-9 { + cell-id = ; + label = "pcnoc-s-9"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_blsp_2 + &slv_tlmm_east &slv_pmic_arb>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + pcnoc_s_10: pcnoc-s-10 { + cell-id = ; + label = "pcnoc-s-10"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_usb_hs>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + qdss_int: qdss-int { + cell-id = ; + label = "qdss-int"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,connections = 
<&slv_snoc_bimc_1 &snoc_int_1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + snoc_int_0: snoc-int-0 { + cell-id = ; + label = "snoc-int-0"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_lpass + &slv_kpss_ahb &slv_wcss>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + snoc_int_1: snoc-int-1 { + cell-id = ; + label = "snoc-int-1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_snoc_pcnoc &snoc_int_2>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + snoc_int_2: snoc-int-2 { + cell-id = ; + label = "snoc-int-2"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qdss_stm &slv_imem>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + + /*Slaves*/ + slv_ebi:slv-ebi { + cell-id = ; + label = "slv-ebi"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_bimc>; + qcom,slv-rpm-id = ; + }; + + slv_bimc_snoc:slv-bimc-snoc { + cell-id = ; + label = "slv-bimc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_bimc>; + qcom,connections = <&mas_bimc_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_spdm:slv-spdm { + cell-id = ; + label = "slv-spdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pdm:slv-pdm { + cell-id = ; + label = "slv-pdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_prng:slv-prng { + cell-id = ; + label = "slv-prng"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tcsr:slv-tcsr { + cell-id = ; + label = "slv-tcsr"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_snoc_cfg:slv-snoc-cfg { + cell-id = ; + label = "slv-snoc-cfg"; + 
qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_message_ram:slv-message-ram { + cell-id = ; + label = "slv-message-ram"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_disp_ss_cfg:slv-disp-ss-cfg { + cell-id = ; + label = "slv-disp-ss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_gpu_cfg:slv-gpu-cfg { + cell-id = ; + label = "slv-gpu-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_blsp_1:slv-blsp-1 { + cell-id = ; + label = "slv-blsp-1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tlmm_north:slv-tlmm-north { + cell-id = ; + label = "slv-tlmm-north"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pcie:slv-pcie { + cell-id = ; + label = "slv-pcie"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_blsp_2:slv-blsp-2 { + cell-id = ; + label = "slv-blsp-2"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tlmm_east:slv-tlmm-east { + cell-id = ; + label = "slv-tlmm-east"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tcu:slv-tcu { + cell-id = ; + label = "slv-tcu"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pmic_arb:slv-pmic-arb { + cell-id = ; + label = "slv-pmic-arb"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_sdcc_1:slv-sdcc-1 { + cell-id = ; + label = "slv-sdcc-1"; + 
qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_sdcc_2:slv-sdcc-2 { + cell-id = ; + label = "slv-sdcc-2"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_tlmm_south:slv-tlmm-south { + cell-id = ; + label = "slv-tlmm-south"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_usb_hs:slv-usb-hs { + cell-id = ; + label = "slv-usb-hs"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_crypto_0_cfg:slv-crypto-0-cfg { + cell-id = ; + label = "slv-crypto-0-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + + slv_pcnoc_snoc:slv-pcnoc-snoc { + cell-id = ; + label = "slv-pcnoc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,connections = <&mas_pcnoc_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_kpss_ahb:slv-kpss-ahb { + cell-id = ; + label = "slv-kpss-ahb"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_wcss:slv-wcss { + cell-id = ; + label = "slv-wcss"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_snoc_bimc_1:slv-snoc-bimc-1 { + cell-id = ; + label = "slv-snoc-bimc-1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,connections = <&mas_snoc_bimc_1>; + qcom,slv-rpm-id = ; + }; + + slv_imem:slv-imem { + cell-id = ; + label = "slv-imem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_snoc_pcnoc:slv-snoc-pcnoc { + cell-id = ; + label = "slv-snoc-pcnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,connections = <&mas_snoc_pcnoc>; + qcom,slv-rpm-id 
= ; + }; + + slv_qdss_stm:slv-qdss-stm { + cell-id = ; + label = "slv-qdss-stm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_cats_0:slv-cats-0 { + cell-id = ; + label = "slv-cats-0"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_cats_1:slv-cats-1 { + cell-id = ; + label = "slv-cats-1"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + + slv_lpass:slv-lpass { + cell-id = ; + label = "slv-lpass"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = ; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cdp.dts b/arch/arm64/boot/dts/qcom/qcs405-cdp.dts index 59d4aceadf92c14c0285349cca75f4f409fbaa94..3d36f2b018a1509f6626b3f055354abd86dac231 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cdp.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-cdp.dts @@ -21,3 +21,7 @@ compatible = "qcom,qcs405-cdp", "qcom,qcs405", "qcom,cdp"; qcom,board-id = <1 0>; }; + +&qnand_1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi b/arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi index af05002ef9fb3a4f5a8cfe69b486586b62ae8b0e..2922b606d35a6c0155d22e3bbd6f8b294980c036 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi @@ -10,3 +10,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ + +&smb1351_otg_supply { + qcom,charging-disabled; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi b/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi index 28574e12ec143e6520e933da2e154f864c1ab791..c4dc6cb25d89bbe9f0344b7b1461f116149b692e 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi @@ -12,6 +12,19 @@ &soc { + csr: csr@0x6001000 { + compatible = "qcom,coresight-csr"; + reg = <0x6001000 0x1000>; + reg-names = "csr-base"; + + coresight-name = "coresight-csr"; + qcom,usb-bam-support; + qcom,hwctrl-set-support; + qcom,set-byte-cntr-support; + + qcom,blk-size = <1>; + }; + replicator_qdss: replicator@6046000 { compatible = "arm,primecell"; arm,primecell-periphid = <0x0003b909>; @@ -60,6 +73,7 @@ coresight-name = "coresight-tmc-etr"; coresight-ctis = <&cti0>; + coresight-csr = <&csr>; clocks = <&clock_rpmcc RPM_QDSS_CLK>, <&clock_rpmcc RPM_QDSS_A_CLK>; @@ -86,6 +100,7 @@ coresight-name = "coresight-tmc-etf"; coresight-ctis = <&cti0>; arm,default-sink; + coresight-csr = <&csr>; clocks = <&clock_rpmcc RPM_QDSS_CLK>, <&clock_rpmcc RPM_QDSS_A_CLK>; @@ -301,6 +316,15 @@ }; port@2 { + reg = <4>; + funnel_in2_in_wlan_etm0: endpoint { + slave-mode; + remote-endpoint = + <&wlan_etm0_out_funnel_in2>; + }; + }; + + port@3 { reg = <7>; funnel_in2_in_funnel_apss: endpoint { slave-mode; @@ -531,21 +555,15 @@ }; }; - tpdm_wcss: tpdm@1440000 { - compatible = "arm,primecell"; - arm,primecell-periphid = <0x0003b968>; - reg = <0x1440000 0x1000>; - reg-names = "tpdm-base"; - - coresight-name = "coresight-tpdm-wcss"; - - clocks = <&clock_rpmcc RPM_QDSS_CLK>, - <&clock_rpmcc RPM_QDSS_A_CLK>; - clock-names = "apb_pclk", "core_a_clk"; + tpdm_wcss { + compatible = "qcom,coresight-dummy"; + coresight-name = "coresight-tpdm-west-dl"; + qcom,dummy-source; port { tpdm_wcss_out_tpda: endpoint { - remote-endpoint = <&tpda_in_tpdm_wcss>; + remote-endpoint = + <&tpda_in_tpdm_wcss>; }; }; }; @@ -926,6 +944,20 @@ }; }; + 
wlan_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-wlan-etm0"; + qcom,inst-id = <3>; + + port{ + wlan_etm0_out_funnel_in2: endpoint { + remote-endpoint = + <&funnel_in2_in_wlan_etm0>; + }; + }; + }; + etm0: etm@61bc000 { compatible = "arm,primecell"; arm,primecell-periphid = <0x000bb95d>; diff --git a/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi b/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ed8a630383b6054963f2286e19e41036b7d215f9 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi @@ -0,0 +1,187 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + msm_bus: qcom,kgsl-busmon { + label = "kgsl-busmon"; + compatible = "qcom,kgsl-busmon"; + }; + + gpu_bw_tbl: gpu-bw-tbl { + compatible = "operating-points-v2"; + opp-0 { opp-hz = /bits/ 64 < 0 >; }; /* OFF */ + opp-100 { opp-hz = /bits/ 64 < 769 >; }; /* 1. DDR:100.80MHz */ + opp-211 { opp-hz = /bits/ 64 < 1611 >; }; /* 2. DDR:211.20MHz */ + opp-297 { opp-hz = /bits/ 64 < 2270 >; }; /* 3. DDR:297.60MHz */ + opp-384 { opp-hz = /bits/ 64 < 2929 >; }; /* 4. DDR:384.00MHz */ + opp-556 { opp-hz = /bits/ 64 < 4248 >; }; /* 5. DDR:556.80MHz */ + opp-595 { opp-hz = /bits/ 64 < 4541 >; }; /* 6. DDR:595.20MHz */ + opp-672 { opp-hz = /bits/ 64 < 5126 >; }; /* 7. DDR:672.00MHz */ + opp-740 { opp-hz = /bits/ 64 < 5639 >; }; /* 8. 
DDR:739.20MHz */ + }; + + /* Bus governor */ + gpubw: qcom,gpubw { + compatible = "qcom,devbw"; + governor = "bw_vbif"; + qcom,src-dst-ports = <26 512>; + operating-points-v2 = <&gpu_bw_tbl>; + }; + + msm_gpu: qcom,kgsl-3d0@1c00000 { + label = "kgsl-3d0"; + compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d"; + reg = <0x1c00000 0x10000 + 0x1c10000 0x10000 + 0x00a0000 0x06fff>; + reg-names = "kgsl_3d0_reg_memory", "kgsl_3d0_shader_memory", + "qfprom_memory"; + interrupts = <0 33 0>; + interrupt-names = "kgsl_3d0_irq"; + qcom,id = <0>; + + qcom,chipid = <0x03000620>; + + qcom,initial-pwrlevel = <3>; + + qcom,idle-timeout = <80>; //msecs + qcom,strtstp-sleepwake; + qcom,gpu-bimc-interface-clk-freq = <400000000>; //In Hz + qcom,gpu-disable-fuse = <0x41a0 0x00000001 29>; + + clocks = <&clock_gcc GCC_OXILI_GFX3D_CLK>, + <&clock_gcc GCC_OXILI_AHB_CLK>, + <&clock_gcc GCC_BIMC_GFX_CLK>, + <&clock_gcc GCC_BIMC_GPU_CLK>, + <&clock_gcc GCC_GTCU_AHB_CLK>, + <&clock_gcc GCC_GFX_TCU_CLK>, + <&clock_gcc GCC_GFX_TBU_CLK>, + <&clock_rpmcc RPM_SMD_BIMC_GPU_CLK>; + + clock-names = "core_clk", "iface_clk", "mem_iface_clk", + "alt_mem_iface_clk", "gtcu_iface_clk", + "gtcu_clk", "gtbu_clk", "bimc_gpu_clk"; + + /* Bus Scale Settings */ + qcom,gpubw-dev = <&gpubw>; + qcom,bus-control; + qcom,bus-width = <16>; + qcom,msm-bus,name = "grp3d"; + qcom,msm-bus,num-cases = <9>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <26 512 0 0>, /* off */ + <26 512 0 806400>, /* 1. 100.80 MHz */ + <26 512 0 1689600>, /* 2. 211.20 MHz */ + <26 512 0 2380800>, /* 3. 297.60 MHz */ + <26 512 0 3072000>, /* 4. 384.00 MHz */ + <26 512 0 4454400>, /* 5. 556.80 MHz */ + <26 512 0 4761600>, /* 6. 595.20 MHz */ + <26 512 0 5376000>, /* 7. 672.00 MHz */ + <26 512 0 5913600>; /* 8. 
739.20 MHz */ + + /* GDSC regulator names */ + regulator-names = "vdd"; + /* GDSC oxili regulators */ + vdd-supply = <&gdsc_oxili_gx>; + + /* CPU latency parameter */ + qcom,pm-qos-active-latency = <651>; + + /* Enable gpu cooling device */ + #cooling-cells = <2>; + + /* Power levels */ + qcom,gpu-pwrlevels { + #address-cells = <1>; + #size-cells = <0>; + + compatible = "qcom,gpu-pwrlevels"; + + /* TURBO */ + qcom,gpu-pwrlevel@0 { + reg = <0>; + qcom,gpu-freq = <650000000>; + qcom,bus-freq = <8>; + qcom,bus-min = <8>; + qcom,bus-max = <8>; + }; + + /* NOM+ */ + qcom,gpu-pwrlevel@1 { + reg = <1>; + qcom,gpu-freq = <523200000>; + qcom,bus-freq = <6>; + qcom,bus-min = <5>; + qcom,bus-max = <7>; + }; + + /* NOM */ + qcom,gpu-pwrlevel@2 { + reg = <2>; + qcom,gpu-freq = <484800000>; + qcom,bus-freq = <5>; + qcom,bus-min = <4>; + qcom,bus-max = <6>; + }; + + /* SVS+ */ + qcom,gpu-pwrlevel@3 { + reg = <3>; + qcom,gpu-freq = <400000000>; + qcom,bus-freq = <4>; + qcom,bus-min = <3>; + qcom,bus-max = <5>; + }; + + /* SVS */ + qcom,gpu-pwrlevel@4 { + reg = <4>; + qcom,gpu-freq = <270000000>; + qcom,bus-freq = <3>; + qcom,bus-min = <1>; + qcom,bus-max = <3>; + }; + + /* XO */ + qcom,gpu-pwrlevel@5 { + reg = <5>; + qcom,gpu-freq = <19200000>; + qcom,bus-freq = <0>; + qcom,bus-min = <0>; + qcom,bus-max = <0>; + }; + }; +}; + + kgsl_msm_iommu: qcom,kgsl-iommu@1f00000 { + compatible = "qcom,kgsl-smmu-v2"; + reg = <0x1f00000 0x10000>; + /* + * The gpu can only program a single context bank + * at this fixed offset. 
+ */ + qcom,protect = <0xa000 0x1000>; + clocks = <&clock_gcc GCC_SMMU_CFG_CLK>, + <&clock_gcc GCC_GFX_TCU_CLK>, + <&clock_gcc GCC_GTCU_AHB_CLK>, + <&clock_gcc GCC_GFX_TBU_CLK>; + clock-names = "scfg_clk", "gtcu_clk", "gtcu_iface_clk", + "gtbu_clk"; + qcom,retention; + gfx3d_user: gfx3d_user { + compatible = "qcom,smmu-kgsl-cb"; + iommus = <&gfx_iommu 0>; + qcom,gpu-offset = <0xa000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-ion.dtsi b/arch/arm64/boot/dts/qcom/qcs405-ion.dtsi index 0fc6ca98c1b7b97a0e3bad2f11e743717b6a26bb..b6f45704957fd112fda8f4d0c0ccbd7488f4a762 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-ion.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-ion.dtsi @@ -21,6 +21,12 @@ qcom,ion-heap-type = "SYSTEM"; }; + qcom,ion-heap@8 { /* CP_MM HEAP */ + reg = <8>; + memory-region = <&secure_mem>; + qcom,ion-heap-type = "SECURE_DMA"; + }; + qcom,ion-heap@27 { /* QSEECOM HEAP */ reg = <27>; memory-region = <&qseecom_mem>; diff --git a/arch/arm64/boot/dts/qcom/qcs405-mtp.dts b/arch/arm64/boot/dts/qcom/qcs405-mtp.dts index df9d6945d623546344f886b8b5b5828da9412d6a..7e7872d997aa8e9e83008a1592d7e50b5fe11cea 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-mtp.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-mtp.dts @@ -21,3 +21,7 @@ compatible = "qcom,qcs405-mtp", "qcom,qcs405", "qcom,mtp"; qcom,board-id = <8 0>; }; + +&qnand_1 { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi index 0463c5c6372043cfba24a3591e0d4c3147261551..990142fb205b786a5271981b2020a7fcc816297a 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi @@ -15,10 +15,11 @@ tlmm: pinctrl@1000000 { compatible = "qcom,qcs405-pinctrl"; reg = <0x1000000 0x300000>; - interrupts = <0 208 0>; + interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>; gpio-controller; #gpio-cells = <2>; interrupt-controller; + interrupt-parent = <&wakegpio>; #interrupt-cells = <2>; 
pmx-uartconsole { @@ -781,5 +782,20 @@ bias-disable; }; }; + + /* SMB CONFIGURATION */ + smb_stat: smb_stat { + mux { + pins = "gpio107"; + function = "gpio"; + }; + + config { + pins = "gpio107"; + drive-strength = <2>; + bias-pull-up; + input-enable; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi index 5d2842fb2ad2c6b90b43f54f33e750d67a3283b7..74f0a257332b1d4f0a02745c117f9fc36eed231f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi @@ -255,13 +255,103 @@ }; }; -/* Stub regulators */ -/ { - /* VDD_APC supply */ - apc_vreg_corner: regulator-apc-corner { - compatible = "qcom,stub-regulator"; +&spmi_bus { + qcom,pms405@1 { + /* PMS405 S3 = VDD_APC_supply */ + pms405_s3: spm-regulator@1a00 { + compatible = "qcom,spm-regulator"; + reg = <0x1a00 0x100>; + regulator-name = "pms405_s3"; + regulator-min-microvolt = <1048000>; + regulator-max-microvolt = <1352000>; + }; + }; +}; + +&soc { + /* APC CPR and MEM ACC regulators */ + mem_acc_vreg_corner: regulator@1942120 { + compatible = "qcom,mem-acc-regulator"; + regulator-name = "mem_acc_corner"; + regulator-min-microvolt = <1>; + regulator-max-microvolt = <2>; + + qcom,acc-reg-addr-list = + <0x01942138 0x01942130 0x01942120 0x01942124>; + + qcom,acc-init-reg-config = <1 0xff>, <2 0x5555>; + + qcom,num-acc-corners = <2>; + qcom,boot-acc-corner = <2>; + qcom,corner1-reg-config = + /* INT2 => INT2 */ + <(-1) (-1)>, <(-1) (-1)>, + /* INT2 => NOM */ + < 3 0x0>, < 4 0x0>; + + qcom,corner2-reg-config = + /* NOM => INT2 */ + < 3 0x1041041>, < 4 0x41>, + /* NOM => NOM */ + <(-1) (-1)>, <(-1) (-1)>; + }; + + apc_vreg_corner: regulator@b018000 { + compatible = "qcom,cpr-regulator"; + reg = <0xb018000 0x1000>, <0xb011064 4>, <0xa4000 0x1000>; + reg-names = "rbcpr", "rbcpr_clk", "efuse_addr"; + interrupts = <0 15 0>; regulator-name = "apc_corner"; regulator-min-microvolt = <1>; 
regulator-max-microvolt = <3>; + + qcom,cpr-fuse-corners = <3>; + qcom,cpr-voltage-ceiling = <1224000 1288000 1352000>; + qcom,cpr-voltage-floor = <1048000 1048000 1088000>; + vdd-apc-supply = <&pms405_s3>; + + mem-acc-supply = <&mem_acc_vreg_corner>; + + qcom,cpr-ref-clk = <19200>; + qcom,cpr-timer-delay = <5000>; + qcom,cpr-timer-cons-up = <0>; + qcom,cpr-timer-cons-down = <2>; + qcom,cpr-irq-line = <0>; + qcom,cpr-step-quotient = <25>; + qcom,cpr-up-threshold = <1>; + qcom,cpr-down-threshold = <3>; + qcom,cpr-idle-clocks = <15>; + qcom,cpr-gcnt-time = <1>; + qcom,vdd-apc-step-up-limit = <1>; + qcom,vdd-apc-step-down-limit = <1>; + qcom,cpr-apc-volt-step = <8000>; + + qcom,cpr-fuse-row = <69 0>; + qcom,cpr-fuse-target-quot = <30 42 64>; + qcom,cpr-fuse-ro-sel = <0 4 8>; + qcom,cpr-init-voltage-ref = <1224000 1288000 1352000>; + qcom,cpr-fuse-init-voltage = + <69 12 6 0>, + <69 18 6 0>, + <69 24 6 0>; + qcom,cpr-fuse-quot-offset = + <70 12 7 0>, + <70 19 7 0>, + <70 26 7 0>; + qcom,cpr-fuse-quot-offset-scale = <5 5 5>; + qcom,cpr-init-voltage-step = <10000>; + qcom,cpr-corner-map = <1 2 3>; + qcom,mem-acc-corner-map = <1 2 2>; + qcom,cpr-corner-frequency-map = + <1 1094400000>, + <2 1248000000>, + <3 1401600000>; + qcom,speed-bin-fuse-sel = <39 34 3 0>; + qcom,cpr-fuse-revision = <67 3 3 0>; + qcom,cpr-speed-bin-max-corners = + <(-1) (-1) 1 2 3>; + qcom,cpr-quot-adjust-scaling-factor-max = <0 1400 1400>; + qcom,cpr-voltage-scaling-factor-max = <0 2000 2000>; + qcom,cpr-scaled-init-voltage-as-ceiling; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-rumi.dts b/arch/arm64/boot/dts/qcom/qcs405-rumi.dts index 75c9b5174d444bf8e4c1563ab64a811f25dc21eb..c2a2a78b3d702c08ee85e17cc9309481f8b95f6f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-rumi.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-rumi.dts @@ -21,3 +21,7 @@ compatible = "qcom,qcs405-rumi", "qcom,qcs405", "qcom,rumi"; qcom,board-id = <15 0>; }; + +&qnand_1 { + status = "ok"; +}; diff --git 
a/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi b/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi index 12fdb0f555ce3b143f2ce39550105fe06680c3f2..6e855d4284c0ccc33a23bb7f02b74601b9f87be8 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi @@ -75,6 +75,8 @@ &soc { /delete-node/ qcom,spmi@200f000; + /delete-node/ regulator@1942120; + /delete-node/ regulator@b018000; }; &rpm_bus { @@ -120,3 +122,7 @@ status = "disabled"; }; + +&smb1351_otg_supply { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-stub-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs405-stub-regulator.dtsi index 378135169d42f83113feb70e8fd697ee2f71f250..9ef1c13731cf0f2e2bdad871d0a6e026904fd256 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-stub-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-stub-regulator.dtsi @@ -183,4 +183,12 @@ regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3300000>; }; + + /* VDD_APC supply */ + apc_vreg_corner: regulator-apc-corner { + compatible = "qcom,stub-regulator"; + regulator-name = "apc_corner"; + regulator-min-microvolt = <1>; + regulator-max-microvolt = <3>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi b/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi index 64559e4f8d32eae4cd9bbce1ccb6b5bb06a38711..aa3374de60d23d7eeb67ce8830c03da71ce479e6 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-thermal.dtsi @@ -234,6 +234,14 @@ type = "passive"; }; }; + cooling-maps { + gpu_cdev0 { + trip = <&gpu_step_trip>; + cooling-device = + <&msm_gpu THERMAL_NO_LIMIT + THERMAL_NO_LIMIT>; + }; + }; }; cpuss-0-step { diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index b2e7aeae6ec7cf53b325d3de52553b52691576c8..3d6b81b1811c94dca55fcb82606929a4abfcf134 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -23,10 +23,10 @@ model = "Qualcomm Technologies, Inc. 
QCS405"; compatible = "qcom,qcs405"; qcom,msm-id = <352 0x0>; - interrupt-parent = <&intc>; + interrupt-parent = <&wakegic>; chosen { - bootargs = "sched_enable_hmp=1"; + bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7"; }; reserved-memory { @@ -104,6 +104,7 @@ aliases { sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ sdhc2 = &sdhc_2; /* SDC2 SD Card slot */ + qpic_nand1 = &qnand_1; }; soc: soc { }; @@ -115,6 +116,8 @@ #include "qcs405-cpu.dtsi" #include "qcs405-ion.dtsi" #include "qcs405-pm.dtsi" +#include "msm-arm-smmu-qcs405.dtsi" +#include "qcs405-gpu.dtsi" &soc { #address-cells = <1>; @@ -125,11 +128,31 @@ intc: interrupt-controller@b000000 { compatible = "qcom,msm-qgic2"; interrupt-controller; + interrupt-parent = <&intc>; #interrupt-cells = <3>; reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>; }; + wakegic: wake-gic { + compatible = "qcom,mpm-gic-msm8937", "qcom,mpm-gic"; + interrupts = ; + reg = <0x601d0 0x1000>, + <0xb011008 0x4>; /* MSM_APCS_GCC_BASE 4K */ + reg-names = "vmpm", "ipc"; + qcom,num-mpm-irqs = <64>; + interrupt-controller; + interrupt-parent = <&intc>; + #interrupt-cells = <3>; + }; + + wakegpio: wake-gpio { + compatible = "qcom,mpm-gpio-msm8937", "qcom,mpm-gpio"; + interrupt-controller; + interrupt-parent = <&intc>; + #interrupt-cells = <2>; + }; + timer { compatible = "arm,armv8-timer"; interrupts = <1 2 0xff08>, @@ -139,61 +162,61 @@ clock-frequency = <19200000>; }; - timer@b020000 { + timer@b120000 { #address-cells = <1>; #size-cells = <1>; ranges; compatible = "arm,armv7-timer-mem"; - reg = <0xb020000 0x1000>; + reg = <0xb120000 0x1000>; clock-frequency = <19200000>; - frame@b021000 { + frame@b121000 { frame-number = <0>; interrupts = <0 8 0x4>, <0 7 0x4>; - reg = <0xb021000 0x1000>, - <0xb022000 0x1000>; + reg = <0xb121000 0x1000>, + <0xb122000 0x1000>; }; - frame@b023000 { + frame@b123000 { frame-number = <1>; interrupts = <0 9 0x4>; - reg = <0xb023000 0x1000>; + reg = <0xb123000 0x1000>; status = "disabled"; }; - frame@b024000 { + 
frame@b124000 { frame-number = <2>; interrupts = <0 10 0x4>; - reg = <0xb024000 0x1000>; + reg = <0xb124000 0x1000>; status = "disabled"; }; - frame@b025000 { + frame@b125000 { frame-number = <3>; interrupts = <0 11 0x4>; - reg = <0xb025000 0x1000>; + reg = <0xb125000 0x1000>; status = "disabled"; }; - frame@b026000 { + frame@b126000 { frame-number = <4>; interrupts = <0 12 0x4>; - reg = <0xb026000 0x1000>; + reg = <0xb126000 0x1000>; status = "disabled"; }; - frame@b027000 { + frame@b127000 { frame-number = <5>; interrupts = <0 13 0x4>; - reg = <0xb027000 0x1000>; + reg = <0xb127000 0x1000>; status = "disabled"; }; - frame@b028000 { + frame@b128000 { frame-number = <6>; interrupts = <0 14 0x4>; - reg = <0xb028000 0x1000>; + reg = <0xb128000 0x1000>; status = "disabled"; }; }; @@ -222,6 +245,7 @@ reg-names = "cc_base"; vdd_cx-supply = <&pms405_s1_level>; clocks = <&clock_rpmcc RPM_SMD_XO_CLK_SRC>; + qcom,gfx3d_clk_src-opp-handle = <&msm_gpu>; clock-names = "cxo"; #clock-cells = <1>; #reset-cells = <1>; @@ -230,14 +254,34 @@ clock_debugcc: qcom,cc-debug { compatible = "qcom,debugcc-qcs405"; qcom,gcc = <&clock_gcc>; + qcom,cpucc = <&cpucc_debug>; clocks = <&clock_rpmcc RPM_SMD_XO_CLK_SRC>; clock-names = "xo_clk_src"; #clock-cells = <1>; }; - clock_cpu: qcom,cpu { - compatible = "qcom,dummycc"; - clock-output-names = "cpu_clocks"; + cpucc_debug: syscon@0b01101c { + compatible = "syscon"; + reg = <0xb01101c 0x4>; + }; + + clock_cpu: qcom,clock-cpu@0b011050 { + status = "disabled"; + compatible = "qcom,cpu-qcs405"; + clocks = <&clock_rpmcc RPM_SMD_XO_A_CLK_SRC>, + <&clock_gcc GPLL0_AO_OUT_MAIN>; + clock-names = "xo_ao", "gpll0_ao" ; + reg = <0x0b011050 0x8>, + <0xb016000 0x34>; + reg-names = "apcs_cmd" , "apcs_pll"; + cpu-vdd-supply = <&apc_vreg_corner>; + vdd_dig_ao-supply = <&pms405_s1_level>; + qcom,speed0-bin-v0 = + < 0 0>, + < 960000000 1>, + < 1113600000 2>, + < 1267200000 3>, + < 1382400000 4>; #clock-cells = <1>; }; @@ -283,7 +327,7 @@ }; dcc: dcc_v2@b2000 { - 
compatible = "qcom,dcc_v2"; + compatible = "qcom,dcc-v2"; reg = <0x000b2000 0x1000>, <0x000bf800 0x800>; reg-names = "dcc-base", "dcc-ram-base"; @@ -368,6 +412,119 @@ }; }; + qcom,lpass@c000000 { + compatible = "qcom,pil-tz-generic"; + reg = <0xc000000 0x00100>; + + vdd_cx-supply = <&pms405_s2_level>; + qcom,proxy-reg-names = "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + + clocks = <&clock_rpmcc CXO_SMD_PIL_LPASS_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <1>; + qcom,complete-ramdump; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <423>; + qcom,sysmon-id = <1>; + qcom,ssctl-instance-id = <0x14>; + qcom,firmware-name = "adsp"; + + /* GPIO inputs from lpass */ + interrupts-extended = <&intc 0 293 1>, + <&adsp_smp2p_in 0 0>, + <&adsp_smp2p_in 2 0>, + <&adsp_smp2p_in 1 0>, + <&adsp_smp2p_in 3 0>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,proxy-unvote", + "qcom,err-ready", + "qcom,stop-ack"; + /* GPIO output to lpass */ + qcom,smem-states = <&adsp_smp2p_out 0>; + qcom,smem-state-names = "qcom,force-stop"; + memory-region = <&adsp_fw_mem>; + }; + + qcom,turing@800000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x800000 0x00100>; + + vdd_cx-supply = <&pms405_s1_level>; + qcom,proxy-reg-names = "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + + clocks = <&clock_rpmcc CXO_SMD_PIL_CDSP_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <18>; + qcom,complete-ramdump; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <601>; + qcom,sysmon-id = <7>; + qcom,ssctl-instance-id = <0x17>; + qcom,firmware-name = "cdsp"; + + /* GPIO inputs from turing */ + interrupts-extended = <&intc 0 229 1>, + <&cdsp_smp2p_in 0 0>, + <&cdsp_smp2p_in 2 0>, + <&cdsp_smp2p_in 1 0>, + <&cdsp_smp2p_in 3 0>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,proxy-unvote", + "qcom,err-ready", + "qcom,stop-ack"; + /* GPIO output to turing */ + qcom,smem-states = <&cdsp_smp2p_out 0>; + qcom,smem-state-names = 
"qcom,force-stop"; + memory-region = <&cdsp_fw_mem>; + }; + + qcom,wlan_dsp@7000000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x07000000 0x580000>; + + vdd_cx-supply = <&pms405_s1_level>; + qcom,proxy-reg-names = "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + + clocks = <&clock_rpmcc CXO_SMD_PIL_PRONTO_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <6>; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <422>; + qcom,sysmon-id = <0>; + qcom,ssctl-instance-id = <0x12>; + qcom,firmware-name = "wcnss"; + + /* GPIO inputs from wcnss */ + interrupts-extended = <&intc 0 153 1>, + <&modem_smp2p_in 0 0>, + <&modem_smp2p_in 2 0>, + <&modem_smp2p_in 1 0>, + <&modem_smp2p_in 3 0>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,proxy-unvote", + "qcom,err-ready", + "qcom,stop-ack"; + + /* GPIO output to wcnss */ + qcom,gpio-force-stop = <&modem_smp2p_out 0 0>; + memory-region = <&wlan_fw_mem>; + }; + tsens0: tsens@4a8000 { compatible = "qcom,qcs405-tsens"; reg = <0x4a8000 0x1000>, @@ -415,6 +572,74 @@ #mbox-cells = <1>; }; + qcom,msm-adsprpc-mem { + compatible = "qcom,msm-adsprpc-mem-region"; + memory-region = <&adsp_mem>; + }; + + qcom,msm_fastrpc { + compatible = "qcom,msm-fastrpc-compute"; + qcom,rpc-latency-us = <611>; + qcom,fastrpc-adsp-audio-pdr; + qcom,fastrpc-adsp-sensors-pdr; + + qcom,msm_fastrpc_compute_cb1 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "cdsprpc-smd"; + iommus = <&apps_smmu 0x1001 0x0>; + dma-coherent; + }; + + qcom,msm_fastrpc_compute_cb2 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "cdsprpc-smd"; + iommus = <&apps_smmu 0x1002 0x0>; + dma-coherent; + }; + + qcom,msm_fastrpc_compute_cb3 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "cdsprpc-smd"; + iommus = <&apps_smmu 0x1003 0x0>; + dma-coherent; + }; + + qcom,msm_fastrpc_compute_cb4 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "cdsprpc-smd"; + iommus = <&apps_smmu 0x1004 0x0>; + dma-coherent; + }; + + 
qcom,msm_fastrpc_compute_cb5 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "cdsprpc-smd"; + iommus = <&apps_smmu 0x1005 0x0>; + dma-coherent; + }; + qcom,msm_fastrpc_compute_cb6 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&apps_smmu 0x804 0x0>; + dma-coherent; + }; + + qcom,msm_fastrpc_compute_cb7 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&apps_smmu 0x805 0x0>; + dma-coherent; + }; + + qcom,msm_fastrpc_compute_cb8 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&apps_smmu 0x806 0x0>; + shared-cb = <5>; + dma-coherent; + }; + }; + rpm-glink { compatible = "qcom,glink-rpm"; interrupts = ; @@ -422,56 +647,110 @@ mboxes = <&apcs_glb 0>; }; + qcom,msm-cdsp-loader { + compatible = "qcom,cdsp-loader"; + qcom,proc-img-to-load = "cdsp"; + }; + qcom,glink { compatible = "qcom,glink"; - modem { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + glink_wcnss: wcnss { qcom,remote-pid = <1>; transport = "smem"; mboxes = <&apcs_glb 16>; - mbox-names = "mpss_smem"; + mbox-names = "wcnss_smem"; interrupts = ; - modem_qrtr { + label = "wcnss"; + qcom,glink-label = "wcnss"; + + qcom,wcnss_qrtr { qcom,glink-channels = "IPCRTR"; qcom,intents = <0x800 5 0x2000 3 0x4400 2>; }; + + qcom,msm_fastrpc_rpmsg { + compatible = "qcom,msm-fastrpc-rpmsg"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + qcom,intents = <0x64 64>; + }; + + qcom,wcnss_glink_ssr { + qcom,glink-channels = "glink_ssr"; + qcom,notify-edges = <&glink_adsp>, + <&glink_cdsp>; + }; }; - adsp { + glink_adsp: adsp { qcom,remote-pid = <2>; transport = "smem"; mboxes = <&apcs_glb 8>; mbox-names = "adsp_smem"; interrupts = ; - adsp_qrtr { + label = "adsp"; + qcom,glink-label = "lpass"; + + qcom,adsp_qrtr { qcom,glink-channels = "IPCRTR"; qcom,intents = <0x800 5 0x2000 3 0x4400 2>; }; - apr_tal_rpmsg { + qcom,apr_tal_rpmsg { qcom,glink-channels = "apr_audio_svc"; qcom,intents = <0x200 20>; 
}; + + qcom,msm_fastrpc_rpmsg { + compatible = "qcom,msm-fastrpc-rpmsg"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + qcom,intents = <0x64 64>; + }; + + qcom,adsp_glink_ssr { + qcom,glink-channels = "glink_ssr"; + qcom,notify-edges = <&glink_wcnss>, + <&glink_cdsp>; + }; }; - cdsp { + glink_cdsp: cdsp { qcom,remote-pid = <5>; transport = "smem"; mboxes = <&apcs_glb 12>; mbox-names = "cdsp_smem"; interrupts = ; - cdsp_qrtr { + label = "cdsp"; + qcom,glink-label = "cdsp"; + + qcom,cdsp_qrtr { qcom,glink-channels = "IPCRTR"; qcom,intents = <0x800 5 0x2000 3 0x4400 2>; }; + + qcom,msm_fastrpc_rpmsg { + compatible = "qcom,msm-fastrpc-rpmsg"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + qcom,intents = <0x64 64>; + }; + + qcom,cdsp_glink_ssr { + qcom,glink-channels = "glink_ssr"; + qcom,notify-edges = <&glink_wcnss>, + <&glink_adsp>; + }; }; }; @@ -664,6 +943,21 @@ status = "disabled"; }; + + qnand_1: nand@4c0000 { + compatible = "qcom,msm-nand"; + reg = <0x004c0000 0x1000>, + <0x004c4000 0x1a000>; + reg-names = "nand_phys", + "bam_phys"; + qcom,reg-adjustment-offset = <0x4000>; + qcom,qpic-clk-rpmh; + + interrupts = <0 49 0>; + interrupt-names = "bam_irq"; + + status = "disabled"; + }; }; #include "qcs405-gdsc.dtsi" @@ -671,6 +965,7 @@ #include "pms405-rpm-regulator.dtsi" #include "qcs405-regulator.dtsi" #include "qcs405-thermal.dtsi" +#include "qcs405-bus.dtsi" &gdsc_mdss { status = "ok"; @@ -681,3 +976,21 @@ }; #include "qcs405-coresight.dtsi" + +&i2c_5 { + status = "ok"; + smb1351_otg_supply: smb1351-charger@55 { + compatible = "qcom,smb1351-charger"; + reg = <0x55>; + interrupt-parent = <&tlmm>; + interrupts = <107 IRQ_TYPE_LEVEL_LOW>; + qcom,float-voltage-mv = <4350>; + qcom,charging-timeout = <1536>; + qcom,recharge-thresh-mv = <200>; + qcom,iterm-ma = <100>; + regulator-name = "smb1351_otg_supply"; + pinctrl-names = "default"; + pinctrl-0 = <&smb_stat>; + qcom,switch-freq = <2>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi 
b/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..7a7053222d1f3c7a3ee7503fb261703aba47721e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-gdsc.dtsi @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + /* GDSCs in Global CC */ + pcie_0_gdsc: qcom,gdsc@16b004 { + compatible = "regulator-fixed"; + regulator-name = "pcie_0_gdsc"; + reg = <0x16b004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + pcie_tbu_gdsc: qcom,gdsc@128004 { + compatible = "regulator-fixed"; + regulator-name = "pcie_tbu_gdsc"; + reg = <0x128004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + ufs_phy_gdsc: qcom,gdsc@177004 { + compatible = "regulator-fixed"; + regulator-name = "ufs_phy_gdsc"; + reg = <0x177004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + usb30_prim_gdsc: qcom,gdsc@10f004 { + compatible = "regulator-fixed"; + regulator-name = "usb30_prim_gdsc"; + reg = <0x10f004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc: qcom,gdsc@17d030 { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc"; + reg = <0x17d030 0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc: qcom,gdsc@17d03c { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc"; + reg = <0x17d03c 
0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + hlos1_vote_aggre_noc_mmu_tbu1_gdsc: qcom,gdsc@17d034 { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc"; + reg = <0x17d034 0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + hlos1_vote_aggre_noc_mmu_tbu2_gdsc: qcom,gdsc@17d038 { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc"; + reg = <0x17d038 0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d040 { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc"; + reg = <0x17d040 0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@17d048 { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc"; + reg = <0x17d048 0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + hlos1_vote_mmnoc_mmu_tbu_sf_gdsc: qcom,gdsc@17d044 { + compatible = "regulator-fixed"; + regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc"; + reg = <0x17d044 0x4>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + status = "disabled"; + }; + + /* GDSCs in Camera CC */ + bps_gdsc: qcom,gdsc@ad07004 { + compatible = "regulator-fixed"; + regulator-name = "bps_gdsc"; + reg = <0xad07004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + ife_0_gdsc: qcom,gdsc@ad0a004 { + compatible = "regulator-fixed"; + regulator-name = "ife_0_gdsc"; + reg = <0xad0a004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + ife_1_gdsc: qcom,gdsc@ad0b004 { + compatible = "regulator-fixed"; + regulator-name = "ife_1_gdsc"; + reg = <0xad0b004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + ipe_0_gdsc: 
qcom,gdsc@ad08004 { + compatible = "regulator-fixed"; + regulator-name = "ipe_0_gdsc"; + reg = <0xad08004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + ipe_1_gdsc: qcom,gdsc@ad09004 { + compatible = "regulator-fixed"; + regulator-name = "ipe_1_gdsc"; + reg = <0xad09004 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + titan_top_gdsc: qcom,gdsc@ad0c1c4 { + compatible = "regulator-fixed"; + regulator-name = "titan_top_gdsc"; + reg = <0xad0c1c4 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + /* GDSCs in Display CC */ + mdss_core_gdsc: qcom,gdsc@0f03000 { + compatible = "regulator-fixed"; + regulator-name = "mdss_core_gdsc"; + reg = <0xaf03000 0x4>; + qcom,poll-cfg-gdscr; + qcom,support-hw-trigger; + status = "disabled"; + proxy-supply = <&mdss_core_gdsc>; + qcom,proxy-consumer-enable; + }; + + /* GDSCs in Graphics CC */ + gpu_cx_hw_ctrl: syscon@5091540 { + compatible = "syscon"; + reg = <0x5091540 0x4>; + }; + + gpu_cx_gdsc: qcom,gdsc@509106c { + compatible = "regulator-fixed"; + regulator-name = "gpu_cx_gdsc"; + reg = <0x509106c 0x4>; + hw-ctrl-addr = <&gpu_cx_hw_ctrl>; + qcom,no-status-check-on-disable; + qcom,gds-timeout = <500>; + qcom,clk-dis-wait-val = <8>; + status = "disabled"; + }; + + gpu_gx_gdsc: qcom,gdsc@509100c { + compatible = "regulator-fixed"; + regulator-name = "gpu_gx_gdsc"; + reg = <0x509100c 0x4>; + qcom,poll-cfg-gdscr; + status = "disabled"; + }; + + /* GDSCs in Video CC */ + mvsc_gdsc: qcom,gdsc@0b00814 { + compatible = "regulator-fixed"; + regulator-name = "mvsc_gdsc"; + reg = <0xab00814 0x4>; + status = "disabled"; + }; + + mvs0_gdsc: qcom,gdsc@ab00874 { + compatible = "regulator-fixed"; + regulator-name = "mvs0_gdsc"; + reg = <0xab00874 0x4>; + status = "disabled"; + }; + + mvs1_gdsc: qcom,gdsc@ab008b4 { + compatible = "regulator-fixed"; + regulator-name = "mvs1_gdsc"; + reg = <0xab008b4 0x4>; + status = "disabled"; + }; + + /* GDSCs in NPU CC */ + npu_core_gdsc: qcom,gdsc@9911028 { + compatible = 
"regulator-fixed"; + regulator-name = "npu_core_gdsc"; + reg = <0x9911028 0x4>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..74a472a6a4283c45c503562559db4c148da3b2e3 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE IDP"; + compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; + qcom,msm-id = <365 0x0>; + qcom,board-id = <34 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts new file mode 100644 index 0000000000000000000000000000000000000000..273174c8c44140f88fa6269de9c0a7008131e06f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpie.dtsi" +#include "sdmmagpie-idp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE IDP"; + compatible = "qcom,sdmmagpie-idp", "qcom,sdmmagpie", "qcom,idp"; + qcom,board-id = <34 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..04867feb26b6f065d80dc3c5b502db5160ddf459 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-idp.dtsi @@ -0,0 +1,14 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..d466f4d3ab7ee44cffe24c214b4777b3540d0d6a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi @@ -0,0 +1,23 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + tlmm: pinctrl@3400000 { + compatible = "qcom,sdmmagpie-pinctrl"; + reg = <0x03400000 0xdc2000>; + interrupts = <0 208 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..b497ca644406fee3721140f9812319508602c001 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-qrd.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE QRD"; + compatible = "qcom,sdmmagpie-qrd", "qcom,sdmmagpie", "qcom,qrd"; + qcom,msm-id = <365 0x0>; + qcom,board-id = <11 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts new file mode 100644 index 0000000000000000000000000000000000000000..f7e9368e05867a27c4cb0ed08ec0ffcd375292e1 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpie.dtsi" +#include "sdmmagpie-qrd.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE QRD"; + compatible = "qcom,sdmmagpie-qrd", "qcom,sdmmagpie", "qcom,qrd"; + qcom,board-id = <11 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..04867feb26b6f065d80dc3c5b502db5160ddf459 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qrd.dtsi @@ -0,0 +1,14 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..d83c3519baacea67c9add1e4ca7b75d7b2343824 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi-overlay.dts @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include + +#include "sdmmagpie-rumi.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE RUMI"; + compatible = "qcom,sdmmagpie-rumi", "qcom,sdmmagpie", "qcom,rumi"; + qcom,msm-id = <365 0x0>; + qcom,board-id = <15 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts new file mode 100644 index 0000000000000000000000000000000000000000..ba9b05c9a9bfcb7d85cf7ad33cf48ee12ca93511 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpie.dtsi" +#include "sdmmagpie-rumi.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE RUMI"; + compatible = "qcom,sdmmagpie-rumi", "qcom,sdmmagpie", "qcom,rumi"; + qcom,board-id = <15 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..a683d878cc32bc5bc3a0ec129f3d5584f838d720 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi @@ -0,0 +1,25 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + timer { + clock-frequency = <1000000>; + }; + + timer@0x17c00000 { + clock-frequency = <1000000>; + }; + + wdog: qcom,wdt@17c10000{ + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dts b/arch/arm64/boot/dts/qcom/sdmmagpie.dts new file mode 100644 index 0000000000000000000000000000000000000000..504b85f5c3e184438af7517749637735294e0f9d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dts @@ -0,0 +1,21 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdmmagpie.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE SoC"; + compatible = "qcom,sdmmagpie"; + qcom,board-id = <0 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..cb536c68516c1e363513a20b1dfd3291107f815e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -0,0 +1,818 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "skeleton64.dtsi" +#include +#include +#include +#include +#include +#include +#include +#include + +/ { + model = "Qualcomm Technologies, Inc. SDMMAGPIE"; + compatible = "qcom,sdmmagpie"; + qcom,msm-id = <365 0x0>; + interrupt-parent = <&intc>; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x0>; + enable-method = "psci"; + cache-size = <0x8000>; + next-level-cache = <&L2_0>; + L2_0: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x20000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + + L3_0: l3-cache { + compatible = "arm,arch-cache"; + cache-size = <0x100000>; + cache-level = <3>; + }; + }; + + L1_I_0: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_0: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_TLB_0: l1-tlb { + qcom,dump-size = <0x3000>; + }; + }; + + CPU1: cpu@100 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + cache-size = <0x8000>; + next-level-cache = <&L2_1>; + L2_1: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x20000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_1: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_1: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_TLB_1: l1-tlb { + qcom,dump-size = <0x3000>; + }; + }; + + + 
CPU2: cpu@200 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + cache-size = <0x8000>; + next-level-cache = <&L2_2>; + L2_2: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x20000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_2: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_2: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_TLB_2: l1-tlb { + qcom,dump-size = <0x3000>; + }; + }; + + CPU3: cpu@300 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + cache-size = <0x8000>; + next-level-cache = <&L2_3>; + L2_3: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x20000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_3: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_3: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_TLB_3: l1-tlb { + qcom,dump-size = <0x3000>; + }; + }; + + CPU4: cpu@400 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x400>; + enable-method = "psci"; + cache-size = <0x8000>; + next-level-cache = <&L2_4>; + L2_4: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x20000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_4: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_4: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_TLB_4: l1-tlb { + qcom,dump-size = <0x3000>; + }; + }; + + CPU5: cpu@500 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x500>; + enable-method = "psci"; + cache-size = <0x8000>; + next-level-cache = <&L2_5>; + L2_5: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x20000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_5: l1-icache { + compatible = 
"arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_D_5: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0xa000>; + }; + + L1_TLB_5: l1-tlb { + qcom,dump-size = <0x3000>; + }; + }; + + CPU6: cpu@600 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x600>; + enable-method = "psci"; + cache-size = <0x10000>; + next-level-cache = <&L2_6>; + L2_6: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x40000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_100: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + + L1_D_100: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + + L1_TLB_100: l1-tlb { + qcom,dump-size = <0x3c00>; + }; + }; + + CPU7: cpu@700 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x700>; + enable-method = "psci"; + cache-size = <0x10000>; + next-level-cache = <&L2_7>; + L2_7: l2-cache { + compatible = "arm,arch-cache"; + cache-size = <0x40000>; + cache-level = <2>; + next-level-cache = <&L3_0>; + }; + + L1_I_200: l1-icache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + + L1_D_200: l1-dcache { + compatible = "arm,arch-cache"; + qcom,dump-size = <0x14000>; + }; + + L1_TLB_200: l1-tlb { + qcom,dump-size = <0x3c00>; + }; + }; + + cpu-map { + cluster0 { + core0 { + cpu = <&CPU0>; + }; + + core1 { + cpu = <&CPU1>; + }; + + core2 { + cpu = <&CPU2>; + }; + + core3 { + cpu = <&CPU3>; + }; + + core4 { + cpu = <&CPU4>; + }; + + core5 { + cpu = <&CPU5>; + }; + + }; + + cluster1 { + core6 { + cpu = <&CPU6>; + }; + + core7 { + cpu = <&CPU7>; + }; + }; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + chosen { + bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7"; + }; + + soc: soc { }; + + firmware: firmware { + android { + compatible = "android,firmware"; + fstab { + compatible = "android,fstab"; + vendor { + compatible = "android,vendor"; + dev = 
"/dev/block/platform/soc/1d84000.ufshc/by-name/vendor"; + type = "ext4"; + mnt_flags = "ro,barrier=1,discard"; + fsmgr_flags = "wait,slotselect,avb"; + status = "ok"; + }; + }; + }; + }; +}; + +&soc { + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0 0 0xffffffff>; + compatible = "simple-bus"; + + intc: interrupt-controller@17a00000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + interrupt-controller; + #redistributor-regions = <1>; + redistributor-stride = <0x0 0x20000>; + reg = <0x17a00000 0x10000>, /* GICD */ + <0x17a60000 0x100000>; /* GICR * 8 */ + interrupts = ; + interrupt-parent = <&intc>; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + clock-frequency = <19200000>; + }; + + timer@0x17c20000{ + #address-cells = <1>; + #size-cells = <1>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0x17c20000 0x1000>; + clock-frequency = <19200000>; + + frame@0x17c21000 { + frame-number = <0>; + interrupts = , + ; + reg = <0x17c21000 0x1000>, + <0x17c22000 0x1000>; + }; + + frame@17c23000 { + frame-number = <1>; + interrupts = ; + reg = <0x17c23000 0x1000>; + status = "disabled"; + }; + + frame@17c25000 { + frame-number = <2>; + interrupts = ; + reg = <0x17c25000 0x1000>; + status = "disabled"; + }; + + frame@17c27000 { + frame-number = <3>; + interrupts = ; + reg = <0x17c27000 0x1000>; + status = "disabled"; + }; + + frame@17c29000 { + frame-number = <4>; + interrupts = ; + reg = <0x17c29000 0x1000>; + status = "disabled"; + }; + + frame@17c2b000 { + frame-number = <5>; + interrupts = ; + reg = <0x17c2b000 0x1000>; + status = "disabled"; + }; + + frame@17c2d000 { + frame-number = <6>; + interrupts = ; + reg = <0x17c2d000 0x1000>; + status = "disabled"; + }; + }; + + clock_rpmh: qcom,rpmh { + compatible = "qcom,dummycc"; + clock-output-names = "rpm_clocks"; + #clock-cells = <1>; + }; + + clock_gcc: qcom,gcc { + compatible = "qcom,dummycc"; + clock-output-names = "gcc_clocks"; + #clock-cells = <1>; + 
#reset-cells = <1>; + }; + + clock_camcc: qcom,camcc { + compatible = "qcom,dummycc"; + clock-output-names = "camcc_clocks"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_gpucc: qcom,gpucc { + compatible = "qcom,dummycc"; + clock-output-names = "gpucc_clocks"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_videocc: qcom,videocc { + compatible = "qcom,dummycc"; + clock-output-names = "videocc_clocks"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_dispcc: qcom,dispcc { + compatible = "qcom,dummycc"; + clock-output-names = "dispcc_clocks"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_npucc: qcom,npucc { + compatible = "qcom,dummycc"; + clock-output-names = "npucc_clocks"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + cpu_pmu: cpu-pmu { + compatible = "arm,armv8-pmuv3"; + qcom,irq-is-percpu; + interrupts = ; + }; + + qcom,msm-imem@146aa000 { + compatible = "qcom,msm-imem"; + reg = <0x146aa000 0x1000>; + ranges = <0x0 0x146aa000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + mem_dump_table@10 { + compatible = "qcom,msm-imem-mem_dump_table"; + reg = <0x10 8>; + }; + + restart_reason@65c { + compatible = "qcom,msm-imem-restart_reason"; + reg = <0x65c 4>; + }; + + boot_stats@6b0 { + compatible = "qcom,msm-imem-boot_stats"; + reg = <0x6b0 32>; + }; + + kaslr_offset@6d0 { + compatible = "qcom,msm-imem-kaslr_offset"; + reg = <0x6d0 12>; + }; + + pil@94c { + compatible = "qcom,msm-imem-pil"; + reg = <0x94c 200>; + }; + }; + + restart@c264000 { + compatible = "qcom,pshold"; + reg = <0xc264000 0x4>, + <0x1fd3000 0x4>; + reg-names = "pshold-base", "tcsr-boot-misc-detect"; + }; + + qcom,msm-rtb { + compatible = "qcom,msm-rtb"; + qcom,rtb-size = <0x100000>; + }; + + wdog: qcom,wdt@17c10000{ + compatible = "qcom,msm-watchdog"; + reg = <0x17c10000 0x1000>; + reg-names = "wdt-base"; + interrupts = , + ; + qcom,bark-time = <11000>; + qcom,pet-time = <9360>; + qcom,ipi-ping; + qcom,wakeup-enable; + }; + + 
qcom,chd_sliver { + compatible = "qcom,core-hang-detect"; + label = "silver"; + qcom,threshold-arr = <0x18000058 0x18010058 + 0x18020058 0x18030058 + 0x18040058 0x18050058>; + qcom,config-arr = <0x18000060 0x18010060 + 0x18020060 0x18030060 + 0x18040060 0x18050060>; + }; + + qcom,chd_gold { + compatible = "qcom,core-hang-detect"; + label = "gold"; + qcom,threshold-arr = <0x18060058 0x18070058>; + qcom,config-arr = <0x18060060 0x18070060>; + }; + + kryo-erp { + compatible = "arm,arm64-kryo-cpu-erp"; + interrupts = , + ; + + interrupt-names = "l1-l2-faultirq", + "l3-scu-faultirq"; + }; + + qcom,ghd { + compatible = "qcom,gladiator-hang-detect-v3"; + qcom,threshold-arr = <0x17e0041C>; + qcom,config-reg = <0x17e00434>; + }; + + cpuss_dump { + compatible = "qcom,cpuss-dump"; + + qcom,l1_i_cache0 { + qcom,dump-node = <&L1_I_0>; + qcom,dump-id = <0x60>; + }; + + qcom,l1_i_cache1 { + qcom,dump-node = <&L1_I_1>; + qcom,dump-id = <0x61>; + }; + + qcom,l1_i_cache2 { + qcom,dump-node = <&L1_I_2>; + qcom,dump-id = <0x62>; + }; + + qcom,l1_i_cache3 { + qcom,dump-node = <&L1_I_3>; + qcom,dump-id = <0x63>; + }; + + qcom,l1_i_cache4 { + qcom,dump-node = <&L1_I_4>; + qcom,dump-id = <0x64>; + }; + + qcom,l1_i_cache5 { + qcom,dump-node = <&L1_I_5>; + qcom,dump-id = <0x65>; + }; + + qcom,l1_i_cache100 { + qcom,dump-node = <&L1_I_100>; + qcom,dump-id = <0x66>; + }; + + qcom,l1_i_cache200 { + qcom,dump-node = <&L1_I_200>; + qcom,dump-id = <0x67>; + }; + + qcom,l1_d_cache0 { + qcom,dump-node = <&L1_D_0>; + qcom,dump-id = <0x80>; + }; + + qcom,l1_d_cache1 { + qcom,dump-node = <&L1_D_1>; + qcom,dump-id = <0x81>; + }; + + qcom,l1_d_cache2 { + qcom,dump-node = <&L1_D_2>; + qcom,dump-id = <0x82>; + }; + + qcom,l1_d_cache3 { + qcom,dump-node = <&L1_D_3>; + qcom,dump-id = <0x83>; + }; + + qcom,l1_d_cache4 { + qcom,dump-node = <&L1_D_4>; + qcom,dump-id = <0x84>; + }; + + qcom,l1_d_cache5 { + qcom,dump-node = <&L1_D_5>; + qcom,dump-id = <0x85>; + }; + + qcom,l1_d_cache100 { + qcom,dump-node = 
<&L1_D_100>; + qcom,dump-id = <0x86>; + }; + + qcom,l1_d_cache200 { + qcom,dump-node = <&L1_D_200>; + qcom,dump-id = <0x87>; + }; + + qcom,l1_tlb_dump0 { + qcom,dump-node = <&L1_TLB_0>; + qcom,dump-id = <0x20>; + }; + + qcom,l1_tlb_dump1 { + qcom,dump-node = <&L1_TLB_1>; + qcom,dump-id = <0x21>; + }; + + qcom,l1_tlb_dump2 { + qcom,dump-node = <&L1_TLB_2>; + qcom,dump-id = <0x22>; + }; + + qcom,l1_tlb_dump3 { + qcom,dump-node = <&L1_TLB_3>; + qcom,dump-id = <0x23>; + }; + + qcom,l1_tlb_dump4 { + qcom,dump-node = <&L1_TLB_4>; + qcom,dump-id = <0x24>; + }; + + qcom,l1_tlb_dump5 { + qcom,dump-node = <&L1_TLB_5>; + qcom,dump-id = <0x25>; + }; + + qcom,l1_tlb_dump100 { + qcom,dump-node = <&L1_TLB_100>; + qcom,dump-id = <0x26>; + }; + + qcom,l1_tlb_dump200 { + qcom,dump-node = <&L1_TLB_200>; + qcom,dump-id = <0x27>; + }; + }; + + qcom,llcc@9200000 { + compatible = "qcom,llcc-core", "syscon", "simple-mfd"; + reg = <0x9200000 0x450000>; + reg-names = "llcc_base"; + qcom,llcc-banks-off = <0x0 0x80000>; + qcom,llcc-broadcast-off = <0x400000>; + + llcc: qcom,sdmmagpie-llcc { + compatible = "qcom,sdmmagpie-llcc"; + #cache-cells = <1>; + max-slices = <32>; + cap-based-alloc-and-pwr-collapse; + }; + + qcom,llcc-erp { + compatible = "qcom,llcc-erp"; + interrupt-names = "ecc_irq"; + interrupts = ; + }; + + qcom,llcc-amon { + compatible = "qcom,llcc-amon"; + }; + }; +}; + +#include "sdmmagpie-pinctrl.dtsi" +#include "sdmmagpie-gdsc.dtsi" + +&pcie_0_gdsc { + status = "ok"; +}; + +&pcie_tbu_gdsc { + status = "ok"; +}; + +&usb30_prim_gdsc { + status = "ok"; +}; + +&ufs_phy_gdsc { + status = "ok"; +}; + +&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc { + status = "ok"; +}; + +&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc { + status = "ok"; +}; + +&hlos1_vote_aggre_noc_mmu_tbu1_gdsc { + status = "ok"; +}; + +&hlos1_vote_aggre_noc_mmu_tbu2_gdsc { + status = "ok"; +}; + +&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc { + status = "ok"; +}; + +&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc { + status = "ok"; +}; + 
+&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc { + status = "ok"; +}; + +&bps_gdsc { + status = "ok"; +}; + +&ife_0_gdsc { + status = "ok"; +}; + +&ife_1_gdsc { + status = "ok"; +}; + +&ipe_0_gdsc { + status = "ok"; +}; + +&ipe_1_gdsc { + status = "ok"; +}; + +&titan_top_gdsc { + status = "ok"; +}; + +&mdss_core_gdsc { + status = "ok"; +}; + +&gpu_cx_gdsc { + status = "ok"; +}; + +&gpu_gx_gdsc { + status = "ok"; +}; + +&mvsc_gdsc { + status = "ok"; +}; + +&mvs0_gdsc { + status = "ok"; +}; + +&mvs1_gdsc { + status = "ok"; +}; + +&npu_core_gdsc { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi index 83cbf89a00a70608c4fffd2f17e3cde39995f84f..d4db086685f50460ce0a7eea24728c0a13a77350 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi @@ -16,3 +16,7 @@ &qupv3_se0_2uart { status = "ok"; }; + +&pm6150_charger { + qcom,batteryless-platform; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi b/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..2e5d6ecb62f3227b6c36871f7f0981f947c364a7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm6150-coresight.dtsi @@ -0,0 +1,2753 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + csr: csr@6001000 { + compatible = "qcom,coresight-csr"; + reg = <0x6001000 0x1000>; + reg-names = "csr-base"; + + coresight-name = "coresight-csr"; + qcom,usb-bam-support; + qcom,hwctrl-set-support; + qcom,set-byte-cntr-support; + + qcom,blk-size = <1>; + }; + + replicator_qdss: replicator@6046000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b909>; + + reg = <0x6046000 0x1000>; + reg-names = "replicator-base"; + + coresight-name = "coresight-replicator-qdss"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + replicator0_out_tmc_etr: endpoint { + remote-endpoint= + <&tmc_etr_in_replicator0>; + }; + }; + + port@1 { + reg = <1>; + replicator0_out_replicator1_in: endpoint { + remote-endpoint= + <&replicator1_in_replicator0_out>; + }; + }; + + port@2 { + reg = <0>; + replicator0_in_tmc_etf: endpoint { + slave-mode; + remote-endpoint= + <&tmc_etf_out_replicator0>; + }; + }; + }; + }; + + replicator_qdss1: replicator@604a000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b909>; + + reg = <0x604a000 0x1000>; + reg-names = "replicator-base"; + + coresight-name = "coresight-replicator-qdss1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <1>; + replicator1_out_funnel_swao: endpoint { + remote-endpoint= + <&funnel_swao_in_replicator1_out>; + }; + }; + + port@1 { + reg = <1>; + replicator1_in_replicator0_out: endpoint { + slave-mode; + remote-endpoint= + <&replicator0_out_replicator1_in>; + }; + }; + }; + }; + + tmc_etr: tmc@6048000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b961>; + reg = <0x6048000 0x1000>, + <0x6064000 0x15000>; + reg-names = "tmc-base", "bam-base"; + + qcom,smmu-s1-bypass; + arm,buffer-size = <0x400000>; + + coresight-name = "coresight-tmc-etr"; + coresight-ctis = <&cti0>; + 
coresight-csr = <&csr>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + interrupts = ; + interrupt-names = "byte-cntr-irq"; + + port { + tmc_etr_in_replicator0: endpoint { + slave-mode; + remote-endpoint = <&replicator0_out_tmc_etr>; + }; + }; + }; + + tmc_etf: tmc@6047000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b961>; + + reg = <0x6047000 0x1000>; + reg-names = "tmc-base"; + + coresight-name = "coresight-tmc-etf"; + coresight-ctis = <&cti0>; + coresight-csr = <&csr>; + arm,default-sink; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + tmc_etf_out_replicator0: endpoint { + remote-endpoint = + <&replicator0_in_tmc_etf>; + }; + }; + + port@1 { + reg = <0>; + tmc_etf_in_funnel_merg: endpoint { + slave-mode; + remote-endpoint = + <&funnel_merg_out_tmc_etf>; + }; + }; + }; + + }; + + funnel_merg: funnel@6045000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6045000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-merg"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_merg_out_tmc_etf: endpoint { + remote-endpoint = + <&tmc_etf_in_funnel_merg>; + }; + }; + + port@1 { + reg = <0>; + funnel_merg_in_funnel_in0: endpoint { + slave-mode; + remote-endpoint = + <&funnel_in0_out_funnel_merg>; + }; + }; + + port@2 { + reg = <1>; + funnel_merg_in_funnel_in1: endpoint { + slave-mode; + remote-endpoint = + <&funnel_in1_out_funnel_merg>; + }; + }; + }; + }; + + funnel_in0: funnel@6041000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6041000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-in0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + 
#size-cells = <0>; + + port@0 { + reg = <0>; + funnel_in0_out_funnel_merg: endpoint { + remote-endpoint = + <&funnel_merg_in_funnel_in0>; + }; + }; + + port@1 { + reg = <6>; + funnel_in0_in_funnel_qatb: endpoint { + slave-mode; + remote-endpoint = + <&funnel_qatb_out_funnel_in0>; + }; + }; + + port@2 { + reg = <7>; + funnel_in0_in_stm: endpoint { + slave-mode; + remote-endpoint = <&stm_out_funnel_in0>; + }; + }; + }; + }; + + funnel_qatb: funnel@6005000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6005000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-qatb"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_qatb_out_funnel_in0: endpoint { + remote-endpoint = + <&funnel_in0_in_funnel_qatb>; + }; + }; + + port@1 { + reg = <0>; + funnel_qatb_in_tpda: endpoint { + slave-mode; + remote-endpoint = + <&tpda_out_funnel_qatb>; + }; + }; + + port@2 { + reg = <5>; + funnel_qatb_in_funnel_monaq_1: endpoint { + slave-mode; + remote-endpoint = + <&funnel_monaq_1_out_funnel_qatb>; + }; + }; + + port@3 { + reg = <7>; + funnel_qatb_in_funnel_turing_1: endpoint { + slave-mode; + remote-endpoint = + <&funnel_turing_1_out_funnel_qatb>; + }; + }; + }; + }; + + tpda: tpda@6004000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x6004000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda"; + + qcom,tpda-atid = <65>; + qcom,bc-elem-size = <10 32>, + <13 32>; + qcom,tc-elem-size = <13 32>; + qcom,dsb-elem-size = <0 32>, + <2 32>, + <3 32>, + <5 32>, + <6 32>, + <10 32>, + <11 32>, + <13 32>; + qcom,cmb-elem-size = <3 64>, + <7 64>, + <13 64>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_out_funnel_qatb: endpoint { + remote-endpoint = + 
<&funnel_qatb_in_tpda>; + }; + + }; + + port@1 { + reg = <0>; + tpda_in_tpdm_center: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_center_out_tpda>; + }; + }; + + port@2 { + reg = <4>; + tpda_in_funnel_monaq: endpoint { + slave-mode; + remote-endpoint = + <&funnel_monaq_out_tpda>; + }; + }; + + port@3 { + reg = <5>; + tpda_in_funnel_ddr_0: endpoint { + slave-mode; + remote-endpoint = + <&funnel_ddr_0_out_tpda>; + }; + }; + + port@4 { + reg = <6>; + tpda_in_funnel_turing: endpoint { + slave-mode; + remote-endpoint = + <&funnel_turing_out_tpda>; + }; + }; + + port@5 { + reg = <7>; + tpda_in_tpdm_vsense: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_vsense_out_tpda>; + }; + }; + + port@6 { + reg = <8>; + tpda_in_tpdm_dcc: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_dcc_out_tpda>; + }; + }; + + port@7 { + reg = <9>; + tpda_in_tpdm_prng: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_prng_out_tpda>; + }; + }; + + port@8 { + reg = <11>; + tpda_in_tpdm_qm: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_qm_out_tpda>; + }; + }; + + port@9 { + reg = <14>; + tpda_in_tpdm_pimem: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_pimem_out_tpda>; + }; + }; + + port@10 { + reg = <12>; + tpda_in_tpdm_west: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_west_out_tpda>; + }; + }; + }; + }; + + tpdm_west: tpdm@6b48000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6b48000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-west"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + qcom,msr-fix-req; + + port { + tpdm_west_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_west>; + }; + }; + }; + + tpdm_center: tpdm@6c28000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6c28000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-center"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = 
"apb_pclk"; + + qcom,msr-fix-req; + + port { + tpdm_center_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_center>; + }; + }; + }; + + funnel_monaq: funnel@69c3000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x69c3000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-monaq"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_monaq_out_tpda: endpoint { + remote-endpoint = + <&tpda_in_funnel_monaq>; + }; + }; + + port@1 { + reg = <0>; + funnel_monaq_in_tpdm_dl_monaq: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_dl_monaq_out_funnel_monaq>; + }; + }; + }; + }; + + funnel_monaq1: funnel_1@69c3000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x69c1000 0x1>, + <0x69c3000 0x1000>; + reg-names = "funnel-base-dummy", "funnel-base-real"; + + coresight-name = "coresight-funnel-monaq1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + qcom,duplicate-funnel; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_monaq_1_out_funnel_qatb: endpoint { + remote-endpoint = + <&funnel_qatb_in_funnel_monaq_1>; + }; + }; + + port@1 { + reg = <1>; + funnel_monaq_1_in_audio_etm0: endpoint { + slave-mode; + remote-endpoint = + <&audio_etm0_out_funnel_monaq_1>; + }; + }; + + port@2 { + reg = <6>; + funnel_monaq_1_in_funnel_modem: endpoint { + slave-mode; + remote-endpoint = + <&funnel_modem_out_funnel_monaq_1>; + }; + }; + + port@3 { + reg = <7>; + funnel_monaq_1_in_modem_etm0: endpoint { + slave-mode; + remote-endpoint = + <&modem_etm0_out_funnel_monaq_1>; + }; + }; + }; + }; + + funnel_modem: funnel@6832000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6832000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-modem"; + + clocks = <&clock_aop 
QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_modem_out_funnel_monaq_1: endpoint { + remote-endpoint = + <&funnel_monaq_1_in_funnel_modem>; + }; + }; + + port@1 { + reg = <0>; + funnel_modem_in_tpda_modem: endpoint { + slave-mode; + remote-endpoint = + <&tpda_modem_out_funnel_modem>; + }; + }; + }; + }; + + tpda_modem: tpda@6831000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x6831000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda-modem"; + + qcom,tpda-atid = <67>; + qcom,dsb-elem-size = <0 32>; + qcom,cmb-elem-size = <0 64>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_modem_out_funnel_modem: endpoint { + remote-endpoint = + <&funnel_modem_in_tpda_modem>; + }; + }; + + port@1 { + reg = <0>; + tpda_modem_in_tpdm_modem: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_modem_out_tpda_modem>; + }; + }; + }; + }; + + tpdm_modem: tpdm@6830000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6830000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-modem"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_modem_out_tpda_modem: endpoint { + remote-endpoint = + <&tpda_modem_in_tpdm_modem>; + }; + }; + }; + + modem_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-modem-etm0"; + qcom,inst-id = <2>; + + port { + modem_etm0_out_funnel_monaq_1: endpoint { + remote-endpoint = + <&funnel_monaq_1_in_modem_etm0>; + }; + }; + }; + + audio_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-audio-etm0"; + qcom,inst-id = <5>; + + port { + audio_etm0_out_funnel_monaq_1: endpoint { + remote-endpoint = + <&funnel_monaq_1_in_audio_etm0>; + }; + }; + }; + + tpdm_dl_monaq: 
tpdm@69c0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x69c0000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-dl-monaq"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_dl_monaq_out_funnel_monaq: endpoint { + remote-endpoint = + <&funnel_monaq_in_tpdm_dl_monaq>; + }; + }; + }; + + funnel_ddr_0: funnel@6a05000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6a05000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-ddr-0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_ddr_0_out_tpda: endpoint { + remote-endpoint = + <&tpda_in_funnel_ddr_0>; + }; + }; + + port@1 { + reg = <0>; + funnel_ddr_0_in_tpdm_ddr: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_ddr_out_funnel_ddr_0>; + }; + }; + }; + }; + + tpdm_ddr: tpdm@6a00000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6a00000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-ddr"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + qcom,msr-fix-req; + + port { + tpdm_ddr_out_funnel_ddr_0: endpoint { + remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>; + }; + }; + }; + + funnel_turing: funnel@6861000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6861000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-turing"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_turing_out_tpda: endpoint { + remote-endpoint = + <&tpda_in_funnel_turing>; + }; + }; + + port@1 { + reg = <0>; + funnel_turing_in_tpdm_turing: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_turing_out_funnel_turing>; + }; + }; + }; 
+ }; + + funnel_turing1: funnel_1@6861000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6867010 0x10>, + <0x6861000 0x1000>; + reg-names = "funnel-base-dummy", "funnel-base-real"; + + coresight-name = "coresight-funnel-turing1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + qcom,duplicate-funnel; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_turing_1_out_funnel_qatb: endpoint { + remote-endpoint = + <&funnel_qatb_in_funnel_turing_1>; + }; + }; + + port@1 { + reg = <0>; + funnel_turing_1_in_turing_etm0: endpoint { + slave-mode; + remote-endpoint = + <&turing_etm0_out_funnel_turing_1>; + }; + }; + }; + }; + + turing_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-turing-etm0"; + qcom,inst-id = <13>; + + port{ + turing_etm0_out_funnel_turing_1: endpoint { + remote-endpoint = + <&funnel_turing_1_in_turing_etm0>; + }; + }; + }; + + tpdm_turing: tpdm@6860000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6860000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-turing"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + qcom,msr-fix-req; + + port { + tpdm_turing_out_funnel_turing: endpoint { + remote-endpoint = + <&funnel_turing_in_tpdm_turing>; + }; + }; + }; + + tpdm_vsense: tpdm@6840000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6840000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-vsense"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port{ + tpdm_vsense_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_vsense>; + }; + }; + }; + + tpdm_dcc: tpdm@6870000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6870000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-dcc"; + + clocks = <&clock_aop 
QDSS_CLK>; + clock-names = "apb_pclk"; + + port{ + tpdm_dcc_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_dcc>; + }; + }; + }; + + tpdm_prng: tpdm@684c000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x684c000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-prng"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port{ + tpdm_prng_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_prng>; + }; + }; + }; + + tpdm_qm: tpdm@69d0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x69d0000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-qm"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_qm_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_qm>; + }; + }; + }; + + tpdm_pimem: tpdm@6850000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6850000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-pimem"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_pimem_out_tpda: endpoint { + remote-endpoint = <&tpda_in_tpdm_pimem>; + }; + }; + }; + + stm: stm@6002000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b962>; + + reg = <0x6002000 0x1000>, + <0x16280000 0x180000>; + reg-names = "stm-base", "stm-stimulus-base"; + + coresight-name = "coresight-stm"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + stm_out_funnel_in0: endpoint { + remote-endpoint = <&funnel_in0_in_stm>; + }; + }; + + }; + + funnel_in1: funnel@6042000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6042000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-in1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + 
funnel_in1_out_funnel_merg: endpoint { + remote-endpoint = + <&funnel_merg_in_funnel_in1>; + }; + }; + + port@1 { + reg = <3>; + funnel_in1_in_replicator_swao: endpoint { + slave-mode; + remote-endpoint = + <&replicator_swao_out_funnel_in1>; + }; + }; + + port@2 { + reg = <7>; + funnel_in1_in_funnel_apss_merg: endpoint { + slave-mode; + remote-endpoint = + <&funnel_apss_merg_out_funnel_in1>; + }; + }; + }; + }; + + replicator_swao: replicator@6b0a000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b909>; + + reg = <0x6b0a000 0x1000>; + reg-names = "replicator-base"; + + coresight-name = "coresight-replicator-swao"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* Always have EUD before funnel leading to ETR. If both + * sink are active we need to give preference to EUD + * over ETR + */ + port@0 { + reg = <1>; + replicator_swao_out_eud: endpoint { + remote-endpoint = + <&eud_in_replicator_swao>; + }; + }; + + port@1 { + reg = <0>; + replicator_swao_out_funnel_in1: endpoint { + remote-endpoint = + <&funnel_in1_in_replicator_swao>; + }; + }; + + port@2 { + reg = <0>; + replicator_swao_in_tmc_etf_swao: endpoint { + slave-mode; + remote-endpoint = + <&tmc_etf_swao_out_replicator_swao>; + }; + }; + + }; + }; + + tmc_etf_swao: tmc@6b09000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b961>; + + reg = <0x6b09000 0x1000>; + reg-names = "tmc-base"; + + coresight-name = "coresight-tmc-etf-swao"; + coresight-csr = <&csr>; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + tmc_etf_swao_out_replicator_swao: endpoint { + remote-endpoint= + <&replicator_swao_in_tmc_etf_swao>; + }; + }; + + port@1 { + reg = <0>; + tmc_etf_swao_in_funnel_swao: endpoint { + slave-mode; + remote-endpoint= + <&funnel_swao_out_tmc_etf_swao>; + }; + }; + }; + + }; + + swao_csr: csr@6b0e000 
{ + compatible = "qcom,coresight-csr"; + reg = <0x6b0e000 0x1000>; + reg-names = "csr-base"; + + coresight-name = "coresight-swao-csr"; + qcom,timestamp-support; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + qcom,blk-size = <1>; + }; + + funnel_swao:funnel@6b08000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6b08000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-swao"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_swao_out_tmc_etf_swao: endpoint { + remote-endpoint = + <&tmc_etf_swao_in_funnel_swao>; + }; + }; + + port@1 { + reg = <5>; + funnel_swao_in_funnel_ssc: endpoint { + slave-mode; + remote-endpoint= + <&funnel_ssc_out_funnel_swao>; + }; + }; + + port@2 { + reg = <6>; + funnel_swao_in_replicator1_out: endpoint { + slave-mode; + remote-endpoint= + <&replicator1_out_funnel_swao>; + }; + }; + port@3 { + reg = <7>; + funnel_swao_in_tpda_swao: endpoint { + slave-mode; + remote-endpoint= + <&tpda_swao_out_funnel_swao>; + }; + }; + }; + }; + + funnel_ssc: funnel@6b14000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x6b14000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-ssc"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_ssc_out_funnel_swao: endpoint { + remote-endpoint = + <&funnel_swao_in_funnel_ssc>; + }; + }; + + port@1 { + reg = <0>; + funnel_ssc_in_ssc_etm0: endpoint { + slave-mode; + remote-endpoint = + <&ssc_etm0_out_funnel_ssc>; + }; + }; + + port@2 { + reg = <0>; + funnel_ssc_in_ssc_stm: endpoint { + slave-mode; + remote-endpoint = + <&ssc_stm_out_funnel_ssc>; + }; + }; + }; + }; + + ssc_stm: stm@6b13000 { + compatible = "arm,primecell"; + arm,primecell-periphid = 
<0x0003b962>; + + reg = <0x06b13000 0x1000>; + reg-names = "stm-base"; + coresight-name = "coresight-ssc-stm"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + ssc_stm_out_funnel_ssc: endpoint { + remote-endpoint = <&funnel_ssc_in_ssc_stm>; + }; + }; + }; + + ssc_etm0 { + compatible = "qcom,coresight-remote-etm"; + + coresight-name = "coresight-ssc-etm0"; + qcom,inst-id = <8>; + + port { + ssc_etm0_out_funnel_ssc: endpoint { + remote-endpoint = + <&funnel_ssc_in_ssc_etm0>; + }; + }; + }; + + tpda_swao: tpda@6b01000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x6b01000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda-swao"; + + qcom,tpda-atid = <71>; + qcom,dsb-elem-size = <1 32>; + qcom,cmb-elem-size = <0 64>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + tpda_swao_out_funnel_swao: endpoint { + remote-endpoint = + <&funnel_swao_in_tpda_swao>; + }; + + }; + + port@1 { + reg = <0>; + tpda_swao_in_tpdm_swao0: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_swao0_out_tpda_swao>; + }; + }; + + port@2 { + reg = <1>; + tpda_swao_in_tpdm_swao1: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_swao1_out_tpda_swao>; + }; + + }; + }; + }; + + tpdm_swao0: tpdm@6b02000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + + reg = <0x6b02000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-swao-0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_swao0_out_tpda_swao: endpoint { + remote-endpoint = <&tpda_swao_in_tpdm_swao0>; + }; + }; + }; + + tpdm_swao1: tpdm@6b03000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x6b03000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name="coresight-tpdm-swao-1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = 
"apb_pclk"; + + qcom,msr-fix-req; + + port { + tpdm_swao1_out_tpda_swao: endpoint { + remote-endpoint = <&tpda_swao_in_tpdm_swao1>; + }; + }; + }; + + dummy_eud: dummy_sink { + compatible = "qcom,coresight-dummy"; + + coresight-name = "coresight-eud"; + + qcom,dummy-sink; + port { + eud_in_replicator_swao: endpoint { + slave-mode; + remote-endpoint = + <&replicator_swao_out_eud>; + }; + }; + }; + + cti_mss_q6: cti@683b000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x683b000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-mss-q6"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_turing: cti@6867000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6867000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-turing"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti2_ssc_sdc: cti@6b10000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b10000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-ssc_sdc_cti2"; + status = "disabled"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti1_ssc: cti@6b11000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b11000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-ssc_cti1"; + status = "disabled"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti0_ssc_q6: cti@6b1b000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b1b000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-ssc_q6_cti0"; + status = "disabled"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_ssc_noc: cti@6b1e000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b1e000 0x1000>; + reg-names = "cti-base"; + + 
coresight-name = "coresight-cti-ssc_noc"; + status = "disabled"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti6_ssc_noc: cti@6b1f000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b1f000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-ssc_noc_cti6"; + status = "disabled"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti0_swao: cti@6b04000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b04000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-swao_cti0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti1_swao: cti@6b05000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b05000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-swao_cti1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti2_swao: cti@6b06000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b06000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-swao_cti2"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti3_swao: cti@6b07000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b07000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-swao_cti3"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_aop_m3: cti@6b21000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6b21000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-aop-m3"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_titan: cti@6c13000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6c13000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-titan"; 
+ + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_venus_arm9: cti@6c20000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6c20000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-venus-arm9"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti0_apss: cti@78e0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x78e0000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-apss_cti0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti1_apss: cti@78f0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x78f0000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-apss_cti1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti2_apss: cti@7900000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7900000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-apss_cti2"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti0: cti@6010000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6010000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti1: cti@6011000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6011000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti2: cti@6012000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6012000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti2"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti3: cti@6013000 { + compatible = 
"arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6013000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti3"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti4: cti@6014000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6014000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti4"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti5: cti@6015000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6015000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti5"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti6: cti@6016000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6016000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti6"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti7: cti@6017000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6017000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti7"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti8: cti@6018000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6018000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti8"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti9: cti@6019000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6019000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti9"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti10: cti@601a000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601a000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti10"; + + clocks = 
<&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti11: cti@601b000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601b000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti11"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti12: cti@601c000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601c000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti12"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti13: cti@601d000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601d000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti13"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti14: cti@601e000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601e000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti14"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti15: cti@601f000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601f000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti15"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti_cpu0: cti@7020000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7020000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu0"; + cpu = <&CPU0>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti_cpu1: cti@7120000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7120000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu1"; + cpu = <&CPU1>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu2: cti@7220000 { + compatible = 
"arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7220000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu2"; + cpu = <&CPU2>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu3: cti@7320000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7320000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu3"; + cpu = <&CPU3>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu4: cti@7420000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7420000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu4"; + cpu = <&CPU4>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu5: cti@7520000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7520000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu5"; + cpu = <&CPU5>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu6: cti@7620000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7620000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu6"; + cpu = <&CPU6>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu7: cti@7720000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7720000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu7"; + cpu = <&CPU7>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + etm0: etm@7040000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7040000 0x1000>; + cpu = <&CPU0>; + + coresight-name = "coresight-etm0"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm0_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm0>; + 
}; + }; + }; + + etm1: etm@7140000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7140000 0x1000>; + cpu = <&CPU1>; + + coresight-name = "coresight-etm1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm1_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm1>; + }; + }; + }; + + etm2: etm@7240000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7240000 0x1000>; + cpu = <&CPU2>; + + coresight-name = "coresight-etm2"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm2_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm2>; + }; + }; + }; + + etm3: etm@7340000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7340000 0x1000>; + cpu = <&CPU3>; + + coresight-name = "coresight-etm3"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm3_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm3>; + }; + }; + }; + + etm4: etm@7440000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7440000 0x1000>; + cpu = <&CPU4>; + + coresight-name = "coresight-etm4"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm4_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm4>; + }; + }; + }; + + etm5: etm@7540000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7540000 0x1000>; + cpu = <&CPU5>; + + coresight-name = "coresight-etm5"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm5_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm5>; + }; + }; + }; + + etm6: etm@7640000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7640000 0x1000>; + cpu = <&CPU6>; + + coresight-name = "coresight-etm6"; + + clocks = <&clock_aop QDSS_CLK>; + 
clock-names = "apb_pclk"; + + port { + etm6_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm6>; + }; + }; + }; + + etm7: etm@7740000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x000bb95d>; + + reg = <0x7740000 0x1000>; + cpu = <&CPU7>; + + coresight-name = "coresight-etm7"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etm7_out_funnel_apss: endpoint { + remote-endpoint = <&funnel_apss_in_etm7>; + }; + }; + }; + + funnel_apss_merg: funnel@7810000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x7810000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-apss-merg"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_apss_merg_out_funnel_in1: endpoint { + remote-endpoint = + <&funnel_in1_in_funnel_apss_merg>; + }; + }; + + port@1 { + reg = <0>; + funnel_apss_merg_in_funnel_apss: endpoint { + slave-mode; + remote-endpoint = + <&funnel_apss_out_funnel_apss_merg>; + }; + }; + + port@2 { + reg = <2>; + funnel_apss_merg_in_tpda_olc: endpoint { + slave-mode; + remote-endpoint = + <&tpda_olc_out_funnel_apss_merg>; + }; + }; + + port@3 { + reg = <3>; + funnel_apss_merg_in_tpda_llm_silver: endpoint { + slave-mode; + remote-endpoint = + <&tpda_llm_silver_out_funnel_apss_merg>; + }; + }; + + port@4 { + reg = <4>; + funnel_apss_merg_in_tpda_llm_gold: endpoint { + slave-mode; + remote-endpoint = + <&tpda_llm_gold_out_funnel_apss_merg>; + }; + }; + + port@5 { + reg = <5>; + funnel_apss_merg_in_tpda_apss: endpoint { + slave-mode; + remote-endpoint = + <&tpda_apss_out_funnel_apss_merg>; + }; + }; + }; + }; + + tpda_olc: tpda@7832000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x7832000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda-olc"; + + qcom,tpda-atid = <69>; + qcom,cmb-elem-size 
= <0 64>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_olc_out_funnel_apss_merg: endpoint { + remote-endpoint = + <&funnel_apss_merg_in_tpda_olc>; + }; + }; + port@1 { + reg = <0>; + tpda_olc_in_tpdm_olc: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_olc_out_tpda_olc>; + }; + }; + }; + }; + + tpdm_olc: tpdm@7830000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x7830000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-olc"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port{ + tpdm_olc_out_tpda_olc: endpoint { + remote-endpoint = <&tpda_olc_in_tpdm_olc>; + }; + }; + }; + + tpda_apss: tpda@7862000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x7862000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda-apss"; + + qcom,tpda-atid = <66>; + qcom,dsb-elem-size = <0 32>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_apss_out_funnel_apss_merg: endpoint { + remote-endpoint = + <&funnel_apss_merg_in_tpda_apss>; + }; + }; + + port@1 { + reg = <0>; + tpda_apss_in_tpdm_apss: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_apss_out_tpda_apss>; + }; + }; + }; + }; + + tpdm_apss: tpdm@7860000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x7860000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-apss"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_apss_out_tpda_apss: endpoint { + remote-endpoint = <&tpda_apss_in_tpdm_apss>; + }; + }; + }; + + tpda_llm_silver: tpda@78c0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x78c0000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = 
"coresight-tpda-llm-silver"; + + qcom,tpda-atid = <72>; + qcom,cmb-elem-size = <0 32>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_llm_silver_out_funnel_apss_merg: endpoint { + remote-endpoint = + <&funnel_apss_merg_in_tpda_llm_silver>; + }; + }; + + port@1 { + reg = <0>; + tpda_llm_silver_in_tpdm_llm_silver: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_llm_silver_out_tpda_llm_silver>; + }; + }; + }; + }; + + tpdm_llm_silver: tpdm@78a0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x78a0000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-llm-silver"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_llm_silver_out_tpda_llm_silver: endpoint { + remote-endpoint = + <&tpda_llm_silver_in_tpdm_llm_silver>; + }; + }; + }; + + tpda_llm_gold: tpda@78d0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b969>; + reg = <0x78d0000 0x1000>; + reg-names = "tpda-base"; + + coresight-name = "coresight-tpda-llm-gold"; + + qcom,tpda-atid = <73>; + qcom,cmb-elem-size = <0 32>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + tpda_llm_gold_out_funnel_apss_merg: endpoint { + remote-endpoint = + <&funnel_apss_merg_in_tpda_llm_gold>; + }; + }; + + port@1 { + reg = <0>; + tpda_llm_gold_in_tpdm_llm_gold: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_llm_gold_out_tpda_llm_gold>; + }; + }; + }; + }; + + tpdm_llm_gold: tpdm@78b0000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b968>; + reg = <0x78b0000 0x1000>; + reg-names = "tpdm-base"; + + coresight-name = "coresight-tpdm-llm-gold"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpdm_llm_gold_out_tpda_llm_gold: endpoint { + remote-endpoint = + 
<&tpda_llm_gold_in_tpdm_llm_gold>; + }; + }; + }; + + funnel_apss: funnel@7800000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b908>; + + reg = <0x7800000 0x1000>; + reg-names = "funnel-base"; + + coresight-name = "coresight-funnel-apss"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel_apss_out_funnel_apss_merg: endpoint { + remote-endpoint = + <&funnel_apss_merg_in_funnel_apss>; + }; + }; + port@1 { + reg = <0>; + funnel_apss_in_etm0: endpoint { + slave-mode; + remote-endpoint = + <&etm0_out_funnel_apss>; + }; + }; + + port@2 { + reg = <1>; + funnel_apss_in_etm1: endpoint { + slave-mode; + remote-endpoint = + <&etm1_out_funnel_apss>; + }; + }; + + port@3 { + reg = <2>; + funnel_apss_in_etm2: endpoint { + slave-mode; + remote-endpoint = + <&etm2_out_funnel_apss>; + }; + }; + + port@4 { + reg = <3>; + funnel_apss_in_etm3: endpoint { + slave-mode; + remote-endpoint = + <&etm3_out_funnel_apss>; + }; + }; + + port@5 { + reg = <4>; + funnel_apss_in_etm4: endpoint { + slave-mode; + remote-endpoint = + <&etm4_out_funnel_apss>; + }; + }; + + port@6 { + reg = <5>; + funnel_apss_in_etm5: endpoint { + slave-mode; + remote-endpoint = + <&etm5_out_funnel_apss>; + }; + }; + + port@7 { + reg = <6>; + funnel_apss_in_etm6: endpoint { + slave-mode; + remote-endpoint = + <&etm6_out_funnel_apss>; + }; + }; + + port@8 { + reg = <7>; + funnel_apss_in_etm7: endpoint { + slave-mode; + remote-endpoint = + <&etm7_out_funnel_apss>; + }; + }; + }; + }; + + cti1: cti@6011000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6011000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti1"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti2: cti@6012000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6012000 0x1000>; + reg-names = "cti-base"; + + 
coresight-name = "coresight-cti2"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti3: cti@6013000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6013000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti3"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti4: cti@6014000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6014000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti4"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti5: cti@6015000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6015000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti5"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti6: cti@6016000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6016000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti6"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti7: cti@6017000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6017000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti7"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti8: cti@6018000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6018000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti8"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti9: cti@6019000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x6019000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti9"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti10: cti@601a000 { + compatible = "arm,primecell"; + 
arm,primecell-periphid = <0x0003b966>; + reg = <0x601a000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti10"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti11: cti@601b000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601b000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti11"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti12: cti@601c000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601c000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti12"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti13: cti@601d000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601d000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti13"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti14: cti@601e000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601e000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti14"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti15: cti@601f000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x601f000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti15"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti_cpu0: cti@7020000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7020000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu0"; + cpu = <&CPU0>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + + }; + + cti_cpu1: cti@7120000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7120000 0x1000>; + reg-names = "cti-base"; + + coresight-name = 
"coresight-cti-cpu1"; + cpu = <&CPU1>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu2: cti@7220000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7220000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu2"; + cpu = <&CPU2>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu3: cti@7320000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7320000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu3"; + cpu = <&CPU3>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu4: cti@7420000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7420000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu4"; + cpu = <&CPU4>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu5: cti@7520000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7520000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu5"; + cpu = <&CPU5>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu6: cti@7620000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7620000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu6"; + cpu = <&CPU6>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + cti_cpu7: cti@7720000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b966>; + reg = <0x7720000 0x1000>; + reg-names = "cti-base"; + + coresight-name = "coresight-cti-cpu7"; + cpu = <&CPU7>; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + hwevent: hwevent@91866f0 { + compatible = "qcom,coresight-hwevent"; + reg = <0x091866f0 0x4>, + <0x91966f0 0x4>, + <0x9186038 0x4>, + <0x9196038 0x4>, + <0x17e00034 0x4>, + 
<0x18200050 0x80>, + <0x02c8d050 0x80>, + <0x0af20050 0x80>; + reg-names = "ddr-ch0-cfg", "ddr-ch23-cfg", "ddr-ch0-ctrl", + "ddr-ch23-ctrl", "apss-testbus-mux-cfg", + "apss-rsc-hwevent-mux0-select", + "gpu-rsc-hwevent-mux0-select", + "sde-rsc-hwevent-mux0-select"; + + coresight-name = "coresight-hwevent"; + coresight-csr = <&csr>; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; + + ipcb_tgu: tgu@6b0c000 { + compatible = "arm,primecell"; + arm,primecell-periphid = <0x0003b999>; + reg = <0x06b0c000 0x1000>; + reg-names = "tgu-base"; + tgu-steps = <3>; + tgu-conditions = <4>; + tgu-regs = <4>; + tgu-timer-counters = <8>; + + coresight-name = "coresight-tgu-ipcb"; + + clocks = <&clock_aop QDSS_CLK>; + clock-names = "apb_pclk"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi b/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi index 3d61fe738ce7e6252ecfbc1a7db561b6a4efc1b2..184c771ea70e4e07d7cc375f40477968d5e5b503 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi @@ -714,6 +714,8 @@ ; + qcom,mode-threshold-currents = <0 1000000 2000000>; + qcom,send-defaults; BOB: pm6150l_bob: regulator-pm6150l-bob { regulator-name = "pm6150l_bob"; diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index 52c2f772134e41da594542ab3bb91d9f9febb4e0..e384dd622a7c28560476a95321a9a83a2d342dc1 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -341,7 +341,7 @@ compatible = "android,fstab"; vendor { compatible = "android,vendor"; - dev = "/dev/block/platform/soc/7c4000.sdhci/by-name/vendor"; + dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor"; type = "ext4"; mnt_flags = "ro,barrier=1,discard"; fsmgr_flags = "wait,slotselect,avb"; @@ -478,8 +478,8 @@ size = <0 0x5c00000>; }; - cont_splash_memory: cont_splash_region@9d400000 { - reg = <0x0 0x9d400000 0x0 0x02400000>; + cont_splash_memory: 
cont_splash_region@9c000000 { + reg = <0x0 0x9c000000 0x0 0x02400000>; label = "cont_splash_region"; }; @@ -708,7 +708,7 @@ <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>; qcom,ev-factor = <2>; qcom,max-num-gpii = <8>; - qcom,gpii-mask = <0x1f>; + qcom,gpii-mask = <0x0f>; iommus = <&apps_smmu 0x00d6 0x0>; qcom,smmu-cfg = <0x1>; qcom,iova-range = <0x0 0x100000 0x0 0x100000>; @@ -724,7 +724,7 @@ <0 283 0>, <0 284 0>, <0 293 0>, <0 294 0>; qcom,ev-factor = <2>; qcom,max-num-gpii = <8>; - qcom,gpii-mask = <0x1f>; + qcom,gpii-mask = <0x0f>; qcom,smmu-cfg = <0x1>; qcom,iova-range = <0x0 0x100000 0x0 0x100000>; iommus = <&apps_smmu 0x0376 0x0>; @@ -946,6 +946,15 @@ reg = <0xc3f000c 8>; }; + dcc: dcc_v2@10a2000 { + compatible = "qcom,dcc-v2"; + reg = <0x10a2000 0x1000>, + <0x10ae000 0x2000>; + reg-names = "dcc-base", "dcc-ram-base"; + + dcc-ram-offset = <0x6000>; + }; + qcom,llcc@9200000 { compatible = "qcom,llcc-core", "syscon", "simple-mfd"; reg = <0x9200000 0x450000>; @@ -1109,6 +1118,11 @@ #mbox-cells = <1>; }; + qcom,msm-cdsp-loader { + compatible = "qcom,cdsp-loader"; + qcom,proc-img-to-load = "cdsp"; + }; + qcom,msm-adsprpc-mem { compatible = "qcom,msm-adsprpc-mem-region"; memory-region = <&adsp_mem>; @@ -1571,3 +1585,4 @@ #include "sm6150-ion.dtsi" #include "msm-arm-smmu-sm6150.dtsi" +#include "sm6150-coresight.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi b/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi index 4ac0b4f1deefbb61f1350f69c11b8bb2289c5187..155b29fd483ff4dc9ca341f0f44926ac0cd494af 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi @@ -15,6 +15,26 @@ #include "sm8150-auto-pmic-overlay.dtsi" +&qupv3_se12_2uart { + status = "ok"; +}; + +&qupv3_se13_4uart { + status = "ok"; +}; + +&qupv3_se3_spi { + status = "ok"; +}; + +&qupv3_se4_i2c { + status = "ok"; +}; +&pil_modem { + status = "disabled"; +}; + + &soc { qcom,lpass@17300000 { status = "disabled"; @@ -30,6 
+50,42 @@ }; }; + qcom,turing@8300000 { + status = "ok"; + }; + + qcom,venus@aae0000 { + status = "ok"; + }; + + qcom,spss@1880000 { + status = "ok"; + }; + + qcom,npu@0x9800000 { + status = "ok"; + }; + + qcom,rmnet-ipa { + status = "ok"; + }; + + qcom,ipa_fws { + status = "ok"; + }; + + qcom,msm-cdsp-loader { + status = "ok"; + }; + + ssc_sensors: qcom,msm-ssc-sensors { + status = "disabled"; + }; + + ipa_hw: qcom,ipa@1e00000 { + status = "ok"; + }; + gpio_keys { compatible = "gpio-keys"; label = "gpio-keys"; @@ -60,6 +116,32 @@ }; }; +&ufsphy_mem { + compatible = "qcom,ufs-phy-qmp-v4"; + + vdda-phy-supply = <&pm8150_2_l18>; + vdda-pll-supply = <&pm8150_2_l8>; + vdda-phy-max-microamp = <87100>; + vdda-pll-max-microamp = <18300>; + + status = "ok"; +}; + +&ufshc_mem { + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm8150_1_l10>; + vcc-voltage-level = <2950000 2960000>; + vccq2-supply = <&pm8150_1_s4>; + vcc-max-microamp = <750000>; + vccq2-max-microamp = <750000>; + + qcom,vddp-ref-clk-supply = <&pm8150_2_l5>; + qcom,vddp-ref-clk-max-microamp = <100>; + + status = "ok"; +}; + &sdhc_2 { vdd-supply = <&pm8150_1_l17>; qcom,vdd-voltage-level = <2950000 2960000>; @@ -79,3 +161,17 @@ status = "ok"; }; + +&usb0 { + dwc3@a600000 { + usb-phy = <&usb2_phy0>, <&usb_nop_phy>; + maximum-speed = "high-speed"; + }; +}; + +&usb1 { + status = "ok"; + dwc3@a800000 { + dr_mode = "host"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi b/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi index 6790abcdf65285f0ae58cea64ced48669c6dbf73..5b1c5faa8cf4c120848d7c9dc7824ba9e9e84970 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi @@ -133,6 +133,19 @@ core-supply = <&pm8150_2_l8>; }; +&usb2_phy1 { + vdd-supply = <&pm8150_1_l5>; + vdda18-supply = <&pm8150_1_l12>; + vdda33-supply = <&pm8150_1_l2>; + status = "ok"; +}; + +&usb_qmp_phy { + vdd-supply = <&pm8150_1_l5>; + core-supply = <&pm8150_2_l8>; + 
status = "ok"; +}; + &icnss { vdd-cx-mx-supply = <&pm8150_1_l1>; vdd-1.8-xo-supply = <&pm8150_1_l7>; @@ -300,10 +313,3 @@ }; }; }; - -&usb0 { - dwc3@a600000 { - usb-phy = <&usb2_phy0>, <&usb_nop_phy>; - maximum-speed = "high-speed"; - }; -}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi b/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi index 1c8943d80814335e5a689508caf19bb55430b5c5..f8c8138a803f3c340579d9958b2a2e0b650c7ff3 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-bus.dtsi @@ -1521,6 +1521,8 @@ qcom,bus-dev = <&fab_config_noc>; qcom,bcms = <&bcm_cn0>; qcom,disable-ports = <10 11 34>; + mmcx-supply = <&VDD_MMCX_LEVEL>; + node-reg-names = "mmcx"; }; slv_qhs_clk_ctl:slv-qhs-clk-ctl { @@ -1595,6 +1597,8 @@ qcom,bus-dev = <&fab_config_noc>; qcom,bcms = <&bcm_cn0>; qcom,disable-ports = <12 13>; + mmcx-supply = <&VDD_MMCX_LEVEL>; + node-reg-names = "mmcx"; }; slv_qhs_emac_cfg:slv-qhs-emac-cfg { @@ -1904,6 +1908,8 @@ qcom,bus-dev = <&fab_config_noc>; qcom,bcms = <&bcm_cn0>; qcom,disable-ports = <15 16 35>; + mmcx-supply = <&VDD_MMCX_LEVEL>; + node-reg-names = "mmcx"; }; slv_qhs_vsense_ctrl_cfg:slv-qhs-vsense-ctrl-cfg { diff --git a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi index a22b029ab4227d74a4bf4674782550712be7e01c..c566fa8f34c22527d9244a0384f3c8fa73701ea2 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi @@ -38,6 +38,18 @@ qca,bt-vdd-pa-current-level = <0>; /* LPM/PFM */ qca,bt-vdd-ldo-current-level = <0>; /* LPM/PFM */ }; + + extcon_usb1: extcon_usb1 { + compatible = "linux,extcon-usb-gpio"; + vbus-gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>; + id-gpio = <&tlmm 101 GPIO_ACTIVE_HIGH>; + vbus-out-gpio = <&pm8150_gpios 9 GPIO_ACTIVE_HIGH>; + + pinctrl-names = "default"; + pinctrl-0 = <&usb2_vbus_det_default + &usb2_id_det_default + &usb2_vbus_boost_default>; + }; }; &qupv3_se13_4uart { @@ -88,6 +100,7 @@ clock-frequency = 
<25000000>; qcom,ipc-gpio = <&tlmm 118 0>; qcom,finger-detect-gpio = <&pm8150_gpios 1 0>; + status = "disabled"; }; }; @@ -289,6 +302,10 @@ status = "ok"; }; +&spmi_debug_bus { + status = "ok"; +}; + &pm8150l_wled { qcom,string-cfg= <7>; status = "ok"; @@ -438,7 +455,6 @@ &mhi_0 { mhi,fw-name = "debug.mbn"; - status = "okay"; }; &pm8150b_adc_tm { @@ -548,3 +564,7 @@ }; }; }; + +&usb1 { + extcon = <&extcon_usb1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..b5055c8ab3ec6132ca24929c252ac6d0566f223b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&pcie1 { + pci,bus@1 { + reg = <0 0 0 0 0>; + + mhi_0: qcom,mhi@0 { + reg = <0 0 0 0 0 >; + + /* controller specific configuration */ + qcom,smmu-cfg = <0x3>; + qcom,msm-bus,name = "mhi"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <100 512 0 0>, + <100 512 1200000000 650000000>; + + /* mhi bus specific settings */ + mhi,max-channels = <106>; + mhi,timeout = <2000>; + + #address-cells = <1>; + #size-cells = <0>; + + mhi_chan@0 { + reg = <0>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@1 { + reg = <1>; + label = "LOOPBACK"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@2 { + reg = <2>; + label = "SAHARA"; + mhi,num-elements = <128>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <1>; + }; + + mhi_chan@3 { + reg = <3>; + label = "SAHARA"; + mhi,num-elements = <128>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <1>; + }; + + mhi_chan@4 { + reg = <4>; + label = "DIAG"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@5 { + reg = <5>; + label = "DIAG"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@8 { + reg = <8>; + label = "QDSS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@9 { + reg = <9>; + label = "QDSS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = 
<2>; + mhi,ee = <2>; + }; + + mhi_chan@10 { + reg = <10>; + label = "EFS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@11 { + reg = <11>; + label = "EFS"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@14 { + reg = <14>; + label = "QMI0"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@15 { + reg = <15>; + label = "QMI0"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@16 { + reg = <16>; + label = "QMI1"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@17 { + reg = <17>; + label = "QMI1"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@18 { + reg = <18>; + label = "IP_CTRL"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@19 { + reg = <19>; + label = "IP_CTRL"; + mhi,num-elements = <64>; + mhi,event-ring = <1>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + mhi,auto-queue; + }; + + mhi_chan@20 { + reg = <20>; + label = "IPCR"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + mhi,auto-start; + }; + + mhi_chan@21 { + reg = <21>; + label = "IPCR"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = 
<2>; + mhi,auto-queue; + mhi,auto-start; + }; + + mhi_chan@22 { + reg = <22>; + label = "TF"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@23 { + reg = <23>; + label = "TF"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@24 { + reg = <24>; + label = "BL"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <1>; + }; + + mhi_chan@25 { + reg = <25>; + label = "BL"; + mhi,num-elements = <64>; + mhi,event-ring = <2>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <1>; + }; + + mhi_chan@26 { + reg = <26>; + label = "DCI"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@27 { + reg = <27>; + label = "DCI"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@32 { + reg = <32>; + label = "DUN"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <1>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@33 { + reg = <33>; + label = "DUN"; + mhi,num-elements = <64>; + mhi,event-ring = <3>; + mhi,chan-dir = <2>; + mhi,data-type = <0>; + mhi,doorbell-mode = <2>; + mhi,ee = <2>; + }; + + mhi_chan@100 { + reg = <100>; + label = "IP_HW0"; + mhi,num-elements = <512>; + mhi,event-ring = <4>; + mhi,chan-dir = <1>; + mhi,data-type = <1>; + mhi,doorbell-mode = <3>; + mhi,ee = <2>; + mhi,db-mode-switch; + }; + + mhi_chan@101 { + reg = <101>; + label = "IP_HW0"; + mhi,num-elements = <512>; + mhi,event-ring = <5>; + mhi,chan-dir = <2>; + mhi,data-type = <1>; + mhi,doorbell-mode = <3>; + mhi,ee = 
<2>; + }; + + mhi_event@0 { + mhi,num-elements = <32>; + mhi,intmod = <1>; + mhi,msi = <1>; + mhi,priority = <1>; + mhi,brstmode = <2>; + mhi,data-type = <1>; + }; + + mhi_event@1 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <2>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@2 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <3>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@3 { + mhi,num-elements = <256>; + mhi,intmod = <1>; + mhi,msi = <4>; + mhi,priority = <1>; + mhi,brstmode = <2>; + }; + + mhi_event@4 { + mhi,num-elements = <1024>; + mhi,intmod = <5>; + mhi,msi = <5>; + mhi,chan = <100>; + mhi,priority = <1>; + mhi,brstmode = <3>; + mhi,hw-ev; + }; + + mhi_event@5 { + mhi,num-elements = <1024>; + mhi,intmod = <5>; + mhi,msi = <6>; + mhi,chan = <101>; + mhi,priority = <1>; + mhi,brstmode = <3>; + mhi,hw-ev; + mhi,client-manage; + }; + + mhi_netdev_0: mhi_rmnet@0 { + reg = <0x0>; + mhi,chan = "IP_HW0"; + mhi,interface-name = "rmnet_mhi"; + mhi,mru = <0x4000>; + }; + + mhi_netdev_1: mhi_rmnet@1 { + reg = <0x1>; + mhi,chan = "IP_HW_ADPL"; + mhi,interface-name = "rmnet_mhi"; + mhi,mru = <0x4000>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi index 239f3066c510d23368bbdcd09403c36caddd4033..e330b3a35b2236a28d0befff6f2d404e6cd225d5 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi @@ -44,6 +44,18 @@ qcom,batt-id-range-pct = <15>; #include "fg-gen4-batterydata-alium-3600mah.dtsi" }; + + extcon_usb1: extcon_usb1 { + compatible = "linux,extcon-usb-gpio"; + vbus-gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>; + id-gpio = <&tlmm 101 GPIO_ACTIVE_HIGH>; + vbus-out-gpio = <&pm8150_gpios 9 GPIO_ACTIVE_HIGH>; + + pinctrl-names = "default"; + pinctrl-0 = <&usb2_vbus_det_default + &usb2_id_det_default + &usb2_vbus_boost_default>; + }; }; &qupv3_se13_4uart { @@ -64,18 +76,7 @@ label = "gpio-keys"; 
pinctrl-names = "default"; - pinctrl-0 = <&key_home_default - &key_vol_up_default>; - - home { - label = "home"; - gpios = <&pm8150_gpios 1 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - linux,code = ; - gpio-key,wakeup; - debounce-interval = <15>; - linux,can-disable; - }; + pinctrl-0 = <&key_vol_up_default>; vol_up { label = "volume_up"; @@ -93,6 +94,8 @@ clock-names = "core", "iface"; clock-frequency = <25000000>; qcom,ipc-gpio = <&tlmm 118 0>; + pinctrl-names = "default"; + pinctrl-0 = <&key_home_default>; qcom,finger-detect-gpio = <&pm8150_gpios 1 0>; }; }; @@ -276,6 +279,10 @@ status = "ok"; }; +&spmi_debug_bus { + status = "ok"; +}; + &pm8150l_wled { qcom,string-cfg= <7>; status = "ok"; @@ -287,6 +294,8 @@ &pm8150b_fg { qcom,battery-data = <&mtp_batterydata>; + qcom,hold-soc-while-full; + qcom,linearize-soc; }; &sdhc_2 { @@ -437,7 +446,6 @@ &mhi_0 { mhi,fw-name = "debug.mbn"; - status = "okay"; }; &pm8150b_adc_tm { @@ -550,6 +558,7 @@ &pm8150b_charger { qcom,sec-charger-config = <1>; + qcom,auto-recharge-soc = <98>; io-channels = <&pm8150b_vadc ADC_USB_IN_V_16>, <&pm8150b_vadc ADC_USB_IN_I>, <&pm8150b_vadc ADC_CHG_TEMP>; @@ -567,3 +576,7 @@ &smb1390_charger { status = "ok"; }; + +&usb1 { + extcon = <&extcon_usb1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi index 9dc312e994e4dbd694954e5ace815414ec4729f5..fd99bfeb4482afadff44cb1b3f554d3c29d46eb1 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi @@ -16,7 +16,10 @@ status = "ok"; reg = <0x9800000 0x800000>; reg-names = "npu_base"; - interrupts = ; + interrupts = , + , + ; + interrupt-names = "error_irq", "wdg_bite_irq", "ipc_irq"; iommus = <&apps_smmu 0x1461 0x0>, <&apps_smmu 0x2061 0x0>; cache-slice-names = "npu"; cache-slices = <&llcc 23>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi index 
618c6044d1e5dcaae53b70d831cd672ca00fd72a..9fc716ff912672a975cba05f0756372ddcf5e474 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi @@ -3938,5 +3938,14 @@ bias-disable; }; }; + + usb2_id_det_default: usb2_id_det_default { + config { + pins = "gpio101"; + function = "gpio"; + input-enable; + bias-pull-up; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi index 6b5ad82623c74b444e60be92d30741fdacf0d872..6d137062270aa618b75495fa1fc1e898c7ccec0c 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi @@ -37,6 +37,25 @@ power-source = <1>; }; }; + + usb2_vbus_boost { + usb2_vbus_boost_default: usb2_vbus_boost_default { + pins = "gpio9"; + function = "normal"; + output-low; + power-source = <1>; /* 1.8V input supply */ + }; + }; + + usb2_vbus_det { + usb2_vbus_det_default: usb2_vbus_det_default { + pins = "gpio10"; + function = "normal"; + input-enable; + bias-pull-down; + power-source = <1>; /* 1.8V input supply */ + }; + }; }; &pm8150l_gpios { diff --git a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi index 0c05eb48d78c3b6cc246bdb9fd11c7b38b743617..f347370cc17a41c722477f49b59def2aa4769896 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi @@ -44,6 +44,8 @@ &pm8150b_fg { qcom,battery-data = <&qrd_batterydata>; + qcom,hold-soc-while-full; + qcom,linearize-soc; }; &soc { @@ -52,18 +54,7 @@ label = "gpio-keys"; pinctrl-names = "default"; - pinctrl-0 = <&key_home_default - &key_vol_up_default>; - - home { - label = "home"; - gpios = <&pm8150_gpios 1 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - linux,code = ; - gpio-key,wakeup; - debounce-interval = <15>; - linux,can-disable; - }; + pinctrl-0 = <&key_vol_up_default>; vol_up { label = "volume_up"; @@ -81,6 +72,8 @@ clock-names = "core", 
"iface"; clock-frequency = <25000000>; qcom,ipc-gpio = <&tlmm 118 0>; + pinctrl-names = "default"; + pinctrl-0 = <&key_home_default>; qcom,finger-detect-gpio = <&pm8150_gpios 1 0>; }; }; @@ -488,8 +481,13 @@ }; }; +&spmi_debug_bus { + status = "ok"; +}; + &pm8150b_charger { qcom,sec-charger-config = <1>; + qcom,auto-recharge-soc = <98>; io-channels = <&pm8150b_vadc ADC_USB_IN_V_16>, <&pm8150b_vadc ADC_USB_IN_I>, <&pm8150b_vadc ADC_CHG_TEMP>; @@ -507,3 +505,9 @@ &smb1390_charger { status = "ok"; }; + +&usb2_phy0 { + qcom,param-override-seq = + <0x49 0x70 + 0x28 0x74>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi index 77cbbacc9193144ccc2a51ff04f2cc10e24d463d..d50dd42bdbc4c24e12d3319ba902eebaf014e09b 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi @@ -400,6 +400,13 @@ /* PHY TIMINGS REVISION P */ &dsi_dual_nt35597_truly_video { + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x9c>; + qcom,mdss-dsi-panel-on-check-value = <0x9c>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07 @@ -412,6 +419,13 @@ }; &dsi_dual_nt35597_truly_cmd { + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x9c>; + qcom,mdss-dsi-panel-on-check-value = <0x9c>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 1c 08 07 23 22 07 @@ -450,6 +464,13 @@ }; &dsi_sharp_4k_dsc_video { + qcom,esd-check-enabled; + 
qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0c]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x7>; + qcom,mdss-dsi-panel-on-check-value = <0x7>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08 @@ -461,6 +482,13 @@ }; &dsi_sharp_4k_dsc_cmd { + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0c]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x7>; + qcom,mdss-dsi-panel-on-check-value = <0x7>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 1e 08 07 24 22 08 diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi index 267a023a1a251164a85a329a2b655625eb01a0e9..78780b926d67c5fd86751276b941b79a377d76e2 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi @@ -26,11 +26,13 @@ <&clock_gcc GCC_DISP_HF_AXI_CLK>, <&clock_dispcc DISP_CC_MDSS_AHB_CLK>, <&clock_dispcc DISP_CC_MDSS_MDP_CLK>, - <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>; + <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>, + <&clock_dispcc DISP_CC_MDSS_MDP_LUT_CLK>; clock-names = "gcc_iface", "gcc_bus", - "iface_clk", "core_clk", "vsync_clk"; - clock-rate = <0 0 0 300000000 19200000>; - clock-max-rate = <0 0 0 460000000 19200000>; + "iface_clk", "core_clk", "vsync_clk", + "lut_clk"; + clock-rate = <0 0 0 300000000 19200000 300000000>; + clock-max-rate = <0 0 0 460000000 19200000 460000000>; sde-vdd-supply = <&mdss_core_gdsc>; @@ -193,12 +195,7 @@ qcom,sde-qos-cpu-mask = <0x3>; qcom,sde-qos-cpu-dma-latency = <300>; - qcom,sde-inline-rotator = <&mdss_rotator 0>; - qcom,sde-inline-rot-xin = <10 11>; - 
qcom,sde-inline-rot-xin-type = "sspp", "wb"; - /* offsets are relative to "mdp_phys + qcom,sde-off */ - qcom,sde-inline-rot-clk-ctrl = <0x2bc 0x8>, <0x2bc 0xc>; qcom,sde-reg-dma-off = <0>; qcom,sde-reg-dma-version = <0x00010001>; @@ -240,6 +237,7 @@ qcom,sde-dspp-gamut = <0x1000 0x00040001>; qcom,sde-dspp-pcc = <0x1700 0x00040000>; qcom,sde-dspp-gc = <0x17c0 0x00010008>; + qcom,sde-dspp-dither = <0x82c 0x00010007>; }; qcom,platform-supply-entries { @@ -419,13 +417,6 @@ qcom,mdss-rot-danger-lut = <0x0 0x0>; qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>; - /* Inline rotator QoS Setting */ - /* setting default register values for RD - qos/danger/safe */ - qcom,mdss-inline-rot-qos-lut = <0x44556677 0x00112233 - 0x44556677 0x00112233>; - qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>; - qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000f000>; - qcom,mdss-default-ot-rd-limit = <32>; qcom,mdss-default-ot-wr-limit = <32>; @@ -485,7 +476,7 @@ qcom,supply-min-voltage = <1200000>; qcom,supply-max-voltage = <1200000>; qcom,supply-enable-load = <21800>; - qcom,supply-disable-load = <4>; + qcom,supply-disable-load = <0>; }; }; qcom,core-supply-entries { @@ -531,7 +522,7 @@ qcom,supply-min-voltage = <1200000>; qcom,supply-max-voltage = <1200000>; qcom,supply-enable-load = <21800>; - qcom,supply-disable-load = <4>; + qcom,supply-disable-load = <0>; }; }; qcom,core-supply-entries { @@ -576,7 +567,7 @@ qcom,supply-min-voltage = <880000>; qcom,supply-max-voltage = <880000>; qcom,supply-enable-load = <36000>; - qcom,supply-disable-load = <32>; + qcom,supply-disable-load = <0>; }; }; }; @@ -608,7 +599,7 @@ qcom,supply-min-voltage = <880000>; qcom,supply-max-voltage = <880000>; qcom,supply-enable-load = <36000>; - qcom,supply-disable-load = <32>; + qcom,supply-disable-load = <0>; }; }; }; @@ -690,7 +681,7 @@ qcom,supply-min-voltage = <1200000>; qcom,supply-max-voltage = <1200000>; qcom,supply-enable-load = <21800>; - qcom,supply-disable-load = <4>; + 
qcom,supply-disable-load = <0>; }; }; @@ -704,7 +695,7 @@ qcom,supply-min-voltage = <880000>; qcom,supply-max-voltage = <880000>; qcom,supply-enable-load = <36000>; - qcom,supply-disable-load = <32>; + qcom,supply-disable-load = <0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..4e590ea853d38e72c97ea2a2c4f20c5982a482d8 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts @@ -0,0 +1,30 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include +#include +#include + +#include "sm8150-mtp.dtsi" + +#include "sdx50m-external-soc.dtsi" +#include "sm8150-sdx50m.dtsi" + +/ { + model = "SDX50M 2.5k panel MTP"; + compatible = "qcom,sm8150-mtp", "qcom,sm8150", "qcom,mtp"; + qcom,board-id = <0x01010008 0x1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-qrd-overlay.dts new file mode 100644 index 0000000000000000000000000000000000000000..b9fa78eea6effcf1db591dc38f8689111ff6231d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-qrd-overlay.dts @@ -0,0 +1,30 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include +#include +#include + +#include "sm8150-qrd.dtsi" + +#include "sdx50m-external-soc.dtsi" +#include "sm8150-sdx50m.dtsi" + +/ { + model = "SDX50M QRD"; + compatible = "qcom,sm8150-qrd", "qcom,sm8150", "qcom,qrd"; + qcom,board-id = <11 1>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi index 391e8457c6e5d05db553a5be1d8c1d374ddf5ba1..e715a75c3addd8e0f5ca5648c939e2ef4dcb640c 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi @@ -48,7 +48,6 @@ qcom,smmu-cfg = <0x1d>; qcom,addr-win = <0x0 0x20000000 0x0 0x3fffffff>; mhi,fw-name = "sdx50m/sbl1.mbn"; - status = "okay"; }; &tlmm { @@ -85,3 +84,23 @@ qcom,mhi-erdb-base = <0x40300700>; }; }; + +&reserved_memory { + pil_buffer_p2_mem: pil_buffer_p2_region@a0000000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0xa0000000 0x0 0x01000000>; + }; + + pil_buffer_p1_mem: pil_buffer_p1_region@0a1000000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0xa1000000 0x0 0x02c00000>; + }; + + pil_pcie_mem: pil_pcie_mem_region@a3c00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0xa3c00000 0x0 0x01000000>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi index 1291bedf4f5cbdb16c891044cb1cc964a1dd8235..36261b8c6982c5c1bb47ef3563873c7dd025f9d0 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi +++ 
b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi @@ -152,6 +152,7 @@ qcom,vdd-voltage-level = <0 880000 880000>; core-supply = <&pm8150l_l3>; qcom,vbus-valid-override; + qcom,link-training-reset; qcom,qmp-phy-init-seq = /* */ ; qcom,qmp-phy-reg-offset = @@ -296,7 +298,10 @@ USB3_DP_COM_PHY_MODE_CTRL USB3_DP_COM_TYPEC_CTRL USB3_DP_COM_SWI_CTRL - USB3_DP_PCS_CLAMP_ENABLE>; + USB3_DP_PCS_CLAMP_ENABLE + USB3_DP_PCS_PCS_STATUS2 + USB3_DP_PCS_INSIG_SW_CTRL3 + USB3_DP_PCS_INSIG_MX_CTRL3>; clocks = <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>, <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>, @@ -321,4 +326,229 @@ usb_nop_phy: usb_nop_phy { compatible = "usb-nop-xceiv"; }; + + /* Secondary USB port related controller */ + usb1: ssusb@a800000 { + compatible = "qcom,dwc-usb3-msm"; + reg = <0x0a800000 0x100000>; + reg-names = "core_base"; + + iommus = <&apps_smmu 0x160 0x0>; + qcom,smmu-s1-bypass; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + interrupts = <0 491 0>, <0 135 0>, <0 487 0>, <0 490 0>; + interrupt-names = "dp_hs_phy_irq", "pwr_event_irq", + "ss_phy_irq", "dm_hs_phy_irq"; + qcom,use-pdc-interrupts; + + USB3_GDSC-supply = <&usb30_sec_gdsc>; + clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>, + <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>, + <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>, + <&clock_gcc GCC_USB30_SEC_MOCK_UTMI_CLK>, + <&clock_gcc GCC_USB30_SEC_SLEEP_CLK>, + <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>; + clock-names = "core_clk", "iface_clk", "bus_aggr_clk", + "utmi_clk", "sleep_clk", "xo"; + + resets = <&clock_gcc GCC_USB30_SEC_BCR>; + reset-names = "core_reset"; + + qcom,core-clk-rate = <200000000>; + qcom,core-clk-rate-hs = <66666667>; + qcom,num-gsi-evt-buffs = <0x3>; + qcom,dwc-usb3-msm-tx-fifo-size = <27696>; + qcom,charging-disabled; + + qcom,msm-bus,name = "usb1"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <3>; + qcom,msm-bus,vectors-KBps = + /* suspend vote */ + , + , + , + + /* nominal vote */ + , + , + , + + /* svs vote */ + , + , + ; + + status = 
"disabled"; + + dwc3@a800000 { + compatible = "snps,dwc3"; + reg = <0x0a800000 0xcd00>; + interrupts = <0 138 0>; + usb-phy = <&usb2_phy1>, <&usb_qmp_phy>; + linux,sysdev_is_parent; + snps,disable-clk-gating; + snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; + snps,usb3_lpm_capable; + usb-core-id = <0>; + maximum-speed = "super-speed"; + dr_mode = "otg"; + }; + }; + + /* Primary USB port related High Speed PHY */ + usb2_phy1: hsphy@88e3000 { + compatible = "qcom,usb-hsphy-snps-femto"; + reg = <0x88e3000 0x110>; + reg-names = "hsusb_phy_base"; + + vdd-supply = <&pm8150_l5>; + vdda18-supply = <&pm8150_l12>; + vdda33-supply = <&pm8150_l2>; + qcom,vdd-voltage-level = <0 880000 880000>; + + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "ref_clk_src"; + + resets = <&clock_gcc GCC_QUSB2PHY_SEC_BCR>; + reset-names = "phy_reset"; + + status = "disabled"; + }; + + /* Secondary USB port related QMP PHY */ + usb_qmp_phy: ssphy@88eb000 { + compatible = "qcom,usb-ssphy-qmp-v2"; + reg = <0x88eb000 0x1000>, + <0x088eb88c 0x4>; + reg-names = "qmp_phy_base", + "pcs_clamp_enable_reg"; + + vdd-supply = <&pm8150_l5>; + qcom,vdd-voltage-level = <0 880000 880000>; + core-supply = <&pm8150l_l3>; + qcom,vbus-valid-override; + qcom,qmp-phy-init-seq = + /* */ + ; + + qcom,qmp-phy-reg-offset = + ; + + clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>, + <&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>, + <&clock_gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>; + clock-names = "aux_clk", "pipe_clk", "ref_clk_src", + "ref_clk", "com_aux_clk"; + + resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>, + <&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>; + reset-names = "phy_reset", "phy_phy_reset"; + + status = "disabled"; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi index 3a1cc7fa02ddeb005db6cce85433742cd00b6dd8..5380383db666de4c4d91b838634f6178c8711a3c 100644 --- 
a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi @@ -17,3 +17,23 @@ qcom,msm-name = "SM8150 V2"; qcom,msm-id = <339 0x20000>; }; + +&clock_gcc { + compatible = "qcom,gcc-sm8150-v2", "syscon"; +}; + +&clock_camcc { + compatible = "qcom,camcc-sm8150-v2", "syscon"; +}; + +&clock_dispcc { + compatible = "qcom,dispcc-sm8150-v2", "syscon"; +}; + +&clock_videocc { + compatible = "qcom,videocc-sm8150-v2", "syscon"; +}; + +&clock_npucc { + compatible = "qcom,npucc-sm8150-v2", "syscon"; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi b/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi index b95c24621064cedce27cbaeda335fae2b6f287dc..09e81e318c0f7136cead13027bd1e41b62f2b101 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-vidc.dtsi @@ -124,9 +124,9 @@ }; /* Memory Heaps */ - qcom,msm-vidc,mem_adsp { - compatible = "qcom,msm-vidc,mem-adsp"; - memory-region = <&adsp_mem>; + qcom,msm-vidc,mem_cdsp { + compatible = "qcom,msm-vidc,mem-cdsp"; + memory-region = <&cdsp_mem>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index fc37c99832e9e442e28befe47b877f35289fe444..9ebd06d7773a42cda87e0fd53c214db635c11ff4 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -25,6 +25,7 @@ #include #include #include +#include #define MHZ_TO_MBPS(mhz, w) ((mhz * 1000000 * w) / (1024 * 1024)) #define BW_OPP_ENTRY(mhz, w) opp-mhz {opp-hz = /bits/ 64 ;} @@ -36,6 +37,13 @@ qcom,msm-id = <339 0x10000>; interrupt-parent = <&pdc>; + mem-offline { + compatible = "qcom,mem-offline"; + mem-percent = "35"; + granule = <512>; + mboxes = <&qmp_aop 0>; + }; + aliases { ufshc1 = &ufshc_mem; /* Embedded UFS slot */ sdhc2 = &sdhc_2; /* SDC2 SD card slot */ @@ -553,7 +561,7 @@ }; chosen { - bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7"; + bootargs = "rcupdate.rcu_expedited=1 rcu_nocbs=0-7 cgroup.memory=nokmem,nosocket"; }; soc: soc { 
}; @@ -575,7 +583,7 @@ }; }; - reserved-memory { + reserved_memory: reserved-memory { #address-cells = <2>; #size-cells = <2>; ranges; @@ -698,7 +706,15 @@ alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; reusable; alignment = <0x0 0x400000>; - size = <0x0 0x1000000>; + size = <0x0 0xc00000>; + }; + + cdsp_mem: cdsp_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; + reusable; + alignment = <0x0 0x400000>; + size = <0x0 0x400000>; }; qseecom_ta_mem: qseecom_ta_region { @@ -1257,6 +1273,11 @@ reg = <0x65c 4>; }; + dload_type@1c { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x1c 0x4>; + }; + boot_stats@6b0 { compatible = "qcom,msm-imem-boot_stats"; reg = <0x6b0 32>; @@ -1487,6 +1508,65 @@ cell-index = <0>; }; + spmi_debug_bus: qcom,spmi-debug@6b22000 { + compatible = "qcom,spmi-pmic-arb-debug"; + reg = <0x6b22000 0x60>, <0x7820a8 4>; + reg-names = "core", "fuse"; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "core_clk"; + qcom,fuse-disable-bit = <24>; + #address-cells = <2>; + #size-cells = <0>; + status = "disabled"; + + qcom,pm8150-debug@0 { + compatible = "qcom,spmi-pmic"; + reg = <0x0 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + qcom,can-sleep; + }; + + qcom,pm8150-debug@1 { + compatible = "qcom,spmi-pmic"; + reg = <0x1 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + qcom,can-sleep; + }; + + qcom,pm8150b-debug@2 { + compatible = "qcom,spmi-pmic"; + reg = <0x2 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + qcom,can-sleep; + }; + + qcom,pm8150b-debug@3 { + compatible = "qcom,spmi-pmic"; + reg = <0x3 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + qcom,can-sleep; + }; + + qcom,pm8150l-debug@4 { + compatible = "qcom,spmi-pmic"; + reg = <0x4 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + qcom,can-sleep; + }; + + qcom,pm8150l-debug@5 { + compatible = "qcom,spmi-pmic"; + reg = <0x5 SPMI_USID>; + #address-cells = <2>; + #size-cells = <0>; + qcom,can-sleep; + 
}; + }; eud: qcom,msm-eud@88e0000 { compatible = "qcom,msm-eud"; @@ -1817,6 +1897,10 @@ cap-based-alloc-and-pwr-collapse; }; + qcom,llcc-perfmon { + compatible = "qcom,llcc-perfmon"; + }; + qcom,llcc-erp { compatible = "qcom,llcc-erp"; interrupt-names = "ecc_irq"; @@ -2093,10 +2177,10 @@ <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>, <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>; freq-table-hz = - <50000000 200000000>, + <37500000 300000000>, <0 0>, <0 0>, - <37500000 150000000>, + <37500000 300000000>, <75000000 300000000>, <0 0>, <0 0>, @@ -2883,7 +2967,7 @@ }; dcc: dcc_v2@10a2000 { - compatible = "qcom,dcc_v2"; + compatible = "qcom,dcc-v2"; reg = <0x10a2000 0x1000>, <0x10ae000 0x2000>; reg-names = "dcc-base", "dcc-ram-base"; @@ -2891,212 +2975,256 @@ dcc-ram-offset = <0x6000>; qcom,curr-link-list = <2>; - qcom,link-list = , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; + qcom,link-list = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; }; tsens0: tsens@c222000 { @@ -3413,73 +3541,6 @@ qcom,smmu-coherent; status = "disabled"; }; - - mhi_0: qcom,mhi@0 { - /* controller specific configuration */ - compatible = "qcom,mhi"; - qcom,pci-domain = <1>; - qcom,pci-bus = <1>; - qcom,pci-slot = <0>; - qcom,smmu-cfg = <0x3>; - qcom,msm-bus,name = "mhi"; - qcom,msm-bus,num-cases = <2>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = - <100 512 0 0>, - <100 512 1200000000 650000000>; - - /* mhi bus specific settings */ - mhi,max-channels = <106>; - mhi,chan-cfg = - <0 64 2 1 2 0 2 0 0>, <1 64 2 2 2 0 2 0 0>, - <2 128 1 1 2 0 1 0 0>, <3 128 1 2 2 0 1 0 0>, - <4 64 1 1 2 0 2 0 0>, <5 64 3 2 2 0 2 0 0>, - <8 64 1 1 2 0 2 0 0>, <9 64 1 2 2 0 2 0 0>, - <10 64 1 1 2 0 2 0 0>, <11 64 1 2 2 0 2 0 0>, - <14 64 1 1 2 0 2 0 0>, <15 64 2 2 2 0 2 0 0>, - <16 64 3 1 2 0 2 0 0>, <17 64 3 2 2 0 2 0 0>, - <18 64 1 1 2 0 2 0 0>, <19 64 1 2 2 0 2 0 8>, - <20 64 2 1 2 0 2 1 16>, <21 64 2 2 2 0 2 0 24>, - <22 64 2 1 2 0 2 0 0>, <23 64 2 2 2 0 2 0 0>, - <24 64 2 1 2 0 1 0 0>, <25 64 2 2 2 0 1 0 0>, - <26 64 3 1 2 0 2 0 0>, <27 64 3 2 2 0 2 0 0>, - <32 64 3 1 2 0 2 0 0>, <33 64 3 2 2 0 2 0 0>, - <100 512 4 1 3 1 2 1 0x4>, <101 512 5 2 3 1 2 1 0>; - mhi,chan-names = "LOOPBACK", "LOOPBACK", - "SAHARA", "SAHARA", - "DIAG", "DIAG", - "QDSS", "QDSS", - "EFS", "EFS", - 
"QMI0", "QMI0", - "QMI1", "QMI1", - "IP_CTRL", "IP_CTRL", - "IPCR", "IPCR", - "TF", "TF", - "BL", "BL", - "DCI", "DCI", - "DUN", "DUN", - "IP_HW0", "IP_HW0"; - mhi,ev-cfg = <32 0 1 0 1 2 0x8>, - <256 1 2 0 1 2 0>, - <256 1 3 0 1 2 0>, - <256 1 4 0 1 2 0>, - <1024 5 5 100 1 3 0x1>, - <1024 5 6 101 1 3 0x3>; - mhi,timeout = <2000>; - status = "disabled"; - - mhi_netdev_0: mhi_rmnet@0 { - mhi,chan = "IP_HW0"; - mhi,interface-name = "rmnet_mhi"; - mhi,mru = <0x4000>; - }; - - mhi_netdev_1: mhi_rmnet@1 { - mhi,chan = "IP_HW_ADPL"; - mhi,interface-name = "rmnet_mhi"; - mhi,mru = <0x4000>; - }; - }; }; &emac_gdsc { @@ -3675,6 +3736,7 @@ qcom,msm-bus,vectors-KBps = , ; + qcom,support-hw-trigger; status = "ok"; }; @@ -3689,6 +3751,7 @@ qcom,msm-bus,vectors-KBps = , ; + qcom,support-hw-trigger; status = "ok"; }; @@ -3713,3 +3776,4 @@ #include "sm8150-thermal.dtsi" #include "sm8150-usb.dtsi" #include "sm8150-gpu.dtsi" +#include "sm8150-mhi.dtsi" diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index d4f80786e7c20c0a46bacb9636f8defc7ef137b4..28257724a56e74b79b83c69a76bea0da4e0fd9ed 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -136,11 +136,12 @@ phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmiim1_pins>; + snps,force_thresh_dma_mode; snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - tx_delay = <0x26>; - rx_delay = <0x11>; + tx_delay = <0x24>; + rx_delay = <0x18>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 41d61840fb99ce52ec553c94e119ab63bb79cdbe..d70e409e2b0cbd7a2a1d07f2c84e86475fe8fd02 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -683,7 +683,7 @@ interrupts = ; clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>, <&cru SCLK_SDMMC_DRV>, 
<&cru SCLK_SDMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; status = "disabled"; }; @@ -694,7 +694,7 @@ interrupts = ; clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>, <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; status = "disabled"; }; @@ -705,7 +705,7 @@ interrupts = ; clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>, <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 1070c8264c13376a578338e95421f71321825243..2313aea0e69edb8496a9e4d424599f0a04e86736 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -257,7 +257,7 @@ max-frequency = <150000000>; clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>, <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>; - clock-names = "biu", "ciu", "ciu_drv", "ciu_sample"; + clock-names = "biu", "ciu", "ciu-drive", "ciu-sample"; fifo-depth = <0x100>; interrupts = ; resets = <&cru SRST_SDIO0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 199a5118b20dab39f744dfc21814b1a9dafb166f..264a6bb60c5386a81a75a6b5e2eb15a2fb591f22 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -406,8 +406,9 @@ wlan_pd_n: wlan-pd-n { compatible = "regulator-fixed"; regulator-name = "wlan_pd_n"; + pinctrl-names = "default"; + pinctrl-0 = <&wlan_module_reset_l>; - /* Note the wlan_module_reset_l pinctrl */ enable-active-high; gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>; @@ -940,12 +941,6 @@ ap_i2c_audio: &i2c8 { pinctrl-0 = < 
&ap_pwroff /* AP will auto-assert this when in S3 */ &clk_32k /* This pin is always 32k on gru boards */ - - /* - * We want this driven low ASAP; firmware should help us, but - * we can help ourselves too. - */ - &wlan_module_reset_l >; pcfg_output_low: pcfg-output-low { @@ -1125,12 +1120,7 @@ ap_i2c_audio: &i2c8 { }; wlan_module_reset_l: wlan-module-reset-l { - /* - * We want this driven low ASAP (As {Soon,Strongly} As - * Possible), to avoid leakage through the powered-down - * WiFi. - */ - rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>; + rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>; }; bt_host_wake_l: bt-host-wake-l { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 0f873c897d0de5a75f9d4e4d90d7c658b7a173d3..ce592a4c0c4cdeb473a1c96106620583a8a4abef 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi @@ -457,7 +457,7 @@ assigned-clocks = <&cru SCLK_PCIEPHY_REF>; assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; assigned-clock-rates = <100000000>; - ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>; + ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>; num-lanes = <4>; pinctrl-names = "default"; pinctrl-0 = <&pcie_clkreqn_cpm>; diff --git a/arch/arm64/configs/qcs405-perf_defconfig b/arch/arm64/configs/qcs405-perf_defconfig deleted file mode 100644 index cf9257f6924a20ecd268c0b1a59641c23ce85af6..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/qcs405-perf_defconfig +++ /dev/null @@ -1,411 +0,0 @@ -CONFIG_AUDIT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_BLK_DEV_INITRD=y 
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_QCS405=y -CONFIG_PREEMPT=y -CONFIG_CLEANCACHE=y -CONFIG_CMA=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_CPU_FREQ=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y 
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y 
-CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_QRTR=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_BT=y -CONFIG_BT_RFCOMM=y -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=y -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=y -CONFIG_CFG80211=y -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_RFKILL=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_DMA_CMA=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_UID_SYS_STATS=y -CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_KS8851=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y -CONFIG_USB_NET_SMSC75XX=y -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_INPUT_EVDEV=y -CONFIG_INPUT_EVBUG=m -CONFIG_INPUT_KEYRESET=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -CONFIG_JOYSTICK_XPAD=y -CONFIG_INPUT_TABLET=y -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ATMEL_MXT=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_KEYCHORD=y -CONFIG_INPUT_UINPUT=y -CONFIG_INPUT_GPIO=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVMEM is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_SERIAL_MSM_HS=y -CONFIG_HW_RANDOM=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MSM_V2=y -CONFIG_SPI=y -CONFIG_SPI_QUP=y 
-CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_SLIMBUS=y -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_PINCTRL_QCS405=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_THERMAL=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_MFD_SYSCON=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_CPR=y -CONFIG_REGULATOR_MEM_ACC=y -CONFIG_REGULATOR_RPM_SMD=y -CONFIG_REGULATOR_SPM=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SOC_CAMERA=y -CONFIG_SOC_CAMERA_PLATFORM=y -CONFIG_FB=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_SOC=y -CONFIG_HIDRAW=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_USB_HIDDEV=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_ACM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_STORAGE_DATAFAB=y -CONFIG_USB_STORAGE_FREECOM=y -CONFIG_USB_STORAGE_ISD200=y -CONFIG_USB_STORAGE_USBAT=y -CONFIG_USB_STORAGE_SDDR09=y -CONFIG_USB_STORAGE_SDDR55=y -CONFIG_USB_STORAGE_JUMPSHOT=y -CONFIG_USB_STORAGE_ALAUDA=y -CONFIG_USB_STORAGE_KARMA=y -CONFIG_USB_STORAGE_CYPRESS_ATACB=y -CONFIG_USB_DWC3=y -CONFIG_USB_SERIAL=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_VBUS_DRAW=500 -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y 
-CONFIG_MMC_TEST=m -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_SPS_DMA=y -CONFIG_UIO=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_QCOM_CLK_SMD_RPM=y -CONFIG_MDM_GCC_QCS405=y -CONFIG_MDM_DEBUGCC_QCS405=y -CONFIG_HWSPINLOCK=y -CONFIG_MAILBOX=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y -CONFIG_RPMSG_QCOM_SMD=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD_RPM=y -CONFIG_MSM_SPM=y -CONFIG_MSM_L2_SPM=y -CONFIG_QCOM_SCM=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_MSM_RPM_SMD=y -CONFIG_IIO=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_PAGE_OWNER=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y -CONFIG_UFS_FAULT_INJECTION=y -CONFIG_IPC_LOGGING=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_LKDTM=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_LSM_MMAP_MIN_ADDR=4096 -CONFIG_HARDENED_USERCOPY=y -CONFIG_SECURITY_SELINUX=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y diff --git 
a/arch/arm64/configs/qcs405_defconfig b/arch/arm64/configs/qcs405_defconfig index 5e6eb86de19362a9566ef276b2ad5bd6526783b1..90325456690544158ea9ff393f2114355ee4d918 100644 --- a/arch/arm64/configs/qcs405_defconfig +++ b/arch/arm64/configs/qcs405_defconfig @@ -37,7 +37,6 @@ CONFIG_CLEANCACHE=y CONFIG_CMA=y CONFIG_CMA_DEBUGFS=y CONFIG_ZSMALLOC=y -CONFIG_ARM64_DMA_USE_IOMMU=y CONFIG_SECCOMP=y # CONFIG_HARDEN_BRANCH_PREDICTOR is not set CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y @@ -175,6 +174,7 @@ CONFIG_NET_EMATCH_META=y CONFIG_NET_EMATCH_TEXT=y CONFIG_NET_CLS_ACT=y CONFIG_QRTR=y +CONFIG_QRTR_SMD=y CONFIG_RMNET_DATA=y CONFIG_RMNET_DATA_FC=y CONFIG_RMNET_DATA_DEBUG_PKT=y @@ -191,6 +191,12 @@ CONFIG_RFKILL=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_MSM_QPIC_NAND=y +CONFIG_MTD_NAND=y +CONFIG_MTD_UBI=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y @@ -265,6 +271,7 @@ CONFIG_SLIMBUS=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PINCTRL_QCS405=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_SMB1351_USB_CHARGER=y CONFIG_THERMAL=y CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_GOV_USER_SPACE=y @@ -277,7 +284,6 @@ CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_MFD_SPMI_PMIC=y -CONFIG_MFD_SYSCON=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FAN53555=y @@ -295,6 +301,11 @@ CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_SOC_CAMERA=y CONFIG_SOC_CAMERA_PLATFORM=y CONFIG_FB=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_SOUND=y CONFIG_SND=y @@ -364,24 +375,43 @@ CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_MDM_GCC_QCS405=y CONFIG_MDM_DEBUGCC_QCS405=y CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y +CONFIG_QCOM_APCS_IPC=y 
+CONFIG_ARM_SMMU=y CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_RPMSG_QCOM_SMD=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_SMD_RPM=y CONFIG_MSM_SPM=y CONFIG_MSM_L2_SPM=y CONFIG_QCOM_SCM=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y CONFIG_MSM_RPM_SMD=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_MSM_PM=y CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y CONFIG_PWM=y CONFIG_PWM_QTI_LPG=y +CONFIG_QTI_MPM=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT2_FS=y @@ -397,6 +427,8 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y @@ -440,6 +472,7 @@ CONFIG_CORESIGHT_CTI=y CONFIG_CORESIGHT_TPDA=y CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 CONFIG_CORESIGHT_EVENT=y diff --git a/arch/arm64/configs/sdmsteppe-perf_defconfig b/arch/arm64/configs/sdmsteppe-perf_defconfig index 4dbf1b12b6251a07d989f8df6f00d3cb128140e7..e4e007d601ba3ee03e043d290deb6c405f5d11c1 100644 --- a/arch/arm64/configs/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/sdmsteppe-perf_defconfig @@ -53,6 +53,7 @@ CONFIG_MODULE_SIG_SHA512=y CONFIG_PARTITION_ADVANCED=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM6150=y +CONFIG_ARCH_SDMMAGPIE=y CONFIG_PCI=y CONFIG_PCI_MSM=y CONFIG_SCHED_MC=y @@ -309,6 +310,7 @@ CONFIG_PM8150B_PMIC_SIMULATOR=y CONFIG_PM8150L_PMIC_SIMULATOR=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_SDMMAGPIE=y 
CONFIG_PINCTRL_SM6150=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET_QCOM=y @@ -479,6 +481,7 @@ CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM6150_LLCC=y +CONFIG_QCOM_SDMMAGPIE_LLCC=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y diff --git a/arch/arm64/configs/sdmsteppe_defconfig b/arch/arm64/configs/sdmsteppe_defconfig index 72792fdf3c43bbf2e5b77512b7affd7638aa82a0..696fde9ce5c8ffcba9eb9d2b68fca84640bc16fe 100644 --- a/arch/arm64/configs/sdmsteppe_defconfig +++ b/arch/arm64/configs/sdmsteppe_defconfig @@ -55,6 +55,7 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM6150=y +CONFIG_ARCH_SDMMAGPIE=y CONFIG_PCI=y CONFIG_PCI_MSM=y CONFIG_SCHED_MC=y @@ -318,6 +319,7 @@ CONFIG_PM8150B_PMIC_SIMULATOR=y CONFIG_PM8150L_PMIC_SIMULATOR=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_SDMMAGPIE=y CONFIG_PINCTRL_SM6150=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET_QCOM=y @@ -495,6 +497,7 @@ CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM6150_LLCC=y +CONFIG_QCOM_SDMMAGPIE_LLCC=y CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y @@ -503,6 +506,11 @@ CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y CONFIG_QCOM_WDOG_IPI_ENABLE=y CONFIG_QCOM_SMP2P=y CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_BOOT_STATS=y CONFIG_MSM_CORE_HANG_DETECT=y CONFIG_QCOM_DCC_V2=y diff --git a/arch/arm64/configs/sm8150-auto-perf_defconfig b/arch/arm64/configs/sm8150-auto-perf_defconfig index a233bfb2dc45b578fc27b223269ef3f02aa21ec1..1e5d70ac5ccca268c2473b57f1242d31ed5474db 100644 --- a/arch/arm64/configs/sm8150-auto-perf_defconfig +++ b/arch/arm64/configs/sm8150-auto-perf_defconfig @@ -17,6 +17,7 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y 
CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y @@ -43,6 +44,7 @@ CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLAB_FREELIST_HARDENED=y CONFIG_PROFILING=y CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_REFCOUNT_FULL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -51,6 +53,7 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y CONFIG_PARTITION_ADVANCED=y +CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM8150=y CONFIG_PCI=y @@ -141,6 +144,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y CONFIG_NETFILTER_XT_MATCH_COMMENT=y CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y @@ -227,9 +231,13 @@ CONFIG_RMNET_DATA=y CONFIG_RMNET_DATA_FC=y CONFIG_RMNET_DATA_DEBUG_PKT=y CONFIG_SOCKEV_NLMCAST=y +CONFIG_CAN=y +CONFIG_QTI_CAN=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y CONFIG_CFG80211_INTERNAL_REGDB=y CONFIG_RFKILL=y CONFIG_NFC_NQ=y @@ -244,6 +252,7 @@ CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y CONFIG_MEMORY_STATE_TIME=y @@ -323,6 +332,7 @@ CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y CONFIG_QPNP_FG_GEN4=y CONFIG_QPNP_SMB5=y +CONFIG_SMB1390_CHARGE_PUMP=y CONFIG_THERMAL=y CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_GOV_USER_SPACE=y @@ -339,6 +349,7 @@ CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_QTI_BCL_PMIC5=y CONFIG_QTI_BCL_SOC_DRIVER=y CONFIG_QTI_ADC_TM=y +CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_PROXY_CONSUMER=y diff --git a/arch/arm64/configs/sm8150-auto_defconfig b/arch/arm64/configs/sm8150-auto_defconfig index 
8a456b915b92e1c7544dbe9d38e2ccb9e8d2ba29..9077f3d381f7aaeaa7fa1bc75858e10f1531e0e8 100644 --- a/arch/arm64/configs/sm8150-auto_defconfig +++ b/arch/arm64/configs/sm8150-auto_defconfig @@ -16,6 +16,8 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_DEBUG_BLK_CGROUP=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y @@ -43,6 +45,8 @@ CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLAB_FREELIST_HARDENED=y CONFIG_PROFILING=y CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_REFCOUNT_FULL=y +CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y @@ -53,6 +57,7 @@ CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set +CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM8150=y CONFIG_PCI=y @@ -68,6 +73,7 @@ CONFIG_ZSMALLOC=y CONFIG_SECCOMP=y # CONFIG_UNMAP_KERNEL_AT_EL0 is not set # CONFIG_HARDEN_BRANCH_PREDICTOR is not set +CONFIG_PRINT_VMEMLAYOUT=y CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y @@ -145,6 +151,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y CONFIG_NETFILTER_XT_MATCH_COMMENT=y CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y @@ -233,9 +240,13 @@ CONFIG_RMNET_DATA=y CONFIG_RMNET_DATA_FC=y CONFIG_RMNET_DATA_DEBUG_PKT=y CONFIG_SOCKEV_NLMCAST=y +CONFIG_CAN=y +CONFIG_QTI_CAN=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y CONFIG_CFG80211_INTERNAL_REGDB=y # CONFIG_CFG80211_CRDA_SUPPORT is not set CONFIG_RFKILL=y @@ -252,6 +263,7 @@ CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y CONFIG_QSEECOM=y CONFIG_UID_SYS_STATS=y 
CONFIG_MEMORY_STATE_TIME=y @@ -334,6 +346,7 @@ CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y CONFIG_QPNP_FG_GEN4=y CONFIG_QPNP_SMB5=y +CONFIG_SMB1390_CHARGE_PUMP=y CONFIG_THERMAL=y CONFIG_THERMAL_WRITABLE_TRIPS=y CONFIG_THERMAL_GOV_USER_SPACE=y @@ -350,6 +363,7 @@ CONFIG_REGULATOR_COOLING_DEVICE=y CONFIG_QTI_BCL_PMIC5=y CONFIG_QTI_BCL_SOC_DRIVER=y CONFIG_QTI_ADC_TM=y +CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_PROXY_CONSUMER=y @@ -579,9 +593,7 @@ CONFIG_ESOC_MDM_4x=y CONFIG_ESOC_MDM_DRV=y CONFIG_ESOC_MDM_DBG_ENG=y CONFIG_MSM_TZ_LOG=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_QUOTA=y diff --git a/arch/arm64/configs/sm8150-perf_defconfig b/arch/arm64/configs/sm8150-perf_defconfig index 5e29088f7e1e733602785f50f7936a20489ddb43..9742d70e107ce24267352c61b94b644dd503517e 100644 --- a/arch/arm64/configs/sm8150-perf_defconfig +++ b/arch/arm64/configs/sm8150-perf_defconfig @@ -17,6 +17,7 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y @@ -52,6 +53,7 @@ CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y CONFIG_PARTITION_ADVANCED=y +CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM8150=y CONFIG_PCI=y @@ -101,6 +103,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y @@ -112,6 +115,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y @@ -329,6 +333,7 @@ CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y CONFIG_QPNP_FG_GEN4=y +CONFIG_SMB1355_SLAVE_CHARGER=y CONFIG_QPNP_SMB5=y 
CONFIG_SMB1390_CHARGE_PUMP=y CONFIG_THERMAL=y @@ -468,6 +473,7 @@ CONFIG_RNDIS_IPA=y CONFIG_IPA3_MHI_PROXY=y CONFIG_IPA_UT=y CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_QCOM_MDSS_PLL=y CONFIG_SPMI_PMIC_CLKDIV=y CONFIG_MSM_CLK_AOP_QMP=y @@ -498,6 +504,7 @@ CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM8150_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y @@ -516,6 +523,7 @@ CONFIG_QCOM_SECURE_BUFFER=y CONFIG_ICNSS=y CONFIG_ICNSS_QMI=y CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_BUS_CONFIG_RPMH=y CONFIG_QCOM_COMMAND_DB=y @@ -526,6 +534,7 @@ CONFIG_QTI_RPMH_API=y CONFIG_QSEE_IPC_IRQ_BRIDGE=y CONFIG_QCOM_GLINK=y CONFIG_QCOM_GLINK_PKT=y +CONFIG_QCOM_QDSS_BRIDGE=y CONFIG_QTI_RPM_STATS_LOG=y CONFIG_MSM_CDSP_LOADER=y CONFIG_QCOM_SMCINVOKE=y @@ -567,9 +576,7 @@ CONFIG_ESOC_MDM_4x=y CONFIG_ESOC_MDM_DRV=y CONFIG_ESOC_MDM_DBG_ENG=y CONFIG_MSM_TZ_LOG=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_QUOTA=y @@ -602,6 +609,7 @@ CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_DUMMY=y CONFIG_CORESIGHT_REMOTE_ETM=y CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_TGU=y CONFIG_CORESIGHT_EVENT=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y diff --git a/arch/arm64/configs/sm8150_defconfig b/arch/arm64/configs/sm8150_defconfig index 07b5e3b28259b29807fa0662e8a8f28832897761..565aef44d40732520a4fd02fb3ac04d2d858b802 100644 --- a/arch/arm64/configs/sm8150_defconfig +++ b/arch/arm64/configs/sm8150_defconfig @@ -16,6 +16,8 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y +CONFIG_BLK_CGROUP=y +CONFIG_DEBUG_BLK_CGROUP=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y @@ -55,6 +57,7 @@ CONFIG_MODULE_SIG_SHA512=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y # 
CONFIG_IOSCHED_DEADLINE is not set +CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_SM8150=y CONFIG_PCI=y @@ -107,6 +110,7 @@ CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y CONFIG_INET_AH=y CONFIG_INET_ESP=y CONFIG_INET_IPCOMP=y @@ -118,6 +122,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y @@ -342,6 +347,7 @@ CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y CONFIG_QPNP_FG_GEN4=y +CONFIG_SMB1355_SLAVE_CHARGER=y CONFIG_QPNP_SMB5=y CONFIG_SMB1390_CHARGE_PUMP=y CONFIG_THERMAL=y @@ -488,6 +494,7 @@ CONFIG_RNDIS_IPA=y CONFIG_IPA3_MHI_PROXY=y CONFIG_IPA_UT=y CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_QCOM_MDSS_PLL=y CONFIG_SPMI_PMIC_CLKDIV=y CONFIG_MSM_CLK_AOP_QMP=y @@ -518,6 +525,7 @@ CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SM8150_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m CONFIG_QCOM_QMI_HELPERS=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_MEMORY_DUMP_V2=y @@ -540,6 +548,7 @@ CONFIG_ICNSS=y CONFIG_ICNSS_DEBUG=y CONFIG_ICNSS_QMI=y CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y CONFIG_QCOM_BUS_SCALING=y CONFIG_QCOM_BUS_CONFIG_RPMH=y CONFIG_QCOM_COMMAND_DB=y @@ -550,6 +559,7 @@ CONFIG_QTI_RPMH_API=y CONFIG_QSEE_IPC_IRQ_BRIDGE=y CONFIG_QCOM_GLINK=y CONFIG_QCOM_GLINK_PKT=y +CONFIG_QCOM_QDSS_BRIDGE=y CONFIG_QTI_RPM_STATS_LOG=y CONFIG_MSM_CDSP_LOADER=y CONFIG_QCOM_SMCINVOKE=y @@ -593,9 +603,7 @@ CONFIG_ESOC_MDM_4x=y CONFIG_ESOC_MDM_DRV=y CONFIG_ESOC_MDM_DBG_ENG=y CONFIG_MSM_TZ_LOG=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_QUOTA=y diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 2326e39d58929e3c7d10ce94579e5a4a34901e36..e63d0a8312de81bade74507304d3871e3ceec010 100644 --- 
a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += preempt.h +generic-y += qrwlock.h generic-y += rwsem.h generic-y += segment.h generic-y += serial.h diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 9ef0797380cbbdf182a86e934c2eec5aa97d889d..f9b0b09153e0eaa3b15728fd42471c77c2d1955a 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v) /* LSE atomics */ " mvn %w[i], %w[i]\n" " stclr %w[i], %[v]") - : [i] "+r" (w0), [v] "+Q" (v->counter) + : [i] "+&r" (w0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \ /* LSE atomics */ \ " mvn %w[i], %w[i]\n" \ " ldclr" #mb " %w[i], %w[i], %[v]") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ + : [i] "+&r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v) /* LSE atomics */ " neg %w[i], %w[i]\n" " stadd %w[i], %[v]") - : [i] "+r" (w0), [v] "+Q" (v->counter) + : [i] "+&r" (w0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], w30, %[v]\n" \ " add %w[i], %w[i], w30") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ + : [i] "+&r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS , ##cl); \ \ @@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ /* LSE atomics */ \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], %w[i], %[v]") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ + : [i] "+&r" (w0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -314,7 +314,7 @@ static inline void atomic64_and(long i, 
atomic64_t *v) /* LSE atomics */ " mvn %[i], %[i]\n" " stclr %[i], %[v]") - : [i] "+r" (x0), [v] "+Q" (v->counter) + : [i] "+&r" (x0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ /* LSE atomics */ \ " mvn %[i], %[i]\n" \ " ldclr" #mb " %[i], %[i], %[v]") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ + : [i] "+&r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) /* LSE atomics */ " neg %[i], %[i]\n" " stadd %[i], %[v]") - : [i] "+r" (x0), [v] "+Q" (v->counter) + : [i] "+&r" (x0), [v] "+Q" (v->counter) : "r" (x1) : __LL_SC_CLOBBERS); } @@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], x30, %[v]\n" \ " add %[i], %[i], x30") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ + : [i] "+&r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ /* LSE atomics */ \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], %[i], %[v]") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ + : [i] "+&r" (x0), [v] "+Q" (v->counter) \ : "r" (x1) \ : __LL_SC_CLOBBERS, ##cl); \ \ @@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) " sub x30, x30, %[ret]\n" " cbnz x30, 1b\n" "2:") - : [ret] "+r" (x0), [v] "+Q" (v->counter) + : [ret] "+&r" (x0), [v] "+Q" (v->counter) : : __LL_SC_CLOBBERS, "cc", "memory"); @@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ " eor %[old1], %[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ " orr %[old1], %[old1], %[old2]") \ - : [old1] "+r" (x0), [old2] "+r" (x1), \ + : [old1] "+&r" (x0), [old2] "+&r" (x1), \ [v] "+Q" (*(unsigned long *)ptr) \ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ [oldval1] "r" (oldval1), 
[oldval2] "r" (oldval2) \ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 2e7b236bc596204724d7f5b04d9c6eb6ada744e0..a5f476752f4e917edb75a4fe7becc799f899d51c 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -43,7 +43,8 @@ #define ARM64_UNMAP_KERNEL_AT_EL0 23 #define ARM64_HARDEN_BRANCH_PREDICTOR 24 #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 +#define ARM64_HW_DBM 26 -#define ARM64_NCAPS 26 +#define ARM64_NCAPS 27 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 30da0918d04619a46e02eeee854b339dd2555226..3483ed12d9a422c7f5c4b61a44366db6b4dadcd8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -84,6 +84,8 @@ #define ARM_CPU_PART_CORTEX_A53 0xD03 #define ARM_CPU_PART_CORTEX_A73 0xD09 #define ARM_CPU_PART_CORTEX_A75 0xD0A +#define ARM_CPU_PART_KRYO3S 0x803 +#define ARM_CPU_PART_KRYO4S 0x803 #define APM_CPU_PART_POTENZA 0x000 @@ -104,6 +106,8 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_KRYO3S MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO3S) +#define MIDR_KRYO4S MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO4S) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) @@ -119,6 +123,45 @@ #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) +/* + * Represent a range of MIDR values for a given CPU model and a + * range of variant/revision values. 
+ * + * @model - CPU model as defined by MIDR_CPU_MODEL + * @rv_min - Minimum value for the revision/variant as defined by + * MIDR_CPU_VAR_REV + * @rv_max - Maximum value for the variant/revision for the range. + */ +struct midr_range { + u32 model; + u32 rv_min; + u32 rv_max; +}; + +#define GENERIC_MIDR_RANGE(m, v_min, r_min, v_max, r_max) \ + { \ + .model = m, \ + .rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \ + .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \ + } + +#define GENERIC_MIDR_ALL_VERSIONS(m) GENERIC_MIDR_RANGE(m, 0, 0, 0xf, 0xf) + +static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) +{ + return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, + range->rv_min, range->rv_max); +} + +static inline bool +is_midr_in_range_list(u32 midr, struct midr_range const *ranges) +{ + while (ranges->model) + if (is_midr_in_range(midr, ranges++)) + return true; + return false; +} + /* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. Use this function to read the CPU ID diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 2d6d4bd9de52b48cbaaeae1cea86544c3f406bec..fe55b516f018d0adf8d0b31d1d93d66618931cce 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void) return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8; } +/* + * We are not in the kvm->srcu critical section most of the time, so we take + * the SRCU read lock here. Since we copy the data from the user page, we + * can immediately drop the lock again. 
+ */ +static inline int kvm_read_guest_lock(struct kvm *kvm, + gpa_t gpa, void *data, unsigned long len) +{ + int srcu_idx = srcu_read_lock(&kvm->srcu); + int ret = kvm_read_guest(kvm, gpa, data, len); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + return ret; +} + #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR #include diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index 74a9d301819fbfa1128bcce879b95ae9c24a2e76..81c69fe1adc0c15bf914f259312087a96aecf2e8 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h @@ -18,7 +18,11 @@ #ifdef CONFIG_SPARSEMEM #define MAX_PHYSMEM_BITS 48 +#ifndef CONFIG_MEMORY_HOTPLUG #define SECTION_SIZE_BITS 30 +#else +#define SECTION_SIZE_BITS CONFIG_HOTPLUG_SIZE_BITS +#endif #endif #endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index cae05ab5b467b0e95870ec6f6d0edabc37266a16..4eeda5edb21015b89b1fb49ee6e001b2bc9ba3d8 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -138,169 +138,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock) } #define arch_spin_is_contended arch_spin_is_contended -/* - * Write lock implementation. - * - * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is - * exclusively held. - * - * The memory barriers are implicit with the load-acquire and store-release - * instructions. 
- */ - -static inline void arch_write_lock(arch_rwlock_t *rw) -{ - unsigned int tmp; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " sevl\n" - "1: wfe\n" - "2: ldaxr %w0, %1\n" - " cbnz %w0, 1b\n" - " stxr %w0, %w2, %1\n" - " cbnz %w0, 2b\n" - __nops(1), - /* LSE atomics */ - "1: mov %w0, wzr\n" - "2: casa %w0, %w2, %1\n" - " cbz %w0, 3f\n" - " ldxr %w0, %1\n" - " cbz %w0, 2b\n" - " wfe\n" - " b 1b\n" - "3:") - : "=&r" (tmp), "+Q" (rw->lock) - : "r" (0x80000000) - : "memory"); -} - -static inline int arch_write_trylock(arch_rwlock_t *rw) -{ - unsigned int tmp; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - "1: ldaxr %w0, %1\n" - " cbnz %w0, 2f\n" - " stxr %w0, %w2, %1\n" - " cbnz %w0, 1b\n" - "2:", - /* LSE atomics */ - " mov %w0, wzr\n" - " casa %w0, %w2, %1\n" - __nops(2)) - : "=&r" (tmp), "+Q" (rw->lock) - : "r" (0x80000000) - : "memory"); - - return !tmp; -} - -static inline void arch_write_unlock(arch_rwlock_t *rw) -{ - asm volatile(ARM64_LSE_ATOMIC_INSN( - " stlr wzr, %0", - " swpl wzr, wzr, %0") - : "=Q" (rw->lock) :: "memory"); -} - -/* write_can_lock - would write_trylock() succeed? */ -#define arch_write_can_lock(x) ((x)->lock == 0) - -/* - * Read lock implementation. - * - * It exclusively loads the lock value, increments it and stores the new value - * back if positive and the CPU still exclusively owns the location. If the - * value is negative, the lock is already held. - * - * During unlocking there may be multiple active read locks but no write lock. - * - * The memory barriers are implicit with the load-acquire and store-release - * instructions. - * - * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC - * and LSE implementations may exhibit different behaviour (although this - * will have no effect on lockdep). 
- */ -static inline void arch_read_lock(arch_rwlock_t *rw) -{ - unsigned int tmp, tmp2; - - asm volatile( - " sevl\n" - ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - "1: wfe\n" - "2: ldaxr %w0, %2\n" - " add %w0, %w0, #1\n" - " tbnz %w0, #31, 1b\n" - " stxr %w1, %w0, %2\n" - " cbnz %w1, 2b\n" - __nops(1), - /* LSE atomics */ - "1: wfe\n" - "2: ldxr %w0, %2\n" - " adds %w1, %w0, #1\n" - " tbnz %w1, #31, 1b\n" - " casa %w0, %w1, %2\n" - " sbc %w0, %w1, %w0\n" - " cbnz %w0, 2b") - : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) - : - : "cc", "memory"); -} - -static inline void arch_read_unlock(arch_rwlock_t *rw) -{ - unsigned int tmp, tmp2; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - "1: ldxr %w0, %2\n" - " sub %w0, %w0, #1\n" - " stlxr %w1, %w0, %2\n" - " cbnz %w1, 1b", - /* LSE atomics */ - " movn %w0, #0\n" - " staddl %w0, %2\n" - __nops(2)) - : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) - : - : "memory"); -} - -static inline int arch_read_trylock(arch_rwlock_t *rw) -{ - unsigned int tmp, tmp2; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " mov %w1, #1\n" - "1: ldaxr %w0, %2\n" - " add %w0, %w0, #1\n" - " tbnz %w0, #31, 2f\n" - " stxr %w1, %w0, %2\n" - " cbnz %w1, 1b\n" - "2:", - /* LSE atomics */ - " ldr %w0, %2\n" - " adds %w1, %w0, #1\n" - " tbnz %w1, #31, 1f\n" - " casa %w0, %w1, %2\n" - " sbc %w1, %w1, %w0\n" - __nops(1) - "1:") - : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) - : - : "cc", "memory"); - - return !tmp2; -} - -/* read_can_lock - would read_trylock() succeed? 
*/ -#define arch_read_can_lock(x) ((x)->lock < 0x80000000) +#include #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index 55be59a35e3fe98c094b3b64a15cf02bfd62c7c8..6b856012c51b9f80e658a10e22609e6d4debc0ed 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -36,10 +36,6 @@ typedef struct { #define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 } -typedef struct { - volatile unsigned int lock; -} arch_rwlock_t; - -#define __ARCH_RW_LOCK_UNLOCKED { 0 } +#include #endif diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 6ad30776e984d071134f8762395565859691e0b0..99390755c0c4b2440635915b41da8e4fa2c52de7 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -27,7 +27,7 @@ struct stackframe { unsigned long fp; unsigned long pc; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - unsigned int graph; + int graph; #endif }; diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 52f15cd896e11ad631ac3092d9709337a9629bb4..b5a28336c07712af8d10aa62f1669b8a798065d8 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data) case PSCI_CONDUIT_HVC: arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if (res.a0) + if ((int)res.a0 < 0) return 0; cb = call_hvc_arch_workaround_1; smccc_start = __smccc_workaround_1_hvc_start; @@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data) case PSCI_CONDUIT_SMC: arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if (res.a0) + if ((int)res.a0 < 0) return 0; cb = call_smc_arch_workaround_1; smccc_start = __smccc_workaround_1_smc_start; diff --git 
a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f18295e46438fae5d69628187227af522163a878..aacc6a18ac379c3c7d601b4012ef6758c3e3ab51 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -880,6 +880,84 @@ static int __init parse_kpti(char *str) __setup("kpti=", parse_kpti); #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ +#ifdef CONFIG_ARM64_HW_AFDBM +static inline void __cpu_enable_hw_dbm(void) +{ + u64 tcr = read_sysreg(tcr_el1) | TCR_HD; + + write_sysreg(tcr, tcr_el1); + isb(); +} + +static bool cpu_has_broken_dbm(void) +{ + /* List of CPUs which have broken DBM support. */ + static const struct midr_range cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_1024718 + // A55 r0p0 -r1p0 + GENERIC_MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), + GENERIC_MIDR_RANGE(MIDR_KRYO3S, 7, 12, 7, 12), + GENERIC_MIDR_RANGE(MIDR_KRYO4S, 7, 12, 7, 12), +#endif + {}, + }; + + return is_midr_in_range_list(read_cpuid_id(), cpus); +} + +static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap) +{ + bool has_cpu_feature; + + preempt_disable(); + has_cpu_feature = has_cpuid_feature(cap, SCOPE_LOCAL_CPU); + preempt_enable(); + + return has_cpu_feature && !cpu_has_broken_dbm(); +} + +static int cpu_enable_hw_dbm(void *entry) +{ + const struct arm64_cpu_capabilities *cap = + (const struct arm64_cpu_capabilities *) entry; + + if (cpu_can_use_dbm(cap)) + __cpu_enable_hw_dbm(); + + return 0; +} + +static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, + int __unused) +{ + static bool detected = false; + /* + * DBM is a non-conflicting feature. i.e, the kernel can safely + * run a mix of CPUs with and without the feature. So, we + * unconditionally enable the capability to allow any late CPU + * to use the feature. We only enable the control bits on the + * CPU, if it actually supports. + * + * We have to make sure we print the "feature" detection only + * when at least one CPU actually uses it. 
So check if this CPU + * can actually use it and print the message exactly once. + * + * This is safe as all CPUs (including secondary CPUs - due to the + * LOCAL_CPU scope - and the hotplugged CPUs - via verification) + * goes through the "matches" check exactly once. Also if a CPU + * matches the criteria, it is guaranteed that the CPU will turn + * the DBM on, as the capability is unconditionally enabled. + */ + if (!detected && cpu_can_use_dbm(cap)) { + detected = true; + pr_info("detected: Hardware dirty bit management\n"); + } + + return true; +} + +#endif + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", @@ -992,6 +1070,25 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_pos = ID_AA64ISAR1_DPB_SHIFT, .min_field_value = 1, }, +#endif +#ifdef CONFIG_ARM64_HW_AFDBM + { + /* + * Since we turn this on always, we don't want the user to + * think that the feature is available when it may not be. + * So hide the description. 
+ * + * .desc = "Hardware pagetable Dirty Bit Management", + * + */ + .capability = ARM64_HW_DBM, + .sys_reg = SYS_ID_AA64MMFR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64MMFR1_HADBS_SHIFT, + .min_field_value = 2, + .matches = has_hw_dbm, + .enable = cpu_enable_hw_dbm, + }, #endif {}, }; diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 2d0e5030f1c1454c30cbefbc3b5d620c03e770fb..354d8c361091608cac018ef104419474599b945b 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -913,9 +913,9 @@ static void __armv8pmu_probe_pmu(void *info) int pmuver; dfr0 = read_sysreg(id_aa64dfr0_el1); - pmuver = cpuid_feature_extract_signed_field(dfr0, + pmuver = cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMUVER_SHIFT); - if (pmuver < 1) + if (pmuver == 0xf || pmuver == 0) return; probe->present = true; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 41bdbbb41223ea85a683c44176dfd47a6f0e3a86..a6c8d9455fa076f825a1309f56478e11fdc1f2da 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -618,16 +618,6 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, void (*__smp_cross_call)(const struct cpumask *, unsigned int); DEFINE_PER_CPU(bool, pending_ipi); -void smp_cross_call_common(const struct cpumask *cpumask, unsigned int func) -{ - unsigned int cpu; - - for_each_cpu(cpu, cpumask) - per_cpu(pending_ipi, cpu) = true; - - __smp_cross_call(cpumask, func); -} - /* * Enumerate the possible CPU set from the device tree and build the * cpu logical map array containing MPIDR values related to logical @@ -795,6 +785,17 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) __smp_cross_call(target, ipinr); } +static void smp_cross_call_common(const struct cpumask *cpumask, + unsigned int func) +{ + unsigned int cpu; + + for_each_cpu(cpu, cpumask) + per_cpu(pending_ipi, cpu) = true; + + smp_cross_call(cpumask, func); +} + void 
show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu, i; @@ -841,7 +842,8 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask) void arch_irq_work_raise(void) { if (__smp_cross_call) - smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); + smp_cross_call_common(cpumask_of(smp_processor_id()), + IPI_IRQ_WORK); } #endif diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 76809ccd309ccaf330e45ac4b814922ae5949624..d5718a060672e1696618904c8844447daa042007 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk->ret_stack && (frame->pc == (unsigned long)return_to_handler)) { + if (WARN_ON_ONCE(frame->graph == -1)) + return -EINVAL; + if (frame->graph < -1) + frame->graph += FTRACE_NOTRACE_DEPTH; + /* * This is a case where function graph tracer has * modified a return address (LR) in a stack frame diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index a4391280fba9631d69cf8bda434c307956b92668..f258636273c9588dca80420341a3627cfdd61d2c 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs) frame.fp = regs->regs[29]; frame.pc = regs->pc; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = -1; /* no task info */ + frame.graph = current->curr_ret_stack; #endif do { int ret = unwind_frame(NULL, &frame); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9b82719e309fba38b2f0f6318a27dfe39eefc9d7..c50223a9871aa8b0929e8224a6b8f2a12dee6949 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -40,6 +40,8 @@ #include #include #include +#include +#include #include #include @@ -315,6 +317,51 @@ static void __init arm64_memory_present(void) static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX; phys_addr_t bootloader_memory_limit; +#ifdef 
CONFIG_OVERRIDE_MEMORY_LIMIT +static void __init update_memory_limit(void) +{ + unsigned long dt_root = of_get_flat_dt_root(); + unsigned long node, mp; + const char *p; + unsigned long long ram_sz, sz; + int ret; + + ram_sz = memblock_end_of_DRAM() - memblock_start_of_DRAM(); + node = of_get_flat_dt_subnode_by_name(dt_root, "mem-offline"); + if (node == -FDT_ERR_NOTFOUND) { + pr_err("mem-offine node not found in FDT\n"); + return; + } + p = of_get_flat_dt_prop(node, "mem-percent", NULL); + if (!p) { + pr_err("mem-offine: mem-percent property not found in FDT\n"); + return; + } + + ret = kstrtoul(p, 10, &mp); + if (ret) { + pr_err("mem-offine: kstrtoul failed\n"); + return; + } + + if (mp > 100) { + pr_err("mem-offine: Invalid mem-percent DT property\n"); + return; + } + sz = ram_sz - ((ram_sz * mp) / 100); + memory_limit = (phys_addr_t)sz; + memory_limit = ALIGN(memory_limit, MIN_MEMORY_BLOCK_SIZE); + + pr_notice("Memory limit set/overridden to %lldMB\n", + memory_limit >> 20); +} +#else +static void __init update_memory_limit(void) +{ + +} +#endif + /* * Limit the memory size that was specified via FDT. */ @@ -396,6 +443,7 @@ void __init arm64_memblock_init(void) memblock_remove(0, memstart_addr); } + update_memory_limit(); /* * Save bootloader imposed memory limit before we overwirte * memblock. diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 408e00d71e502c362e53fe9de9175a5e095685de..05edcadb723b6dd9b423241a4037f5fee0706577 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -699,11 +699,13 @@ void hotplug_paging(phys_addr_t start, phys_addr_t size) struct page *pg; phys_addr_t pgd_phys = pgd_pgtable_alloc(); pgd_t *pgd = pgd_set_fixmap(pgd_phys); + int flags; memcpy(pgd, swapper_pg_dir, PAGE_SIZE); + flags = debug_pagealloc_enabled() ? 
NO_BLOCK_MAPPINGS : 0; __create_pgd_mapping(pgd, start, __phys_to_virt(start), size, - PAGE_KERNEL, pgd_pgtable_alloc, !debug_pagealloc_enabled()); + PAGE_KERNEL, pgd_pgtable_alloc, flags); cpu_replace_ttbr1(__va(pgd_phys)); memcpy(swapper_pg_dir, pgd, PAGE_SIZE); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 16bc89af3cbd597f9244c0813482cb6f25170661..31f8d30e2a5fbb146914fc0ec6e5ec9295f62dd5 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -477,21 +477,15 @@ ENTRY(__cpu_setup) bfi x10, x9, #32, #3 #ifdef CONFIG_ARM64_HW_AFDBM /* - * Hardware update of the Access and Dirty bits. + * Enable hardware update of the Access Flags bit. + * Hardware dirty bit management is enabled later, + * via capabilities. */ mrs x9, ID_AA64MMFR1_EL1 and x9, x9, #0xf - cbz x9, 2f - cmp x9, #2 - b.lt 1f -#ifdef CONFIG_ARM64_ERRATUM_1024718 - /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */ - cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4 - cbnz x1, 1f -#endif - orr x10, x10, #TCR_HD // hardware Dirty flag update -1: orr x10, x10, #TCR_HA // hardware Access flag update -2: + cbz x9, 1f + orr x10, x10, #TCR_HA // hardware Access flag update +1: #endif /* CONFIG_ARM64_HW_AFDBM */ msr tcr_el1, x10 ret // return to head.S diff --git a/arch/cris/include/arch-v10/arch/bug.h b/arch/cris/include/arch-v10/arch/bug.h index 905afeacfedf53de44b3ba08487b04b46141e564..06da9d49152a0fd78ef1bac764df62aa38e35c83 100644 --- a/arch/cris/include/arch-v10/arch/bug.h +++ b/arch/cris/include/arch-v10/arch/bug.h @@ -44,18 +44,25 @@ struct bug_frame { * not be used like this with newer versions of gcc. */ #define BUG() \ +do { \ __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\ "movu.w " __stringify(__LINE__) ",$r0\n\t"\ "jump 0f\n\t" \ ".section .rodata\n" \ "0:\t.string \"" __FILE__ "\"\n\t" \ - ".previous") + ".previous"); \ + unreachable(); \ +} while (0) #endif #else /* This just causes an oops. 
*/ -#define BUG() (*(int *)0 = 0) +#define BUG() \ +do { \ + barrier_before_unreachable(); \ + __builtin_trap(); \ +} while (0) #endif diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h index bd3eeb8d1cfa379a56456a3fd27909dbeaf2b55b..66b37a5327654f87a40fc277fad53d927875c355 100644 --- a/arch/ia64/include/asm/bug.h +++ b/arch/ia64/include/asm/bug.h @@ -4,7 +4,11 @@ #ifdef CONFIG_BUG #define ia64_abort() __builtin_trap() -#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) +#define BUG() do { \ + printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + barrier_before_unreachable(); \ + ia64_abort(); \ +} while (0) /* should this BUG be made generic? */ #define HAVE_ARCH_BUG diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index 85bba43e7d5dc7638138f90f1eb0dd56609c5807..658a8e06a69bb541a4e659da84ca0cd8a65daf35 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr, u64 virt_addr=simple_strtoull(buf, NULL, 16); int ret; - ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL); + ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG printk("Virtual address %lx is not existing.\n",virt_addr); diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 84938fdbbadad950a531cf62c6dbab02393d04a1..908d58347790d7518282f1891dc555599b1cf75f 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = { .id = 0, .num_resources = ARRAY_SIZE(mcf_fec0_resources), .resource = mcf_fec0_resources, - .dev.platform_data = FEC_PDATA, + .dev = { + .dma_mask = &mcf_fec0.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = FEC_PDATA, + } }; #ifdef MCFFEC_BASE1 @@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = 
{ .id = 1, .num_resources = ARRAY_SIZE(mcf_fec1_resources), .resource = mcf_fec1_resources, - .dev.platform_data = FEC_PDATA, + .dev = { + .dma_mask = &mcf_fec1.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = FEC_PDATA, + } }; #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h index b7e2bf1ba4a60d1e139aedf50c7a82fc0b0235cd..275dca1435bf9f64202b0a9249b0a07138105c82 100644 --- a/arch/m68k/include/asm/bug.h +++ b/arch/m68k/include/asm/bug.h @@ -8,16 +8,19 @@ #ifndef CONFIG_SUN3 #define BUG() do { \ pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #else #define BUG() do { \ pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ + barrier_before_unreachable(); \ panic("BUG!"); \ } while (0) #endif #else #define BUG() do { \ + barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #endif diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index d99f5242169e7acb31f8cfa71cd6e14d24e94c82..b3aec101a65d4ed96f4f986ea22c2210b297cdef 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, parent_irq = irq_of_parse_and_map(ciu_node, 0); if (!parent_irq) { - pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", + pr_err("ERROR: Couldn't acquire parent_irq for %s\n", ciu_node->name); return -EINVAL; } @@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, addr = of_get_address(ciu_node, 0, NULL, NULL); if (!addr) { - pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); + pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name); return -EINVAL; } host_data->raw_reg = (u64)phys_to_virt( @@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, addr = 
of_get_address(ciu_node, 1, NULL, NULL); if (!addr) { - pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); + pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name); return -EINVAL; } host_data->en_reg = (u64)phys_to_virt( @@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); if (r) { - pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", + pr_err("ERROR: Couldn't read cavium,max-bits from %s\n", ciu_node->name); return r; } @@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, &octeon_irq_domain_cib_ops, host_data); if (!cib_domain) { - pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); + pr_err("ERROR: Couldn't irq_domain_add_linear()\n"); return -ENOMEM; } diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h index aa3800c823321e50c577d78a7d9c8b63004d46f4..d99ca862dae32babbe68d039572a8e0f455ef5f0 100644 --- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h @@ -167,7 +167,7 @@ #define AR71XX_AHB_DIV_MASK 0x7 #define AR724X_PLL_REG_CPU_CONFIG 0x00 -#define AR724X_PLL_REG_PCIE_CONFIG 0x18 +#define AR724X_PLL_REG_PCIE_CONFIG 0x10 #define AR724X_PLL_FB_SHIFT 0 #define AR724X_PLL_FB_MASK 0x3ff diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h index e0d9b373d415195f95d34c628e94c621a1306462..f83879dadd1e3693023f17c6a707fbd91d2e3a98 100644 --- a/arch/mips/include/asm/machine.h +++ b/arch/mips/include/asm/machine.h @@ -52,7 +52,7 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt) if (!mach->matches) return NULL; - for (match = mach->matches; match->compatible; match++) { + for (match = mach->matches; match->compatible[0]; match++) { if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0) return match; } diff --git a/arch/mips/kernel/process.c 
b/arch/mips/kernel/process.c index 2f2d176396aa51e7c941e6d1cf2dee088fa9e94c..e1ddb94a6522253cfd8e49000890297aadab7ebf 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) if (value & ~known_bits) return -EOPNOTSUPP; + /* Setting FRE without FR is not supported. */ + if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE) + return -EOPNOTSUPP; + /* Avoid inadvertently triggering emulation */ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index c552c20237d4f73023cdba8e96acc0ee47e0196c..e058cd300713d19bf656bc243f3f6cec9728a962 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -454,7 +454,7 @@ static int fpr_get_msa(struct task_struct *target, /* * Copy the floating-point context to the supplied NT_PRFPREG buffer. * Choose the appropriate helper for general registers, and then copy - * the FCSR register separately. + * the FCSR and FIR registers separately. */ static int fpr_get(struct task_struct *target, const struct user_regset *regset, @@ -462,6 +462,7 @@ static int fpr_get(struct task_struct *target, void *kbuf, void __user *ubuf) { const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); + const int fir_pos = fcr31_pos + sizeof(u32); int err; if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) @@ -474,6 +475,12 @@ static int fpr_get(struct task_struct *target, err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.fpu.fcr31, fcr31_pos, fcr31_pos + sizeof(u32)); + if (err) + return err; + + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &boot_cpu_data.fpu_id, + fir_pos, fir_pos + sizeof(u32)); return err; } @@ -522,7 +529,8 @@ static int fpr_set_msa(struct task_struct *target, /* * Copy the supplied NT_PRFPREG buffer to the floating-point context. 
* Choose the appropriate helper for general registers, and then copy - * the FCSR register separately. + * the FCSR register separately. Ignore the incoming FIR register + * contents though, as the register is read-only. * * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', * which is supposed to have been guaranteed by the kernel before @@ -536,6 +544,7 @@ static int fpr_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); + const int fir_pos = fcr31_pos + sizeof(u32); u32 fcr31; int err; @@ -563,6 +572,11 @@ static int fpr_set(struct task_struct *target, ptrace_setfcr31(target, fcr31); } + if (count > 0) + err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + fir_pos, + fir_pos + sizeof(u32)); + return err; } @@ -784,7 +798,7 @@ long arch_ptrace(struct task_struct *child, long request, fregs = get_fpu_regs(child); #ifdef CONFIG_32BIT - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even @@ -795,7 +809,7 @@ long arch_ptrace(struct task_struct *child, long request, break; } #endif - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; @@ -873,7 +887,7 @@ long arch_ptrace(struct task_struct *child, long request, init_fp_ctx(child); #ifdef CONFIG_32BIT - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 40e212d6b26b2dc6055f5e6648964b9f35877cd8..89026d33a07bf7027400e56fa49fcb65da21ea60 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -98,7 +98,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t 
request, break; } fregs = get_fpu_regs(child); - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even @@ -108,7 +108,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, addr & 1); break; } - tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); + tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; @@ -205,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, sizeof(child->thread.fpu)); child->thread.fpu.fcr31 = 0; } - if (test_thread_flag(TIF_32BIT_FPREGS)) { + if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 75fdeaa8c62f21a5420c963968c0188bbb459f49..9730ba734afe0cef2c44713430904db020977840 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, - { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, + { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 6f534b2099717da8c2d7be70bfa035a05ed5aede..e12dfa48b478dd3ec51369236bb84040c044bd82 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) /* * Either no secondary cache or the available caches don't have the * subset property so we have to flush the primary 
caches - * explicitly + * explicitly. + * If we would need IPI to perform an INDEX-type operation, then + * we have to use the HIT-type alternative as IPI cannot be used + * here due to interrupts possibly being disabled. */ - if (size >= dcache_size) { + if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; @@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) return; } - if (size >= dcache_size) { + if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 651974192c4d76c82fcebf9a0abddc25537f5b12..b479926f0167fc2bc92375197d08d67d22bca597 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \ libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c libfdtheader := fdt.h libfdt.h libfdt_internal.h -$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \ +$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \ + treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \ $(addprefix $(obj)/,$(libfdtheader)) src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h new file mode 100644 index 0000000000000000000000000000000000000000..db0dedab65eec5d9ca1cce42f8358fffb918cf6c --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/slice.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H +#define _ASM_POWERPC_BOOK3S_64_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ul) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 
40 +#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) +#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) + +#else /* CONFIG_PPC_MM_SLICES */ + +#define get_slice_psize(mm, addr) ((mm)->context.user_psize) +#define slice_set_user_psize(mm, psize) \ +do { \ + (mm)->context.user_psize = (psize); \ + (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ +} while (0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index ccf10c2f8899f20ad6a5be31bfcb8a774b0cd752..c3bdd2d8ec903211851048fe4bb1005c7acf1ded 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -69,6 +69,27 @@ */ #define EX_R3 EX_DAR +#define STF_ENTRY_BARRIER_SLOT \ + STF_ENTRY_BARRIER_FIXUP_SECTION; \ + nop; \ + nop; \ + nop + +#define STF_EXIT_BARRIER_SLOT \ + STF_EXIT_BARRIER_FIXUP_SECTION; \ + nop; \ + nop; \ + nop; \ + nop; \ + nop; \ + nop + +/* + * r10 must be free to use, r13 must be paca + */ +#define INTERRUPT_TO_KERNEL \ + STF_ENTRY_BARRIER_SLOT + /* * Macros for annotating the expected destination of (h)rfid * @@ -85,16 +106,19 @@ rfid #define RFI_TO_USER \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ rfid; \ b rfi_flush_fallback #define RFI_TO_USER_OR_KERNEL \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ rfid; \ b rfi_flush_fallback #define RFI_TO_GUEST \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ rfid; \ b rfi_flush_fallback @@ -103,21 +127,25 @@ hrfid #define HRFI_TO_USER \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ hrfid; \ b hrfi_flush_fallback #define HRFI_TO_USER_OR_KERNEL \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ hrfid; \ b hrfi_flush_fallback #define HRFI_TO_GUEST \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ hrfid; \ b hrfi_flush_fallback #define HRFI_TO_UNKNOWN \ + STF_EXIT_BARRIER_SLOT; \ RFI_FLUSH_SLOT; \ hrfid; \ b hrfi_flush_fallback @@ -249,6 +277,7 @@ 
END_FTR_SECTION_NESTED(ftr,ftr,943) #define __EXCEPTION_PROLOG_1(area, extra, vec) \ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ + INTERRUPT_TO_KERNEL; \ SAVE_CTR(r10, area); \ mfcr r9; \ extra(vec); \ diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 1e82eb3caabd19c69289957da188b563d0bcd0d6..a9b64df34e2a365a6916c89786d3398f4311413b 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -187,6 +187,22 @@ label##3: \ FTR_ENTRY_OFFSET label##1b-label##3b; \ .popsection; +#define STF_ENTRY_BARRIER_FIXUP_SECTION \ +953: \ + .pushsection __stf_entry_barrier_fixup,"a"; \ + .align 2; \ +954: \ + FTR_ENTRY_OFFSET 953b-954b; \ + .popsection; + +#define STF_EXIT_BARRIER_FIXUP_SECTION \ +955: \ + .pushsection __stf_exit_barrier_fixup,"a"; \ + .align 2; \ +956: \ + FTR_ENTRY_OFFSET 955b-956b; \ + .popsection; + #define RFI_FLUSH_FIXUP_SECTION \ 951: \ .pushsection __rfi_flush_fixup,"a"; \ @@ -199,6 +215,9 @@ label##3: \ #ifndef __ASSEMBLY__ #include +extern long stf_barrier_fallback; +extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; +extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; void apply_feature_fixups(void); diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index eca3f9c689070632492b18e9cc9d55d075024dfe..5a740feb7bd76d2f3a57a0807d39fcb3968a7510 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -337,6 +337,9 @@ #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 +#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 +#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull 
<< 57) // IBM bit 6 +#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h index c6d3078bd8c3b442114326c10345b7484f5a0228..b8b0be8f1a07eec6530fe6a96f88c6dc61dce8d6 100644 --- a/arch/powerpc/include/asm/irq_work.h +++ b/arch/powerpc/include/asm/irq_work.h @@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void) { return true; } +extern void arch_irq_work_raise(void); #endif /* _ASM_POWERPC_IRQ_WORK_H */ diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h index 5bb3dbede41ad3aaf3f07060609b621ed25f7caf..1325e5b5f6801292ff144a9fe898ac7f15fd990b 100644 --- a/arch/powerpc/include/asm/mmu-8xx.h +++ b/arch/powerpc/include/asm/mmu-8xx.h @@ -169,6 +169,12 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; +#ifdef CONFIG_PPC_MM_SLICES + u16 user_psize; /* page size index */ + u64 low_slices_psize; /* page size encodings */ + unsigned char high_slices_psize[0]; + unsigned long addr_limit; +#endif } mm_context_t; #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h new file mode 100644 index 0000000000000000000000000000000000000000..95d532e180922e779cfa212d8f4a5395533484e2 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/slice.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H +#define _ASM_POWERPC_NOHASH_32_SLICE_H + +#ifdef CONFIG_PPC_MM_SLICES + +#define SLICE_LOW_SHIFT 28 +#define SLICE_LOW_TOP (0x100000000ull) +#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) +#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) + +#define SLICE_HIGH_SHIFT 0 +#define SLICE_NUM_HIGH 0ul +#define GET_HIGH_SLICE_INDEX(addr) (addr 
& 0) + +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */ diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h new file mode 100644 index 0000000000000000000000000000000000000000..ad0d6e3cc1c5cbdd5aa1c3899006ba72d5bd0ecb --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/slice.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H +#define _ASM_POWERPC_NOHASH_64_SLICE_H + +#ifdef CONFIG_PPC_64K_PAGES +#define get_slice_psize(mm, addr) MMU_PAGE_64K +#else /* CONFIG_PPC_64K_PAGES */ +#define get_slice_psize(mm, addr) MMU_PAGE_4K +#endif /* !CONFIG_PPC_64K_PAGES */ +#define slice_set_user_psize(mm, psize) do { BUG(); } while (0) + +#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index b8366df50d1956903e6d80d0d2c3a93043f7360e..e6bd59353e40bbf41b12bd3462725d4a94177fda 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -238,8 +238,7 @@ struct paca_struct { */ u64 exrfi[EX_SIZE] __aligned(0x80); void *rfi_flush_fallback_area; - u64 l1d_flush_congruence; - u64 l1d_flush_sets; + u64 l1d_flush_size; #endif }; diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 8da5d4c1cab2b6d52a297117c6e6b27221d941c6..d5f1c41b7dba08f385a1f3565bad04aef137adc7 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -344,5 +344,6 @@ typedef struct page *pgtable_t; #include #endif /* __ASSEMBLY__ */ +#include #endif /* _ASM_POWERPC_PAGE_H */ diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index c4d9654bd637b9f8f477f2438d7a0a75cb306b84..af04acdb873fcc41bd3e45611de2f5b72119cc81 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -86,65 +86,6 @@ extern u64 ppc64_pft_size; #endif /* __ASSEMBLY__ */ -#ifdef CONFIG_PPC_MM_SLICES - 
-#define SLICE_LOW_SHIFT 28 -#define SLICE_HIGH_SHIFT 40 - -#define SLICE_LOW_TOP (0x100000000ul) -#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT) -#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT) - -#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT) -#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT) - -#ifndef __ASSEMBLY__ -struct mm_struct; - -extern unsigned long slice_get_unmapped_area(unsigned long addr, - unsigned long len, - unsigned long flags, - unsigned int psize, - int topdown); - -extern unsigned int get_slice_psize(struct mm_struct *mm, - unsigned long addr); - -extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); -extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, - unsigned long len, unsigned int psize); - -#endif /* __ASSEMBLY__ */ -#else -#define slice_init() -#ifdef CONFIG_PPC_STD_MMU_64 -#define get_slice_psize(mm, addr) ((mm)->context.user_psize) -#define slice_set_user_psize(mm, psize) \ -do { \ - (mm)->context.user_psize = (psize); \ - (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ -} while (0) -#else /* CONFIG_PPC_STD_MMU_64 */ -#ifdef CONFIG_PPC_64K_PAGES -#define get_slice_psize(mm, addr) MMU_PAGE_64K -#else /* CONFIG_PPC_64K_PAGES */ -#define get_slice_psize(mm, addr) MMU_PAGE_4K -#endif /* !CONFIG_PPC_64K_PAGES */ -#define slice_set_user_psize(mm, psize) do { BUG(); } while(0) -#endif /* !CONFIG_PPC_STD_MMU_64 */ - -#define slice_set_range_psize(mm, start, len, psize) \ - slice_set_user_psize((mm), (psize)) -#endif /* CONFIG_PPC_MM_SLICES */ - -#ifdef CONFIG_HUGETLB_PAGE - -#ifdef CONFIG_PPC_MM_SLICES -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - -#endif /* !CONFIG_HUGETLB_PAGE */ - #define VM_DATA_DEFAULT_FLAGS \ (is_32bit_task() ? 
\ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h new file mode 100644 index 0000000000000000000000000000000000000000..44989b22383c24b92caaf3dbb3d9831c79cd967f --- /dev/null +++ b/arch/powerpc/include/asm/security_features.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Security related feature bit definitions. + * + * Copyright 2018, Michael Ellerman, IBM Corporation. + */ + +#ifndef _ASM_POWERPC_SECURITY_FEATURES_H +#define _ASM_POWERPC_SECURITY_FEATURES_H + + +extern unsigned long powerpc_security_features; +extern bool rfi_flush; + +/* These are bit flags */ +enum stf_barrier_type { + STF_BARRIER_NONE = 0x1, + STF_BARRIER_FALLBACK = 0x2, + STF_BARRIER_EIEIO = 0x4, + STF_BARRIER_SYNC_ORI = 0x8, +}; + +void setup_stf_barrier(void); +void do_stf_barrier_fixups(enum stf_barrier_type types); + +static inline void security_ftr_set(unsigned long feature) +{ + powerpc_security_features |= feature; +} + +static inline void security_ftr_clear(unsigned long feature) +{ + powerpc_security_features &= ~feature; +} + +static inline bool security_ftr_enabled(unsigned long feature) +{ + return !!(powerpc_security_features & feature); +} + + +// Features indicating support for Spectre/Meltdown mitigations + +// The L1-D cache can be flushed with ori r30,r30,0 +#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull + +// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2) +#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull + +// ori r31,r31,0 acts as a speculation barrier +#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull + +// Speculation past bctr is disabled +#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull + +// Entries in L1-D are private to a SMT thread +#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull + +// Indirect branch prediction cache disabled +#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull + + +// 
Features indicating need for Spectre/Meltdown mitigations + +// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest) +#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull + +// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace) +#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull + +// A speculation barrier should be used for bounds checks (Spectre variant 1) +#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull + +// Firmware configuration indicates user favours security over performance +#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull + + +// Features enabled by default +#define SEC_FTR_DEFAULT \ + (SEC_FTR_L1D_FLUSH_HV | \ + SEC_FTR_L1D_FLUSH_PR | \ + SEC_FTR_BNDS_CHK_SPEC_BAR | \ + SEC_FTR_FAVOUR_SECURITY) + +#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 469b7fdc9be41cd9ab0ceb7f50c2b633c19f01a4..bbcdf929be544264142e60f43e5b4a096db51bfc 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -49,7 +49,7 @@ enum l1d_flush_type { L1D_FLUSH_MTTRIG = 0x8, }; -void __init setup_rfi_flush(enum l1d_flush_type, bool enable); +void setup_rfi_flush(enum l1d_flush_type, bool enable); void do_rfi_flush_fixups(enum l1d_flush_type types); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h new file mode 100644 index 0000000000000000000000000000000000000000..172711fadb1c5dd4f52772b1d840f4c679dabb0e --- /dev/null +++ b/arch/powerpc/include/asm/slice.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_SLICE_H +#define _ASM_POWERPC_SLICE_H + +#ifdef CONFIG_PPC_BOOK3S_64 +#include +#elif defined(CONFIG_PPC64) +#include +#elif defined(CONFIG_PPC_MMU_NOHASH) +#include +#endif + +#ifdef CONFIG_PPC_MM_SLICES + +#ifdef CONFIG_HUGETLB_PAGE +#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#endif +#define HAVE_ARCH_UNMAPPED_AREA 
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#ifndef __ASSEMBLY__ + +struct mm_struct; + +unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + unsigned long flags, unsigned int psize, + int topdown); + +unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr); + +void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); +void slice_set_range_psize(struct mm_struct *mm, unsigned long start, + unsigned long len, unsigned int psize); +#endif /* __ASSEMBLY__ */ + +#else /* CONFIG_PPC_MM_SLICES */ + +#define slice_set_range_psize(mm, start, len, psize) \ + slice_set_user_psize((mm), (psize)) +#endif /* CONFIG_PPC_MM_SLICES */ + +#endif /* _ASM_POWERPC_SLICE_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 6c6cce937dd81bf75cf850001dd61f673412f1db..1479c61e29c5494c7ae21c945e70f5c0e5ee1b6d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32) += vdso32/ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o obj-$(CONFIG_PPC64) += vdso64/ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 748cdc4bb89ab41488e6d8f49ffc6baeb2998a0f..2e5ea300258a0673453e9739c5382c487719e09b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -239,8 +239,7 @@ int main(void) OFFSET(PACA_IN_NMI, paca_struct, in_nmi); OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); OFFSET(PACA_EXRFI, paca_struct, exrfi); - OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); - OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, 
l1d_flush_sets); + OFFSET(PACA_L1D_FLUSH_SIZE, paca_struct, l1d_flush_size); #endif OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 679bbe714e8561b8c259f37bbfc9db8264555554..9daede99c1316182205fbe0c13ad20af33c1c388 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR li r4,(LPCR_LPES1 >> LPCR_LPES_SH) bl __init_LPCR_ISA206 @@ -42,6 +43,7 @@ _GLOBAL(__restore_cpu_power7) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR li r4,(LPCR_LPES1 >> LPCR_LPES_SH) bl __init_LPCR_ISA206 @@ -59,6 +61,7 @@ _GLOBAL(__setup_cpu_power8) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR ori r3, r3, LPCR_PECEDH li r4,0 /* LPES = 0 */ @@ -81,6 +84,7 @@ _GLOBAL(__restore_cpu_power8) beqlr li r0,0 mtspr SPRN_LPID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR ori r3, r3, LPCR_PECEDH li r4,0 /* LPES = 0 */ @@ -103,6 +107,7 @@ _GLOBAL(__setup_cpu_power9) mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 mtspr SPRN_PID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 @@ -128,6 +133,7 @@ _GLOBAL(__restore_cpu_power9) mtspr SPRN_PSSCR,r0 mtspr SPRN_LPID,r0 mtspr SPRN_PID,r0 + mtspr SPRN_PCR,r0 mfspr r3,SPRN_LPCR LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) or r3, r3, r4 diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index f047ae1b62712ca1b2dca4d704c7d6b366c08bfc..2dba206b065aa0c673822c39efd6ef9428c9907b 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -137,6 +137,7 @@ static void __restore_cpu_cpufeatures(void) if (hv_mode) { mtspr(SPRN_LPID, 0); mtspr(SPRN_HFSCR, system_registers.hfscr); + mtspr(SPRN_PCR, 0); } 
mtspr(SPRN_FSCR, system_registers.fscr); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f9ca4bb3d48ea14ff1652e7b9cec381a8cd57581..c09f0a6f84954260775a792c9b600d8a3210d2dd 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -825,7 +825,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif -EXC_REAL_MASKABLE(decrementer, 0x900, 0x80) +EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80) EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900) TRAMP_KVM(PACA_EXGEN, 0x900) EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) @@ -901,6 +901,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) mtctr r13; \ GET_PACA(r13); \ std r10,PACA_EXGEN+EX_R10(r13); \ + INTERRUPT_TO_KERNEL; \ KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \ HMT_MEDIUM; \ mfctr r9; @@ -909,7 +910,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) #define SYSCALL_KVMTEST \ HMT_MEDIUM; \ mr r9,r13; \ - GET_PACA(r13); + GET_PACA(r13); \ + INTERRUPT_TO_KERNEL; #endif #define LOAD_SYSCALL_HANDLER(reg) \ @@ -1434,45 +1436,56 @@ masked_##_H##interrupt: \ b .; \ MASKED_DEC_HANDLER(_H) +TRAMP_REAL_BEGIN(stf_barrier_fallback) + std r9,PACA_EXRFI+EX_R9(r13) + std r10,PACA_EXRFI+EX_R10(r13) + sync + ld r9,PACA_EXRFI+EX_R9(r13) + ld r10,PACA_EXRFI+EX_R10(r13) + ori 31,31,0 + .rept 14 + b 1f +1: + .endr + blr + TRAMP_REAL_BEGIN(rfi_flush_fallback) SET_SCRATCH0(r13); GET_PACA(r13); std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) - std r12,PACA_EXRFI+EX_R12(r13) - std r8,PACA_EXRFI+EX_R13(r13) mfctr r9 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) - ld r11,PACA_L1D_FLUSH_SETS(r13) - ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) - /* - * The load adresses are at staggered offsets within cachelines, - * which suits some pipelines better (on others it should not - * hurt). 
- */ - addi r12,r12,8 + ld r11,PACA_L1D_FLUSH_SIZE(r13) + srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ mtctr r11 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ /* order ld/st prior to dcbt stop all streams with flushing */ sync -1: li r8,0 - .rept 8 /* 8-way set associative */ - ldx r11,r10,r8 - add r8,r8,r12 - xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not - add r8,r8,r11 // Add 0, this creates a dependency on the ldx - .endr - addi r10,r10,128 /* 128 byte cache line */ + + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). + */ +1: + ld r11,(0x80 + 8)*0(r10) + ld r11,(0x80 + 8)*1(r10) + ld r11,(0x80 + 8)*2(r10) + ld r11,(0x80 + 8)*3(r10) + ld r11,(0x80 + 8)*4(r10) + ld r11,(0x80 + 8)*5(r10) + ld r11,(0x80 + 8)*6(r10) + ld r11,(0x80 + 8)*7(r10) + addi r10,r10,0x80*8 bdnz 1b mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) - ld r12,PACA_EXRFI+EX_R12(r13) - ld r8,PACA_EXRFI+EX_R13(r13) GET_SCRATCH0(r13); rfid @@ -1482,39 +1495,37 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback) std r9,PACA_EXRFI+EX_R9(r13) std r10,PACA_EXRFI+EX_R10(r13) std r11,PACA_EXRFI+EX_R11(r13) - std r12,PACA_EXRFI+EX_R12(r13) - std r8,PACA_EXRFI+EX_R13(r13) mfctr r9 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) - ld r11,PACA_L1D_FLUSH_SETS(r13) - ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) - /* - * The load adresses are at staggered offsets within cachelines, - * which suits some pipelines better (on others it should not - * hurt). 
- */ - addi r12,r12,8 + ld r11,PACA_L1D_FLUSH_SIZE(r13) + srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ mtctr r11 DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ /* order ld/st prior to dcbt stop all streams with flushing */ sync -1: li r8,0 - .rept 8 /* 8-way set associative */ - ldx r11,r10,r8 - add r8,r8,r12 - xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not - add r8,r8,r11 // Add 0, this creates a dependency on the ldx - .endr - addi r10,r10,128 /* 128 byte cache line */ + + /* + * The load adresses are at staggered offsets within cachelines, + * which suits some pipelines better (on others it should not + * hurt). + */ +1: + ld r11,(0x80 + 8)*0(r10) + ld r11,(0x80 + 8)*1(r10) + ld r11,(0x80 + 8)*2(r10) + ld r11,(0x80 + 8)*3(r10) + ld r11,(0x80 + 8)*4(r10) + ld r11,(0x80 + 8)*5(r10) + ld r11,(0x80 + 8)*6(r10) + ld r11,(0x80 + 8)*7(r10) + addi r10,r10,0x80*8 bdnz 1b mtctr r9 ld r9,PACA_EXRFI+EX_R9(r13) ld r10,PACA_EXRFI+EX_R10(r13) ld r11,PACA_EXRFI+EX_R11(r13) - ld r12,PACA_EXRFI+EX_R12(r13) - ld r8,PACA_EXRFI+EX_R13(r13) GET_SCRATCH0(r13); hrfid diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 1125c9be9e067683c43c583d1672b1b6d3269840..e35cebd45c35e0bfb90075a99a7d8de54f1d24ab 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -838,6 +838,8 @@ BEGIN_FTR_SECTION mtspr SPRN_PTCR,r4 ld r4,_RPR(r1) mtspr SPRN_RPR,r4 + ld r4,_AMOR(r1) + mtspr SPRN_AMOR,r4 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r4,_TSCR(r1) diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c new file mode 100644 index 0000000000000000000000000000000000000000..b98a722da9151bd41351de9448b21bfbc417cd6b --- /dev/null +++ b/arch/powerpc/kernel/security.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Security related flags and so on. +// +// Copyright 2018, Michael Ellerman, IBM Corporation. 
+ +#include +#include +#include + +#include +#include + + +unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; + +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + bool thread_priv; + + thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); + + if (rfi_flush || thread_priv) { + struct seq_buf s; + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + seq_buf_printf(&s, "Mitigation: "); + + if (rfi_flush) + seq_buf_printf(&s, "RFI Flush"); + + if (rfi_flush && thread_priv) + seq_buf_printf(&s, ", "); + + if (thread_priv) + seq_buf_printf(&s, "L1D private per thread"); + + seq_buf_printf(&s, "\n"); + + return s.len; + } + + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) +{ + bool bcs, ccd, ori; + struct seq_buf s; + + seq_buf_init(&s, buf, PAGE_SIZE - 1); + + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); + ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); + + if (bcs || ccd) { + seq_buf_printf(&s, "Mitigation: "); + + if (bcs) + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); + + if (bcs && ccd) + seq_buf_printf(&s, ", "); + + if (ccd) + seq_buf_printf(&s, "Indirect branch cache disabled"); + } else + seq_buf_printf(&s, "Vulnerable"); + + if (ori) + seq_buf_printf(&s, ", ori31 speculation barrier enabled"); + + seq_buf_printf(&s, "\n"); + + return s.len; +} + +/* + * Store-forwarding barrier support. 
+ */ + +static enum stf_barrier_type stf_enabled_flush_types; +static bool no_stf_barrier; +bool stf_barrier; + +static int __init handle_no_stf_barrier(char *p) +{ + pr_info("stf-barrier: disabled on command line."); + no_stf_barrier = true; + return 0; +} + +early_param("no_stf_barrier", handle_no_stf_barrier); + +/* This is the generic flag used by other architectures */ +static int __init handle_ssbd(char *p) +{ + if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { + /* Until firmware tells us, we have the barrier with auto */ + return 0; + } else if (strncmp(p, "off", 3) == 0) { + handle_no_stf_barrier(NULL); + return 0; + } else + return 1; + + return 0; +} +early_param("spec_store_bypass_disable", handle_ssbd); + +/* This is the generic flag used by other architectures */ +static int __init handle_no_ssbd(char *p) +{ + handle_no_stf_barrier(NULL); + return 0; +} +early_param("nospec_store_bypass_disable", handle_no_ssbd); + +static void stf_barrier_enable(bool enable) +{ + if (enable) + do_stf_barrier_fixups(stf_enabled_flush_types); + else + do_stf_barrier_fixups(STF_BARRIER_NONE); + + stf_barrier = enable; +} + +void setup_stf_barrier(void) +{ + enum stf_barrier_type type; + bool enable, hv; + + hv = cpu_has_feature(CPU_FTR_HVMODE); + + /* Default to fallback in case fw-features are not available */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + type = STF_BARRIER_EIEIO; + else if (cpu_has_feature(CPU_FTR_ARCH_207S)) + type = STF_BARRIER_SYNC_ORI; + else if (cpu_has_feature(CPU_FTR_ARCH_206)) + type = STF_BARRIER_FALLBACK; + else + type = STF_BARRIER_NONE; + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); + + if (type == STF_BARRIER_FALLBACK) { + pr_info("stf-barrier: fallback barrier available\n"); + } else if (type == STF_BARRIER_SYNC_ORI) { + pr_info("stf-barrier: hwsync barrier available\n"); + } else if (type == 
STF_BARRIER_EIEIO) { + pr_info("stf-barrier: eieio barrier available\n"); + } + + stf_enabled_flush_types = type; + + if (!no_stf_barrier) + stf_barrier_enable(enable); +} + +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) +{ + if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { + const char *type; + switch (stf_enabled_flush_types) { + case STF_BARRIER_EIEIO: + type = "eieio"; + break; + case STF_BARRIER_SYNC_ORI: + type = "hwsync"; + break; + case STF_BARRIER_FALLBACK: + type = "fallback"; + break; + default: + type = "unknown"; + } + return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); + } + + if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && + !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) + return sprintf(buf, "Not affected\n"); + + return sprintf(buf, "Vulnerable\n"); +} + +#ifdef CONFIG_DEBUG_FS +static int stf_barrier_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + /* Only do anything if we're changing state */ + if (enable != stf_barrier) + stf_barrier_enable(enable); + + return 0; +} + +static int stf_barrier_get(void *data, u64 *val) +{ + *val = stf_barrier ? 
1 : 0; + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n"); + +static __init int stf_barrier_debugfs_init(void) +{ + debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier); + return 0; +} +device_initcall(stf_barrier_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 90bc20efb4c7b7bc20d840eed1505e015cdca210..0084476646430ea0fda4c19edb3189d7d9f499be 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned short maj; unsigned short min; - /* We only show online cpus: disable preempt (overzealous, I - * knew) to prevent cpu going down. */ - preempt_disable(); - if (!cpu_online(cpu_id)) { - preempt_enable(); - return 0; - } - #ifdef CONFIG_SMP pvr = per_cpu(cpu_pvr, cpu_id); #else @@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef CONFIG_SMP seq_printf(m, "\n"); #endif - - preempt_enable(); - /* If this is the last cpu, print the summary */ if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) show_cpuinfo_summary(m); @@ -926,6 +915,8 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_PPC_MM_SLICES #ifdef CONFIG_PPC64 init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64; +#elif defined(CONFIG_PPC_8xx) + init_mm.context.addr_limit = DEFAULT_MAP_WINDOW; #else #error "context.addr_limit not initialized." 
#endif diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 9527a4c6cbc27caa2f03c95c686512f9abe71fa1..0618aa61b26a4e91b0da643254bd97fd67c389b2 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -822,9 +822,6 @@ static void do_nothing(void *unused) void rfi_flush_enable(bool enable) { - if (rfi_flush == enable) - return; - if (enable) { do_rfi_flush_fixups(enabled_flush_types); on_each_cpu(do_nothing, NULL, 1); @@ -834,11 +831,15 @@ void rfi_flush_enable(bool enable) rfi_flush = enable; } -static void init_fallback_flush(void) +static void __ref init_fallback_flush(void) { u64 l1d_size, limit; int cpu; + /* Only allocate the fallback flush area once (at boot time). */ + if (l1d_flush_fallback_area) + return; + l1d_size = ppc64_caches.l1d.size; limit = min(safe_stack_limit(), ppc64_rma_size); @@ -851,34 +852,23 @@ static void init_fallback_flush(void) memset(l1d_flush_fallback_area, 0, l1d_size * 2); for_each_possible_cpu(cpu) { - /* - * The fallback flush is currently coded for 8-way - * associativity. Different associativity is possible, but it - * will be treated as 8-way and may not evict the lines as - * effectively. - * - * 128 byte lines are mandatory. 
- */ - u64 c = l1d_size / 8; - paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; - paca[cpu].l1d_flush_congruence = c; - paca[cpu].l1d_flush_sets = c / 128; + paca[cpu].l1d_flush_size = l1d_size; } } -void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) +void setup_rfi_flush(enum l1d_flush_type types, bool enable) { if (types & L1D_FLUSH_FALLBACK) { - pr_info("rfi-flush: Using fallback displacement flush\n"); + pr_info("rfi-flush: fallback displacement flush available\n"); init_fallback_flush(); } if (types & L1D_FLUSH_ORI) - pr_info("rfi-flush: Using ori type flush\n"); + pr_info("rfi-flush: ori type flush available\n"); if (types & L1D_FLUSH_MTTRIG) - pr_info("rfi-flush: Using mttrig type flush\n"); + pr_info("rfi-flush: mttrig type flush available\n"); enabled_flush_types = types; @@ -889,13 +879,19 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) #ifdef CONFIG_DEBUG_FS static int rfi_flush_set(void *data, u64 val) { + bool enable; + if (val == 1) - rfi_flush_enable(true); + enable = true; else if (val == 0) - rfi_flush_enable(false); + enable = false; else return -EINVAL; + /* Only do anything if we're changing state */ + if (enable != rfi_flush) + rfi_flush_enable(enable); + return 0; } @@ -914,12 +910,4 @@ static __init int rfi_flush_debugfs_init(void) } device_initcall(rfi_flush_debugfs_init); #endif - -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) -{ - if (rfi_flush) - return sprintf(buf, "Mitigation: RFI Flush\n"); - - return sprintf(buf, "Vulnerable\n"); -} #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d17007451f625a90c95f32d9835e82ae2d21be68..ac2e5e56a9f0c36a6fc9cae7182e9b8cedfe760e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -182,6 +182,12 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, } raw_local_irq_restore(flags); + /* + * system_reset_excption 
handles debugger, crash dump, panic, for 0x100 + */ + if (TRAP(regs) == 0x100) + return; + crash_fadump(regs, "die oops"); if (kexec_should_crash(current)) @@ -246,8 +252,13 @@ void die(const char *str, struct pt_regs *regs, long err) { unsigned long flags; - if (debugger(regs)) - return; + /* + * system_reset_exception handles debugger, crash dump, panic, for 0x100 + */ + if (TRAP(regs) != 0x100) { + if (debugger(regs)) + return; + } flags = oops_begin(regs); if (__die(str, regs, err)) @@ -1379,6 +1390,22 @@ void facility_unavailable_exception(struct pt_regs *regs) value = mfspr(SPRN_FSCR); status = value >> 56; + if ((hv || status >= 2) && + (status < ARRAY_SIZE(facility_strings)) && + facility_strings[status]) + facility = facility_strings[status]; + + /* We should not have taken this interrupt in kernel */ + if (!user_mode(regs)) { + pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n", + facility, status, regs->nip); + die("Unexpected facility unavailable exception", regs, SIGABRT); + } + + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + if (status == FSCR_DSCR_LG) { /* * User is accessing the DSCR register using the problem @@ -1445,25 +1472,11 @@ void facility_unavailable_exception(struct pt_regs *regs) return; } - if ((hv || status >= 2) && - (status < ARRAY_SIZE(facility_strings)) && - facility_strings[status]) - facility = facility_strings[status]; - - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); - pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", hv ?
"Hypervisor " : "", facility, status, regs->nip, regs->msr); out: - if (user_mode(regs)) { - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - return; - } - - die("Unexpected facility unavailable exception", regs, SIGABRT); + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } #endif diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 307843d23682a79f0e4d9ae0cdb86a219e0b6e6a..c89ffb88fa3ba26ead303b8fffc594bb8ceecb08 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -133,6 +133,20 @@ SECTIONS RO_DATA(PAGE_SIZE) #ifdef CONFIG_PPC64 + . = ALIGN(8); + __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) { + __start___stf_entry_barrier_fixup = .; + *(__stf_entry_barrier_fixup) + __stop___stf_entry_barrier_fixup = .; + } + + . = ALIGN(8); + __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { + __start___stf_exit_barrier_fixup = .; + *(__stf_exit_barrier_fixup) + __stop___stf_exit_barrier_fixup = .; + } + . 
= ALIGN(8); __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { __start___rfi_flush_fixup = .; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index d0c0b8443dcfc0e6e56a347a449d65db9640193b..762a899e85a45aa21b865386fd140fd47a4d747c 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -23,6 +23,7 @@ #include #include #include +#include #include struct fixup_entry { @@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } #ifdef CONFIG_PPC_BOOK3S_64 +void do_stf_entry_barrier_fixups(enum stf_barrier_type types) +{ + unsigned int instrs[3], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___stf_entry_barrier_fixup), + end = PTRRELOC(&__stop___stf_entry_barrier_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + + i = 0; + if (types & STF_BARRIER_FALLBACK) { + instrs[i++] = 0x7d4802a6; /* mflr r10 */ + instrs[i++] = 0x60000000; /* branch patched below */ + instrs[i++] = 0x7d4803a6; /* mtlr r10 */ + } else if (types & STF_BARRIER_EIEIO) { + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ + } else if (types & STF_BARRIER_SYNC_ORI) { + instrs[i++] = 0x7c0004ac; /* hwsync */ + instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */ + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + } + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + + if (types & STF_BARRIER_FALLBACK) + patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback, + BRANCH_SET_LINK); + else + patch_instruction(dest + 1, instrs[1]); + + patch_instruction(dest + 2, instrs[2]); + } + + printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i, + (types == STF_BARRIER_NONE) ? "no" : + (types == STF_BARRIER_FALLBACK) ? 
"fallback" : + (types == STF_BARRIER_EIEIO) ? "eieio" : + (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync" + : "unknown"); +} + +void do_stf_exit_barrier_fixups(enum stf_barrier_type types) +{ + unsigned int instrs[6], *dest; + long *start, *end; + int i; + + start = PTRRELOC(&__start___stf_exit_barrier_fixup), + end = PTRRELOC(&__stop___stf_exit_barrier_fixup); + + instrs[0] = 0x60000000; /* nop */ + instrs[1] = 0x60000000; /* nop */ + instrs[2] = 0x60000000; /* nop */ + instrs[3] = 0x60000000; /* nop */ + instrs[4] = 0x60000000; /* nop */ + instrs[5] = 0x60000000; /* nop */ + + i = 0; + if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) { + if (cpu_has_feature(CPU_FTR_HVMODE)) { + instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */ + instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */ + } else { + instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */ + instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */ + } + instrs[i++] = 0x7c0004ac; /* hwsync */ + instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */ + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */ + } else { + instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */ + } + } else if (types & STF_BARRIER_EIEIO) { + instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */ + } + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + + patch_instruction(dest, instrs[0]); + patch_instruction(dest + 1, instrs[1]); + patch_instruction(dest + 2, instrs[2]); + patch_instruction(dest + 3, instrs[3]); + patch_instruction(dest + 4, instrs[4]); + patch_instruction(dest + 5, instrs[5]); + } + printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i, + (types == STF_BARRIER_NONE) ? "no" : + (types == STF_BARRIER_FALLBACK) ? "fallback" : + (types == STF_BARRIER_EIEIO) ? "eieio" : + (types == (STF_BARRIER_SYNC_ORI)) ? 
"hwsync" + : "unknown"); +} + + +void do_stf_barrier_fixups(enum stf_barrier_type types) +{ + do_stf_entry_barrier_fixups(types); + do_stf_exit_barrier_fixups(types); +} + void do_rfi_flush_fixups(enum l1d_flush_type types) { unsigned int instrs[3], *dest; @@ -153,7 +268,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types) patch_instruction(dest + 2, instrs[2]); } - printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); + printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i, + (types == L1D_FLUSH_NONE) ? "no" : + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) + ? "ori+mttrig type" + : "ori type" : + (types & L1D_FLUSH_MTTRIG) ? "mttrig type" + : "unknown"); } #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index f29212e40f40928e9d4b7e5c5a28bc294e8d0051..0be77709446cc9320b979a52a18de6875f03f092 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd) mtspr(SPRN_M_TW, __pa(pgd) - offset); /* Update context */ - mtspr(SPRN_M_CASID, id); + mtspr(SPRN_M_CASID, id - 1); /* sync */ mb(); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 1571a498a33fc6a688713f4f275b7b55cedabace..4c9e5f9c7a44d931057e9ea7ef259ba644a42762 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -552,9 +552,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, struct hstate *hstate = hstate_file(file); int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate)); +#ifdef CONFIG_PPC_RADIX_MMU if (radix_enabled()) return radix__hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); +#endif return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1); } #endif diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 
4554d65276826e9719e7b69529003f7873483582..e2b28b3a512e378cfe8de496bccbf67ddd2bd509 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) { pr_hard("initing context for mm @%p\n", mm); +#ifdef CONFIG_PPC_MM_SLICES + if (!mm->context.addr_limit) + mm->context.addr_limit = DEFAULT_MAP_WINDOW; + + /* + * We have MMU_NO_CONTEXT set to be ~0. Hence check + * explicitly against context.id == 0. This ensures that we properly + * initialize context slice details for newly allocated mm's (which will + * have id == 0) and don't alter context slice inherited via fork (which + * will have id != 0). + */ + if (mm->context.id == 0) + slice_set_user_psize(mm, mmu_virtual_psize); +#endif mm->context.id = MMU_NO_CONTEXT; mm->context.active = 0; return 0; @@ -428,8 +442,8 @@ void __init mmu_context_init(void) * -- BenH */ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { - first_context = 0; - last_context = 15; + first_context = 1; + last_context = 16; no_selective_tlbil = true; } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { first_context = 1; diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index a4f93699194b6ab7c71ca00374efa94d35d816ef..8baaa6c6f21ce2dd00026496ca648df735b69665 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, unsigned long end = start + len - 1; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); if (start < SLICE_LOW_TOP) { - unsigned long mend = min(end, (SLICE_LOW_TOP - 1)); + unsigned long mend = min(end, + (unsigned long)(SLICE_LOW_TOP - 1)); ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1)) - (1u << GET_LOW_SLICE_INDEX(start)); @@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) 
unsigned long start = slice << SLICE_HIGH_SHIFT; unsigned long end = start + (1ul << SLICE_HIGH_SHIFT); +#ifdef CONFIG_PPC64 /* Hack, so that each addresses is controlled by exactly one * of the high or low area bitmaps, the first high area starts * at 4GB, not 0 */ if (start == 0) start = SLICE_LOW_TOP; +#endif return !slice_area_is_free(mm, start, end - start); } @@ -127,7 +131,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) unsigned long i; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); for (i = 0; i < SLICE_NUM_LOW; i++) if (!slice_low_has_vma(mm, i)) @@ -149,7 +154,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma u64 lpsizes; ret->low_slices = 0; - bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); + if (SLICE_NUM_HIGH) + bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); lpsizes = mm->context.low_slices_psize; for (i = 0; i < SLICE_NUM_LOW; i++) @@ -171,6 +177,10 @@ static int slice_check_fit(struct mm_struct *mm, DECLARE_BITMAP(result, SLICE_NUM_HIGH); unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit); + if (!SLICE_NUM_HIGH) + return (mask.low_slices & available.low_slices) == + mask.low_slices; + bitmap_and(result, mask.high_slices, available.high_slices, slice_count); @@ -180,6 +190,7 @@ static int slice_check_fit(struct mm_struct *mm, static void slice_flush_segments(void *parm) { +#ifdef CONFIG_PPC64 struct mm_struct *mm = parm; unsigned long flags; @@ -191,6 +202,7 @@ static void slice_flush_segments(void *parm) local_irq_save(flags); slb_flush_and_rebolt(); local_irq_restore(flags); +#endif } static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) @@ -379,21 +391,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len, static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src) { - 
DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices |= src->low_slices; - bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + if (!SLICE_NUM_HIGH) + return; + bitmap_or(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src) { - DECLARE_BITMAP(result, SLICE_NUM_HIGH); - dst->low_slices &= ~src->low_slices; - bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH); - bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH); + if (!SLICE_NUM_HIGH) + return; + bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices, + SLICE_NUM_HIGH); } #ifdef CONFIG_PPC_64K_PAGES @@ -441,14 +453,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, * init different masks */ mask.low_slices = 0; - bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); /* silence stupid warning */; potential_mask.low_slices = 0; - bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); compat_mask.low_slices = 0; - bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + + if (SLICE_NUM_HIGH) { + bitmap_zero(mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH); + bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH); + } /* Sanity checks */ BUG_ON(mm->task_size == 0); @@ -586,7 +601,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, convert: slice_andnot_mask(&mask, &good_mask); slice_andnot_mask(&mask, &compat_mask); - if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) { + if (mask.low_slices || + (SLICE_NUM_HIGH && + !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) { slice_convert(mm, mask, psize); if (psize > MMU_PAGE_BASE) on_each_cpu(slice_flush_segments, mm, 1); diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 
f9941b3b5770fca641d2f4a8cf70dbb727fe8b70..f760494ecd66d6fd9eb33a478e0534bd4fc355cd 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -329,6 +329,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); break; + case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ + PPC_LWZ_OFFS(r_A, r_skb, K); + break; case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); break; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index fce545774d50afc6093c28ad2f4127c24ed5331c..b7a6044161e8698e82109e52f9f3fed7b310bdd9 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -457,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) /* invalid entry */ continue; + /* + * BHRB rolling buffer could very much contain the kernel + * addresses at this point. Check the privileges before + * exporting it to userspace (avoid exposure of regions + * where we could have speculative execution) + */ + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) && + is_kernel_addr(addr)) + continue; + /* Branches are read most recent first (ie. mfbhrb 0 is * the most recent branch). 
* There are two types of valid entries: @@ -1226,6 +1236,7 @@ static void power_pmu_disable(struct pmu *pmu) */ write_mmcr0(cpuhw, val); mb(); + isync(); /* * Disable instruction sampling if it was enabled @@ -1234,12 +1245,26 @@ static void power_pmu_disable(struct pmu *pmu) mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); + isync(); } cpuhw->disabled = 1; cpuhw->n_added = 0; ebb_switch_out(mmcr0); + +#ifdef CONFIG_PPC64 + /* + * These are readable by userspace, may contain kernel + * addresses and are not switched by context switch, so clear + * them now to avoid leaking anything to userspace in general + * including to another process. + */ + if (ppmu->flags & PPMU_ARCH_207S) { + mtspr(SPRN_SDAR, 0); + mtspr(SPRN_SIAR, 0); + } +#endif } local_irq_restore(flags); diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index a78f255111f21469492bf663ab8b935fa9a53cb2..3ce376b42330b07c5c619012450aa2db4d5ab51a 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -325,6 +325,7 @@ config PPC_BOOK3E_MMU config PPC_MM_SLICES bool default y if PPC_STD_MMU_64 + default y if PPC_8xx && HUGETLB_PAGE default n config PPC_HAVE_PMU_SUPPORT diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 4043109f4051eb5b5a90f46213ea7c5d4c68bd78..63f007f2de7eb295b9bb78821e2dca596a96e56f 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -413,6 +413,11 @@ struct npu_context { void *priv; }; +struct mmio_atsd_reg { + struct npu *npu; + int reg; +}; + /* * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC * if none are available. 
@@ -422,7 +427,7 @@ static int get_mmio_atsd_reg(struct npu *npu) int i; for (i = 0; i < npu->mmio_atsd_count; i++) { - if (!test_and_set_bit(i, &npu->mmio_atsd_usage)) + if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage)) return i; } @@ -431,86 +436,90 @@ static int get_mmio_atsd_reg(struct npu *npu) static void put_mmio_atsd_reg(struct npu *npu, int reg) { - clear_bit(reg, &npu->mmio_atsd_usage); + clear_bit_unlock(reg, &npu->mmio_atsd_usage); } /* MMIO ATSD register offsets */ #define XTS_ATSD_AVA 1 #define XTS_ATSD_STAT 2 -static int mmio_launch_invalidate(struct npu *npu, unsigned long launch, - unsigned long va) +static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg, + unsigned long launch, unsigned long va) { - int mmio_atsd_reg; - - do { - mmio_atsd_reg = get_mmio_atsd_reg(npu); - cpu_relax(); - } while (mmio_atsd_reg < 0); + struct npu *npu = mmio_atsd_reg->npu; + int reg = mmio_atsd_reg->reg; __raw_writeq(cpu_to_be64(va), - npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA); + npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); eieio(); - __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]); - - return mmio_atsd_reg; + __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]); } -static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) +static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], + unsigned long pid, bool flush) { + int i; unsigned long launch; - /* IS set to invalidate matching PID */ - launch = PPC_BIT(12); + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* IS set to invalidate matching PID */ + launch = PPC_BIT(12); - /* PRS set to process-scoped */ - launch |= PPC_BIT(13); + /* PRS set to process-scoped */ + launch |= PPC_BIT(13); - /* AP */ - launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* AP */ + launch |= (u64) + mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); - /* PID */ - launch |= pid << 
PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); - /* Invalidating the entire process doesn't use a va */ - return mmio_launch_invalidate(npu, launch, 0); + /* Invalidating the entire process doesn't use a va */ + mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0); + } } -static int mmio_invalidate_va(struct npu *npu, unsigned long va, - unsigned long pid, bool flush) +static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], + unsigned long va, unsigned long pid, bool flush) { + int i; unsigned long launch; - /* IS set to invalidate target VA */ - launch = 0; + for (i = 0; i <= max_npu2_index; i++) { + if (mmio_atsd_reg[i].reg < 0) + continue; + + /* IS set to invalidate target VA */ + launch = 0; - /* PRS set to process scoped */ - launch |= PPC_BIT(13); + /* PRS set to process scoped */ + launch |= PPC_BIT(13); - /* AP */ - launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* AP */ + launch |= (u64) + mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* No flush */ + launch |= !flush << PPC_BITLSHIFT(39); - return mmio_launch_invalidate(npu, launch, va); + mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va); + } } #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) -struct mmio_atsd_reg { - struct npu *npu; - int reg; -}; - static void mmio_invalidate_wait( - struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) { struct npu *npu; int i, reg; @@ -525,16 +534,67 @@ static void mmio_invalidate_wait( reg = mmio_atsd_reg[i].reg; while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) cpu_relax(); + } +} + +/* + * Acquires all the address translation 
shootdown (ATSD) registers required to + * launch an ATSD on all links this npu_context is active on. + */ +static void acquire_atsd_reg(struct npu_context *npu_context, + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) +{ + int i, j; + struct npu *npu; + struct pci_dev *npdev; + struct pnv_phb *nphb; - put_mmio_atsd_reg(npu, reg); + for (i = 0; i <= max_npu2_index; i++) { + mmio_atsd_reg[i].reg = -1; + for (j = 0; j < NV_MAX_LINKS; j++) { + /* + * There are no ordering requirements with respect to + * the setup of struct npu_context, but to ensure + * consistent behaviour we need to ensure npdev[][] is + * only read once. + */ + npdev = READ_ONCE(npu_context->npdev[i][j]); + if (!npdev) + continue; + nphb = pci_bus_to_host(npdev->bus)->private_data; + npu = &nphb->npu; + mmio_atsd_reg[i].npu = npu; + mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); + while (mmio_atsd_reg[i].reg < 0) { + mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu); + cpu_relax(); + } + break; + } + } +} + +/* + * Release previously acquired ATSD registers. To avoid deadlocks the registers + * must be released in the same order they were acquired above in + * acquire_atsd_reg. + */ +static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) +{ + int i; + + for (i = 0; i <= max_npu2_index; i++) { /* - * The GPU requires two flush ATSDs to ensure all entries have - * been flushed. We use PID 0 as it will never be used for a - * process on the GPU. + * We can't rely on npu_context->npdev[][] being the same here + * as when acquire_atsd_reg() was called, hence we use the + * values stored in mmio_atsd_reg during the acquire phase + * rather than re-reading npdev[][]. 
*/ - if (flush) - mmio_invalidate_pid(npu, 0, true); + if (mmio_atsd_reg[i].reg < 0) + continue; + + put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg); } } @@ -545,10 +605,6 @@ static void mmio_invalidate_wait( static void mmio_invalidate(struct npu_context *npu_context, int va, unsigned long address, bool flush) { - int i, j; - struct npu *npu; - struct pnv_phb *nphb; - struct pci_dev *npdev; struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; unsigned long pid = npu_context->mm->context.id; @@ -562,37 +618,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, * Loop over all the NPUs this process is active on and launch * an invalidate. */ - for (i = 0; i <= max_npu2_index; i++) { - mmio_atsd_reg[i].reg = -1; - for (j = 0; j < NV_MAX_LINKS; j++) { - npdev = npu_context->npdev[i][j]; - if (!npdev) - continue; - - nphb = pci_bus_to_host(npdev->bus)->private_data; - npu = &nphb->npu; - mmio_atsd_reg[i].npu = npu; - - if (va) - mmio_atsd_reg[i].reg = - mmio_invalidate_va(npu, address, pid, - flush); - else - mmio_atsd_reg[i].reg = - mmio_invalidate_pid(npu, pid, flush); - - /* - * The NPU hardware forwards the shootdown to all GPUs - * so we only have to launch one shootdown per NPU. - */ - break; - } + acquire_atsd_reg(npu_context, mmio_atsd_reg); + if (va) + mmio_invalidate_va(mmio_atsd_reg, address, pid, flush); + else + mmio_invalidate_pid(mmio_atsd_reg, pid, flush); + + mmio_invalidate_wait(mmio_atsd_reg); + if (flush) { + /* + * The GPU requires two flush ATSDs to ensure all entries have + * been flushed. We use PID 0 as it will never be used for a + * process on the GPU. 
+ */ + mmio_invalidate_pid(mmio_atsd_reg, 0, true); + mmio_invalidate_wait(mmio_atsd_reg); + mmio_invalidate_pid(mmio_atsd_reg, 0, true); + mmio_invalidate_wait(mmio_atsd_reg); + } - - mmio_invalidate_wait(mmio_atsd_reg, flush); - if (flush) - /* Wait for the flush to complete */ - mmio_invalidate_wait(mmio_atsd_reg, false); + release_atsd_reg(mmio_atsd_reg); } static void pnv_npu2_mn_release(struct mmu_notifier *mn, @@ -735,7 +779,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return ERR_PTR(-ENODEV); - npu_context->npdev[npu->index][nvlink_index] = npdev; + + /* + * npdev is a pci_dev pointer setup by the PCI code. We assign it to + * npdev[][] to indicate to the mmu notifiers that an invalidation + * should also be sent over this nvlink. The notifiers don't use any + * other fields in npu_context, so we just need to ensure that when they + * dereference npu_context->npdev[][] it is either a valid pointer or + * NULL. 
+ */ + WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev); return npu_context; } @@ -774,7 +827,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index", &nvlink_index))) return; - npu_context->npdev[npu->index][nvlink_index] = NULL; + WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); kref_put(&npu_context->kref, pnv_npu2_release_context); diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index 1bceb95f422d0f828017128580695c0d4c87ba47..5584247f502929de6e13df0ca5127a1ab92cdbe8 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c @@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index) return count; } +/* + * This can be called in the panic path with interrupts off, so use + * mdelay in that case. 
+ */ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) { s64 rc = OPAL_BUSY; @@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_write_nvram(__pa(buf), count, off); if (rc == OPAL_BUSY_EVENT) { - msleep(OPAL_BUSY_DELAY_MS); + if (in_interrupt() || irqs_disabled()) + mdelay(OPAL_BUSY_DELAY_MS); + else + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); } else if (rc == OPAL_BUSY) { - msleep(OPAL_BUSY_DELAY_MS); + if (in_interrupt() || irqs_disabled()) + mdelay(OPAL_BUSY_DELAY_MS); + else + msleep(OPAL_BUSY_DELAY_MS); } } diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 7966a314d93abe3df7324899e47ef287d242a888..fd143c934768227c0aaca030268f75ed84aebc74 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -37,53 +37,92 @@ #include #include #include +#include #include "powernv.h" + +static bool fw_feature_is(const char *state, const char *name, + struct device_node *fw_features) +{ + struct device_node *np; + bool rc = false; + + np = of_get_child_by_name(fw_features, name); + if (np) { + rc = of_property_read_bool(np, state); + of_node_put(np); + } + + return rc; +} + +static void init_fw_feat_flags(struct device_node *np) +{ + if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); + + if (fw_feature_is("enabled", "fw-bcctrl-serialized", np)) + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); + + if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np)) + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); + + if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np)) + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); + + if (fw_feature_is("enabled", "fw-l1d-thread-split", np)) + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); + + if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) + 
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. + */ + if (fw_feature_is("disabled", "speculation-policy-favor-security", np)) + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + + if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + + if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); + + if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); +} + static void pnv_setup_rfi_flush(void) { struct device_node *np, *fw_features; enum l1d_flush_type type; - int enable; + bool enable; /* Default to fallback in case fw-features are not available */ type = L1D_FLUSH_FALLBACK; - enable = 1; np = of_find_node_by_name(NULL, "ibm,opal"); fw_features = of_get_child_by_name(np, "fw-features"); of_node_put(np); if (fw_features) { - np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); - if (np && of_property_read_bool(np, "enabled")) - type = L1D_FLUSH_MTTRIG; + init_fw_feat_flags(fw_features); + of_node_put(fw_features); - of_node_put(np); + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) + type = L1D_FLUSH_MTTRIG; - np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); - if (np && of_property_read_bool(np, "enabled")) + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) type = L1D_FLUSH_ORI; - - of_node_put(np); - - /* Enable unless firmware says NOT to */ - enable = 2; - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); - if (np && of_property_read_bool(np, "disabled")) - enable--; - - of_node_put(np); - - np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); - if (np && of_property_read_bool(np, "disabled")) - enable--; - - of_node_put(np); - of_node_put(fw_features); } - 
setup_rfi_flush(type, enable > 0); + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); + + setup_rfi_flush(type, enable); } static void __init pnv_setup_arch(void) @@ -91,6 +130,7 @@ static void __init pnv_setup_arch(void) set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); pnv_setup_rfi_flush(); + setup_stf_barrier(); /* Initialize SMP */ pnv_smp_init(); diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index f7042ad492bafba5ac21e3db9ce24977fd527169..fbea7db043faa0ececbc5e589032031412bc5654 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -348,6 +348,9 @@ void post_mobility_fixup(void) printk(KERN_ERR "Post-mobility device tree update " "failed: %d\n", rc); + /* Possibly switch to a new RFI flush type */ + pseries_setup_rfi_flush(); + return; } diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 1ae1d9f4dbe99935130971cdc390d8f581ad788e..27cdcb69fd18617301b9f19bf31a1f022c007650 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -100,4 +100,6 @@ static inline unsigned long cmo_get_page_size(void) int dlpar_workqueue_init(void); +void pseries_setup_rfi_flush(void); + #endif /* _PSERIES_PSERIES_H */ diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ae4f596273b51a836e5d27307f81bfe6438590bf..45f814041448c56100debd01be0d7f909bff7e11 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -68,6 +68,7 @@ #include #include #include +#include #include "pseries.h" @@ -459,35 +460,78 @@ static void __init find_and_init_phbs(void) of_pci_check_probe_only(); } -static void pseries_setup_rfi_flush(void) +static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) +{ 
+ /* + * The features below are disabled by default, so we instead look to see + * if firmware has *enabled* them, and set them if so. + */ + if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31) + security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); + + if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED) + security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); + + if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30) + security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); + + if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2) + security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); + + if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV) + security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); + + if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) + security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); + + /* + * The features below are enabled by default, so we instead look to see + * if firmware has *disabled* them, and clear them if so. + */ + if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) + security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); + + if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) + security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); + + if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR)) + security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); +} + +void pseries_setup_rfi_flush(void) { struct h_cpu_char_result result; enum l1d_flush_type types; bool enable; long rc; - /* Enable by default */ - enable = true; + /* + * Set features to the defaults assumed by init_cpu_char_feature_flags() + * so it can set/clear again any features that might have changed after + * migration, and in case the hypercall fails and it is not even called. 
+ */ + powerpc_security_features = SEC_FTR_DEFAULT; rc = plpar_get_cpu_characteristics(&result); - if (rc == H_SUCCESS) { - types = L1D_FLUSH_NONE; + if (rc == H_SUCCESS) + init_cpu_char_feature_flags(&result); - if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) - types |= L1D_FLUSH_MTTRIG; - if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) - types |= L1D_FLUSH_ORI; + /* + * We're the guest so this doesn't apply to us, clear it to simplify + * handling of it elsewhere. + */ + security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); - /* Use fallback if nothing set in hcall */ - if (types == L1D_FLUSH_NONE) - types = L1D_FLUSH_FALLBACK; + types = L1D_FLUSH_FALLBACK; - if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) - enable = false; - } else { - /* Default to fallback if case hcall is not available */ - types = L1D_FLUSH_FALLBACK; - } + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) + types |= L1D_FLUSH_MTTRIG; + + if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) + types |= L1D_FLUSH_ORI; + + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ + security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR); setup_rfi_flush(types, enable); } @@ -510,6 +554,7 @@ static void __init pSeries_setup_arch(void) fwnmi_init(); pseries_setup_rfi_flush(); + setup_stf_barrier(); /* By default, only probe PCI (can be overridden by rtas_pci) */ pci_add_flags(PCI_PROBE_ONLY); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index ead3e2549ebfe2b891e54e37e8ef3e3f77e963d0..205dec18d6b538671f78130fdc631888fbf59e5f 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask) int i; u32 mask = 0; - for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) + for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1) mask |= (cpumask & 1) << get_hard_smp_processor_id(i); return mask; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 
2c8b325591cc256fedf59701e32cc90bbb498432..a5938fadd031ebe40635f4876ea3729c769d0d2a 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2348,6 +2348,8 @@ static void dump_one_paca(int cpu) DUMP(p, slb_cache_ptr, "x"); for (i = 0; i < SLB_CACHE_ENTRIES; i++) printf(" slb_cache[%d]: = 0x%016lx\n", i, p->slb_cache[i]); + + DUMP(p, rfi_flush_fallback_area, "px"); #endif DUMP(p, dscr_default, "llx"); #ifdef CONFIG_PPC_BOOK3E diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S index e8077f0971f89f1695d5b5637d9c89126cf1e9cc..2bf01ba44107cd678e1fd38b31e5be18080f80b0 100644 --- a/arch/s390/crypto/crc32be-vx.S +++ b/arch/s390/crypto/crc32be-vx.S @@ -13,6 +13,7 @@ */ #include +#include #include /* Vector register range containing CRC-32 constants */ @@ -67,6 +68,8 @@ .previous + GEN_BR_THUNK %r14 + .text /* * The CRC-32 function(s) use these calling conventions: @@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16) .Ldone: VLGVF %r2,%v2,3 - br %r14 + BR_EX %r14 .previous diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S index d8c67a58c0c53b620c4a8f1837ce9bf3c0db5207..7d6f568bd3ad1fe19586e7597ae127b519c7709f 100644 --- a/arch/s390/crypto/crc32le-vx.S +++ b/arch/s390/crypto/crc32le-vx.S @@ -14,6 +14,7 @@ */ #include +#include #include /* Vector register range containing CRC-32 constants */ @@ -76,6 +77,7 @@ .previous + GEN_BR_THUNK %r14 .text @@ -264,6 +266,6 @@ crc32_le_vgfm_generic: .Ldone: VLGVF %r2,%v2,2 - br %r14 + BR_EX %r14 .previous diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h new file mode 100644 index 0000000000000000000000000000000000000000..955d620db23edf04d1c8c946c22a4ccc219e9f74 --- /dev/null +++ b/arch/s390/include/asm/alternative-asm.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_ALTERNATIVE_ASM_H +#define _ASM_S390_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +/* + * Check the length of an instruction sequence. 
The length may not be larger + * than 254 bytes and it has to be divisible by 2. + */ +.macro alt_len_check start,end + .if ( \end - \start ) > 254 + .error "cpu alternatives does not support instructions blocks > 254 bytes\n" + .endif + .if ( \end - \start ) % 2 + .error "cpu alternatives instructions length is odd\n" + .endif +.endm + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature + .long \orig_start - . + .long \alt_start - . + .word \feature + .byte \orig_end - \orig_start + .byte \alt_end - \alt_start +.endm + +/* + * Fill up @bytes with nops. The macro emits 6-byte nop instructions + * for the bulk of the area, possibly followed by a 4-byte and/or + * a 2-byte nop if the size of the area is not divisible by 6. + */ +.macro alt_pad_fill bytes + .fill ( \bytes ) / 6, 6, 0xc0040000 + .fill ( \bytes ) % 6 / 4, 4, 0x47000000 + .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700 +.endm + +/* + * Fill up @bytes with nops. If the number of bytes is larger + * than 6, emit a jg instruction to branch over all nops, then + * fill an area of size (@bytes - 6) with nop instructions. + */ +.macro alt_pad bytes + .if ( \bytes > 0 ) + .if ( \bytes > 6 ) + jg . + \bytes + alt_pad_fill \bytes - 6 + .else + alt_pad_fill \bytes + .endif + .endif +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".skip" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. 
+ */ +.macro ALTERNATIVE oldinstr, newinstr, feature + .pushsection .altinstr_replacement,"ax" +770: \newinstr +771: .popsection +772: \oldinstr +773: alt_len_check 770b, 771b + alt_len_check 772b, 773b + alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) +774: .pushsection .altinstructions,"a" + alt_entry 772b, 774b, 770b, 771b, \feature + .popsection +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".skip" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. + */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 + .pushsection .altinstr_replacement,"ax" +770: \newinstr1 +771: \newinstr2 +772: .popsection +773: \oldinstr +774: alt_len_check 770b, 771b + alt_len_check 771b, 772b + alt_len_check 773b, 774b + .if ( 771b - 770b > 772b - 771b ) + alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) + .else + alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) + .endif +775: .pushsection .altinstructions,"a" + alt_entry 773b, 775b, 770b, 771b,\feature1 + alt_entry 773b, 775b, 771b, 772b,\feature2 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_ALTERNATIVE_ASM_H */ diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h new file mode 100644 index 0000000000000000000000000000000000000000..9a56e738d645a4f4e13466644d0ffe225fc8e44a --- /dev/null +++ b/arch/s390/include/asm/nospec-insn.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_S390_NOSPEC_ASM_H +#define _ASM_S390_NOSPEC_ASM_H + +#include +#include + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_EXPOLINE + +_LC_BR_R1 = __LC_BR_R1 + +/* + * The expoline macros are used to create thunks in the same format + * as gcc generates them. The 'comdat' section flag makes sure that + * the various thunks are merged into a single copy. 
+ */ + .macro __THUNK_PROLOG_NAME name + .pushsection .text.\name,"axG",@progbits,\name,comdat + .globl \name + .hidden \name + .type \name,@function +\name: + .cfi_startproc + .endm + + .macro __THUNK_EPILOG + .cfi_endproc + .popsection + .endm + + .macro __THUNK_PROLOG_BR r1,r2 + __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1 + .endm + + .macro __THUNK_PROLOG_BC d0,r1,r2 + __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1 + .endm + + .macro __THUNK_BR r1,r2 + jg __s390x_indirect_jump_r\r2\()use_r\r1 + .endm + + .macro __THUNK_BC d0,r1,r2 + jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1 + .endm + + .macro __THUNK_BRASL r1,r2,r3 + brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2 + .endm + + .macro __DECODE_RR expand,reg,ruse + .set __decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \reg,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \ruse,%r\r2 + \expand \r1,\r2 + .set __decode_fail,0 + .endif + .endr + .endif + .endr + .if __decode_fail == 1 + .error "__DECODE_RR failed" + .endif + .endm + + .macro __DECODE_RRR expand,rsave,rtarget,ruse + .set __decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \rsave,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \rtarget,%r\r2 + .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \ruse,%r\r3 + \expand \r1,\r2,\r3 + .set __decode_fail,0 + .endif + .endr + .endif + .endr + .endif + .endr + .if __decode_fail == 1 + .error "__DECODE_RRR failed" + .endif + .endm + + .macro __DECODE_DRR expand,disp,reg,ruse + .set __decode_fail,1 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \reg,%r\r1 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .ifc \ruse,%r\r2 + \expand \disp,\r1,\r2 + .set __decode_fail,0 + .endif + .endr + .endif + .endr + .if __decode_fail == 1 + .error "__DECODE_DRR failed" + .endif + .endm + + .macro __THUNK_EX_BR reg,ruse + # Be very careful when adding instructions to this macro! 
+ # The ALTERNATIVE replacement code has a .+10 which targets + # the "br \reg" after the code has been patched. +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + exrl 0,555f + j . +#else + .ifc \reg,%r1 + ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 + j . + .else + larl \ruse,555f + ex 0,0(\ruse) + j . + .endif +#endif +555: br \reg + .endm + + .macro __THUNK_EX_BC disp,reg,ruse +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + exrl 0,556f + j . +#else + larl \ruse,556f + ex 0,0(\ruse) + j . +#endif +556: b \disp(\reg) + .endm + + .macro GEN_BR_THUNK reg,ruse=%r1 + __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse + __THUNK_EX_BR \reg,\ruse + __THUNK_EPILOG + .endm + + .macro GEN_B_THUNK disp,reg,ruse=%r1 + __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse + __THUNK_EX_BC \disp,\reg,\ruse + __THUNK_EPILOG + .endm + + .macro BR_EX reg,ruse=%r1 +557: __DECODE_RR __THUNK_BR,\reg,\ruse + .pushsection .s390_indirect_branches,"a",@progbits + .long 557b-. + .popsection + .endm + + .macro B_EX disp,reg,ruse=%r1 +558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse + .pushsection .s390_indirect_branches,"a",@progbits + .long 558b-. + .popsection + .endm + + .macro BASR_EX rsave,rtarget,ruse=%r1 +559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse + .pushsection .s390_indirect_branches,"a",@progbits + .long 559b-. 
+ .popsection + .endm + +#else + .macro GEN_BR_THUNK reg,ruse=%r1 + .endm + + .macro GEN_B_THUNK disp,reg,ruse=%r1 + .endm + + .macro BR_EX reg,ruse=%r1 + br \reg + .endm + + .macro B_EX disp,reg,ruse=%r1 + b \disp(\reg) + .endm + + .macro BASR_EX rsave,rtarget,ruse=%r1 + basr \rsave,\rtarget + .endm +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_S390_NOSPEC_ASM_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index a3a4cafb6080ff526575cd81aedc0df9088c87a0..e0784fff07f5de44b603aa6b268d144244ef2425 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -63,6 +63,7 @@ obj-y += nospec-branch.o extra-y += head.o head64.o vmlinux.lds +obj-$(CONFIG_SYSFS) += nospec-sysfs.o CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) obj-$(CONFIG_MODULES) += module.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 0e6d2b032484a8bfc02ac1c8683c3dd1c8c34b91..4e69bf909e8791eeadb90108139f5ba08076c733 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -177,6 +177,7 @@ int main(void) OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); OFFSET(__LC_GMAP, lowcore, gmap); OFFSET(__LC_PASTE, lowcore, paste); + OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ OFFSET(__LC_DUMP_REIPL, lowcore, ipib); /* hardware defined lowcore locations 0x1000 - 0x18ff */ diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index f6c56009e822473d701cecbc896d3693b080d6c2..b65874b0b412e40ea1baea814fb1169d04f02104 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -9,18 +9,22 @@ #include #include +#include #include #include + GEN_BR_THUNK %r9 + GEN_BR_THUNK %r14 + ENTRY(s390_base_mcck_handler) basr %r13,0 0: lg %r15,__LC_PANIC_STACK # load panic stack aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_mcck_handler_fn - lg %r1,0(%r1) - ltgr %r1,%r1 + lg %r9,0(%r1) + ltgr %r9,%r9 jz 1f - basr %r14,%r1 + 
BASR_EX %r14,%r9 1: la %r1,4095 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) lpswe __LC_MCK_OLD_PSW @@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler) basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_ext_handler_fn - lg %r1,0(%r1) - ltgr %r1,%r1 + lg %r9,0(%r1) + ltgr %r9,%r9 jz 1f - basr %r14,%r1 + BASR_EX %r14,%r9 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit lpswe __LC_EXT_OLD_PSW @@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler) basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD larl %r1,s390_base_pgm_handler_fn - lg %r1,0(%r1) - ltgr %r1,%r1 + lg %r9,0(%r1) + ltgr %r9,%r9 jz 1f - basr %r14,%r1 + BASR_EX %r14,%r9 lmg %r0,%r15,__LC_SAVE_AREA_SYNC lpswe __LC_PGM_OLD_PSW 1: lpswe disabled_wait_psw-0b(%r13) @@ -117,7 +121,7 @@ ENTRY(diag308_reset) larl %r4,.Lcontinue_psw # Restore PSW flags lpswe 0(%r4) .Lcontinue: - br %r14 + BR_EX %r14 .align 16 .Lrestart_psw: .long 0x00080000,0x80000000 + .Lrestart_part2 diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index ed9aaa212d4a10495964d9349affc256ce1ffe05..be20b1f7338412726174a61955de12e96ab7ae2f 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -25,6 +25,7 @@ #include #include #include +#include __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -221,67 +222,9 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) .popsection .endm -#ifdef CONFIG_EXPOLINE - - .macro GEN_BR_THUNK name,reg,tmp - .section .text.\name,"axG",@progbits,\name,comdat - .globl \name - .hidden \name - .type \name,@function -\name: - .cfi_startproc -#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES - exrl 0,0f -#else - larl \tmp,0f - ex 0,0(\tmp) -#endif - j . 
-0: br \reg - .cfi_endproc - .endm - - GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1 - GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1 - GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11 - - .macro BASR_R14_R9 -0: brasl %r14,__s390x_indirect_jump_r1use_r9 - .pushsection .s390_indirect_branches,"a",@progbits - .long 0b-. - .popsection - .endm - - .macro BR_R1USE_R14 -0: jg __s390x_indirect_jump_r1use_r14 - .pushsection .s390_indirect_branches,"a",@progbits - .long 0b-. - .popsection - .endm - - .macro BR_R11USE_R14 -0: jg __s390x_indirect_jump_r11use_r14 - .pushsection .s390_indirect_branches,"a",@progbits - .long 0b-. - .popsection - .endm - -#else /* CONFIG_EXPOLINE */ - - .macro BASR_R14_R9 - basr %r14,%r9 - .endm - - .macro BR_R1USE_R14 - br %r14 - .endm - - .macro BR_R11USE_R14 - br %r14 - .endm - -#endif /* CONFIG_EXPOLINE */ - + GEN_BR_THUNK %r9 + GEN_BR_THUNK %r14 + GEN_BR_THUNK %r14,%r11 .section .kprobes.text, "ax" .Ldummy: @@ -298,7 +241,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) ENTRY(__bpon) .globl __bpon BPON - BR_R1USE_R14 + BR_EX %r14 /* * Scheduler resume function, called by switch_to @@ -325,7 +268,7 @@ ENTRY(__switch_to) TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP jz 0f .insn s,0xb2800000,__LC_LPP # set program parameter -0: BR_R1USE_R14 +0: BR_EX %r14 .L__critical_start: @@ -392,7 +335,7 @@ sie_exit: xgr %r5,%r5 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers lg %r2,__SF_EMPTY+16(%r15) # return exit reason code - BR_R1USE_R14 + BR_EX %r14 .Lsie_fault: lghi %r14,-EFAULT stg %r14,__SF_EMPTY+16(%r15) # set exit reason code @@ -451,7 +394,7 @@ ENTRY(system_call) lgf %r9,0(%r8,%r10) # get system call add. 
TSTMSK __TI_flags(%r12),_TIF_TRACE jnz .Lsysc_tracesys - BASR_R14_R9 # call sys_xxxx + BASR_EX %r14,%r9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_return: @@ -628,7 +571,7 @@ ENTRY(system_call) lmg %r3,%r7,__PT_R3(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lg %r2,__PT_ORIG_GPR2(%r11) - BASR_R14_R9 # call sys_xxx + BASR_EX %r14,%r9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_tracenogo: TSTMSK __TI_flags(%r12),_TIF_TRACE @@ -652,7 +595,7 @@ ENTRY(ret_from_fork) lmg %r9,%r10,__PT_R9(%r11) # load gprs ENTRY(kernel_thread_starter) la %r2,0(%r10) - BASR_R14_R9 + BASR_EX %r14,%r9 j .Lsysc_tracenogo /* @@ -731,7 +674,7 @@ ENTRY(pgm_check_handler) je .Lpgm_return lgf %r9,0(%r10,%r1) # load address of handler routine lgr %r2,%r11 # pass pointer to pt_regs - BASR_R14_R9 # branch to interrupt-handler + BASR_EX %r14,%r9 # branch to interrupt-handler .Lpgm_return: LOCKDEP_SYS_EXIT tm __PT_PSW+1(%r11),0x01 # returning to user ? @@ -1041,7 +984,7 @@ ENTRY(psw_idle) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: lpswe __SF_EMPTY(%r15) - BR_R1USE_R14 + BR_EX %r14 .Lpsw_idle_end: /* @@ -1083,7 +1026,7 @@ ENTRY(save_fpu_regs) .Lsave_fpu_regs_done: oi __LC_CPU_FLAGS+7,_CIF_FPU .Lsave_fpu_regs_exit: - BR_R1USE_R14 + BR_EX %r14 .Lsave_fpu_regs_end: EXPORT_SYMBOL(save_fpu_regs) @@ -1129,7 +1072,7 @@ load_fpu_regs: .Lload_fpu_regs_done: ni __LC_CPU_FLAGS+7,255-_CIF_FPU .Lload_fpu_regs_exit: - BR_R1USE_R14 + BR_EX %r14 .Lload_fpu_regs_end: .L__critical_end: @@ -1301,7 +1244,7 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs -0: BR_R11USE_R14 +0: BR_EX %r14 .align 8 .Lcleanup_table: @@ -1337,7 +1280,7 @@ cleanup_critical: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit - BR_R11USE_R14 + BR_EX %r14 #endif .Lcleanup_system_call: @@ -1391,7 +1334,7 @@ cleanup_critical: stg %r15,56(%r11) # r15 stack 
pointer # set new psw address and exit larl %r9,.Lsysc_do_svc - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_system_call_insn: .quad system_call .quad .Lsysc_stmg @@ -1403,7 +1346,7 @@ cleanup_critical: .Lcleanup_sysc_tif: larl %r9,.Lsysc_tif - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_sysc_restore: # check if stpt has been executed @@ -1420,14 +1363,14 @@ cleanup_critical: mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_sysc_restore_insn: .quad .Lsysc_exit_timer .quad .Lsysc_done - 4 .Lcleanup_io_tif: larl %r9,.Lio_tif - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_io_restore: # check if stpt has been executed @@ -1441,7 +1384,7 @@ cleanup_critical: mvc 0(64,%r11),__PT_R8(%r9) lmg %r0,%r7,__PT_R0(%r9) 1: lmg %r8,%r9,__LC_RETURN_PSW - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_io_restore_insn: .quad .Lio_exit_timer .quad .Lio_done - 4 @@ -1494,17 +1437,17 @@ cleanup_critical: # prepare return psw nihh %r8,0xfcfd # clear irq & wait state bits lg %r9,48(%r11) # return from psw_idle - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_idle_insn: .quad .Lpsw_idle_lpsw .Lcleanup_save_fpu_regs: larl %r9,save_fpu_regs - BR_R11USE_R14 + BR_EX %r14,%r11 .Lcleanup_load_fpu_regs: larl %r9,load_fpu_regs - BR_R11USE_R14 + BR_EX %r14,%r11 /* * Integer constants diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 94f2099bceb04cbfdc73d8f498a42e777a4b4e07..3d17c41074ca55d59fbe156c5967605912af9734 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -176,10 +176,9 @@ void do_softirq_own_stack(void) new -= STACK_FRAME_OVERHEAD; ((struct stack_frame *) new)->back_chain = old; asm volatile(" la 15,0(%0)\n" - " basr 14,%2\n" + " brasl 14,__do_softirq\n" " la 15,0(%1)\n" - : : "a" (new), "a" (old), - "a" (__do_softirq) + : : "a" (new), "a" (old) : "0", "1", "2", "3", "4", "5", "14", "cc", "memory" ); } else { diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 
82df7d80fab22090cb943e1d54562ee00acfaec2..27110f3294edcdf30935048d5553f712caf44116 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -9,13 +9,17 @@ #include #include #include +#include #include #include + GEN_BR_THUNK %r1 + GEN_BR_THUNK %r14 + .section .kprobes.text, "ax" ENTRY(ftrace_stub) - br %r14 + BR_EX %r14 #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) #define STACK_PTREGS (STACK_FRAME_OVERHEAD) @@ -23,7 +27,7 @@ ENTRY(ftrace_stub) #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) ENTRY(_mcount) - br %r14 + BR_EX %r14 EXPORT_SYMBOL(_mcount) @@ -53,7 +57,7 @@ ENTRY(ftrace_caller) #endif lgr %r3,%r14 la %r5,STACK_PTREGS(%r15) - basr %r14,%r1 + BASR_EX %r14,%r1 #ifdef CONFIG_FUNCTION_GRAPH_TRACER # The j instruction gets runtime patched to a nop instruction. # See ftrace_enable_ftrace_graph_caller. @@ -68,7 +72,7 @@ ftrace_graph_caller_end: #endif lg %r1,(STACK_PTREGS_PSW+8)(%r15) lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) - br %r1 + BR_EX %r1 #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -81,6 +85,6 @@ ENTRY(return_to_handler) aghi %r15,STACK_FRAME_OVERHEAD lgr %r14,%r2 lmg %r2,%r5,32(%r15) - br %r14 + BR_EX %r14 #endif diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 9f3b5b3827435f62f1279872033a508355c6292c..d5eed651b5abd00cb556a47b58ac5551ea80566f 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -44,24 +44,6 @@ static int __init nospec_report(void) } arch_initcall(nospec_report); -#ifdef CONFIG_SYSFS -ssize_t cpu_show_spectre_v1(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "Mitigation: __user pointer sanitization\n"); -} - -ssize_t cpu_show_spectre_v2(struct device *dev, - struct device_attribute *attr, char *buf) -{ - if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) - return sprintf(buf, "Mitigation: execute trampolines\n"); - if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) - return 
sprintf(buf, "Mitigation: limited branch prediction.\n"); - return sprintf(buf, "Vulnerable\n"); -} -#endif - #ifdef CONFIG_EXPOLINE int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF); @@ -112,7 +94,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) s32 *epo; /* Second part of the instruction replace is always a nop */ - memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4); for (epo = start; epo < end; epo++) { instr = (u8 *) epo + *epo; if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) @@ -133,18 +114,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) br = thunk + (*(int *)(thunk + 2)) * 2; else continue; - if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0) + /* Check for unconditional branch 0x07f? or 0x47f???? */ + if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0) continue; + + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4); switch (type) { case BRCL_EXPOLINE: - /* brcl to thunk, replace with br + nop */ insnbuf[0] = br[0]; insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); + if (br[0] == 0x47) { + /* brcl to b, replace with bc + nopr */ + insnbuf[2] = br[2]; + insnbuf[3] = br[3]; + } else { + /* brcl to br, replace with bcr + nop */ + } break; case BRASL_EXPOLINE: - /* brasl to thunk, replace with basr + nop */ - insnbuf[0] = 0x0d; insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); + if (br[0] == 0x47) { + /* brasl to b, replace with bas + nopr */ + insnbuf[0] = 0x4d; + insnbuf[2] = br[2]; + insnbuf[3] = br[3]; + } else { + /* brasl to br, replace with basr + nop */ + insnbuf[0] = 0x0d; + } break; } diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..8affad5f18cb5df637754f192ae5d8bce9387eba --- /dev/null +++ b/arch/s390/kernel/nospec-sysfs.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +ssize_t cpu_show_spectre_v1(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +ssize_t cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) + return sprintf(buf, "Mitigation: execute trampolines\n"); + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) + return sprintf(buf, "Mitigation: limited branch prediction\n"); + return sprintf(buf, "Vulnerable\n"); +} diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 7e1e40323b78e1bb7a3910e1191dfbe40027b748..d99155793c26ec4d78f078e7b2c0a766a23740f1 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -739,6 +739,10 @@ static int __hw_perf_event_init(struct perf_event *event) */ rate = 0; if (attr->freq) { + if (!attr->sample_freq) { + err = -EINVAL; + goto out; + } rate = freq_to_sample_rate(&si, attr->sample_freq); rate = hw_limit_rate(&si, rate); attr->freq = 0; diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index a40ebd1d29d0ebc5a3d415f33edd8a4db32e5799..8e954c1026396f84e61b0980e429ef75b00adfb0 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -7,8 +7,11 @@ #include #include +#include #include + GEN_BR_THUNK %r9 + # # Issue "store status" for the current CPU to its prefix page # and call passed function afterwards @@ -67,9 +70,9 @@ ENTRY(store_status) st %r4,0(%r1) st %r5,4(%r1) stg %r2,8(%r1) - lgr %r1,%r2 + lgr %r9,%r2 lgr %r2,%r3 - br %r1 + BR_EX %r9 .section .bss .align 8 diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index e99187149f1717f1ec81c94ea12cc77fa964c2fd..a049a7b9d6e893801a1ecd79d9332d3faea8d0ba 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S @@ -13,6 +13,7 @@ #include #include #include +#include #include /* @@ -24,6 +25,8 @@ * (see below) in the resume process. * This function runs with disabled interrupts. 
*/ + GEN_BR_THUNK %r14 + .section .text ENTRY(swsusp_arch_suspend) stmg %r6,%r15,__SF_GPRS(%r15) @@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend) spx 0x318(%r1) lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 - br %r14 + BR_EX %r14 /* * Restore saved memory image to correct place and restore register context. @@ -197,11 +200,10 @@ pgm_check_entry: larl %r15,init_thread_union ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) larl %r2,.Lpanic_string - larl %r3,sclp_early_printk lghi %r1,0 sam31 sigp %r1,%r0,SIGP_SET_ARCHITECTURE - basr %r14,%r3 + brasl %r14,sclp_early_printk larl %r3,.Ldisabled_wait_31 lpsw 0(%r3) 4: @@ -267,7 +269,7 @@ restore_registers: /* Return 0 */ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) lghi %r2,0 - br %r14 + BR_EX %r14 .section .data..nosave,"aw",@progbits .align 8 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index eb7b530d1783f7c8da48f0ff328e15a808a71b2f..4f1f5fc8139d881078720f42f4e6d907ee3fa342 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -590,7 +590,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; if (gpa && (scb_s->ecb & ECB_TE)) { - if (!(gpa & ~0x1fffU)) { + if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x0080U); goto unpin; } diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S index d66751397e721e7eee8a2ea117ab8d4c8549f248..e1fa974ac5005e7909996d9075fd30c13964cd8a 100644 --- a/arch/s390/lib/mem.S +++ b/arch/s390/lib/mem.S @@ -7,6 +7,9 @@ #include #include +#include + + GEN_BR_THUNK %r14 /* * void *memmove(void *dest, const void *src, size_t n) @@ -33,14 +36,14 @@ ENTRY(memmove) .Lmemmove_forward_remainder: larl %r5,.Lmemmove_mvc ex %r4,0(%r5) - br %r14 + BR_EX %r14 .Lmemmove_reverse: ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) brctg %r4,.Lmemmove_reverse ic %r0,0(%r4,%r3) stc %r0,0(%r4,%r1) - br %r14 + BR_EX %r14 .Lmemmove_mvc: mvc 0(1,%r1),0(%r3) EXPORT_SYMBOL(memmove) @@ -77,7 +80,7 @@ ENTRY(memset) 
.Lmemset_clear_remainder: larl %r3,.Lmemset_xc ex %r4,0(%r3) - br %r14 + BR_EX %r14 .Lmemset_fill: stc %r3,0(%r2) cghi %r4,1 @@ -94,7 +97,7 @@ ENTRY(memset) .Lmemset_fill_remainder: larl %r3,.Lmemset_mvc ex %r4,0(%r3) - br %r14 + BR_EX %r14 .Lmemset_xc: xc 0(1,%r1),0(%r1) .Lmemset_mvc: @@ -117,7 +120,7 @@ ENTRY(memcpy) .Lmemcpy_remainder: larl %r5,.Lmemcpy_mvc ex %r4,0(%r5) - br %r14 + BR_EX %r14 .Lmemcpy_loop: mvc 0(256,%r1),0(%r3) la %r1,256(%r1) diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S index 25bb4643c4f46cc3bcbf904e9fc257a3145f4221..9f794869c1b090a9a6589d573e77739562265cf5 100644 --- a/arch/s390/net/bpf_jit.S +++ b/arch/s390/net/bpf_jit.S @@ -9,6 +9,7 @@ */ #include +#include #include "bpf_jit.h" /* @@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \ clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ jh sk_load_##NAME##_slow; \ LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ - b OFF_OK(%r6); /* Return */ \ + B_EX OFF_OK,%r6; /* Return */ \ \ sk_load_##NAME##_slow:; \ lgr %r2,%r7; /* Arg1 = skb pointer */ \ @@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \ brasl %r14,skb_copy_bits; /* Get data from skb */ \ LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ - br %r6; /* Return */ + BR_EX %r6; /* Return */ sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ + GEN_BR_THUNK %r6 + GEN_B_THUNK OFF_OK,%r6 + /* * Load 1 byte from SKB (optimized version) */ @@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos) clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? 
jnl sk_load_byte_slow llgc %r14,0(%r3,%r12) # Get byte from skb - b OFF_OK(%r6) # Return OK + B_EX OFF_OK,%r6 # Return OK sk_load_byte_slow: lgr %r2,%r7 # Arg1 = skb pointer @@ -90,7 +94,7 @@ sk_load_byte_slow: brasl %r14,skb_copy_bits # Get data from skb llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer ltgr %r2,%r2 # Set cc to (%r2 != 0) - br %r6 # Return cc + BR_EX %r6 # Return cc #define sk_negative_common(NAME, SIZE, LOAD) \ sk_load_##NAME##_slow_neg:; \ @@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \ jz bpf_error; \ LOAD %r14,0(%r2); /* Get data from pointer */ \ xr %r3,%r3; /* Set cc to zero */ \ - br %r6; /* Return cc */ + BR_EX %r6; /* Return cc */ sk_negative_common(word, 4, llgf) sk_negative_common(half, 2, llgh) @@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc) bpf_error: # force a return 0 from jit handler ltgr %r15,%r15 # Set condition code - br %r6 + BR_EX %r6 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 33e2785f684280a574827c4141ba30d8a39bb2cf..11cd151733d43a4a8e9d0c992a4005ca8dc1e509 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include #include "bpf_jit.h" @@ -43,6 +45,8 @@ struct bpf_jit { int base_ip; /* Base address for literal pool */ int ret0_ip; /* Address of return 0 */ int exit_ip; /* Address of exit */ + int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ + int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ int tail_call_start; /* Tail call start offset */ int labels[1]; /* Labels for local jumps */ }; @@ -252,6 +256,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) REG_SET_SEEN(b2); \ }) +#define EMIT6_PCREL_RILB(op, b, target) \ +({ \ + int rel = (target - jit->prg) / 2; \ + _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ + REG_SET_SEEN(b); \ +}) + +#define EMIT6_PCREL_RIL(op, target) \ +({ \ + int rel = (target - jit->prg) / 2; \ + _EMIT6(op 
| rel >> 16, rel & 0xffff); \ +}) + #define _EMIT6_IMM(op, imm) \ ({ \ unsigned int __imm = (imm); \ @@ -471,8 +488,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ save_restore_regs(jit, REGS_RESTORE); + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { + jit->r14_thunk_ip = jit->prg; + /* Generate __s390_indirect_jump_r14 thunk */ + if (test_facility(35)) { + /* exrl %r0,.+10 */ + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); + } else { + /* larl %r1,.+14 */ + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,0(%r1) */ + EMIT4_DISP(0x44000000, REG_0, REG_1, 0); + } + /* j . */ + EMIT4_PCREL(0xa7f40000, 0); + } /* br %r14 */ _EMIT2(0x07fe); + + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable && + (jit->seen & SEEN_FUNC)) { + jit->r1_thunk_ip = jit->prg; + /* Generate __s390_indirect_jump_r1 thunk */ + if (test_facility(35)) { + /* exrl %r0,.+10 */ + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); + /* j . */ + EMIT4_PCREL(0xa7f40000, 0); + /* br %r1 */ + _EMIT2(0x07f1); + } else { + /* larl %r1,.+14 */ + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); + /* ex 0,S390_lowcore.br_r1_tampoline */ + EMIT4_DISP(0x44000000, REG_0, REG_0, + offsetof(struct lowcore, br_r1_trampoline)); + /* j . 
*/ + EMIT4_PCREL(0xa7f40000, 0); + } + } } /* @@ -978,8 +1032,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* lg %w1,(%l) */ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, EMIT_CONST_U64(func)); - /* basr %r14,%w1 */ - EMIT2(0x0d00, REG_14, REG_W1); + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { + /* brasl %r14,__s390_indirect_jump_r1 */ + EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); + } else { + /* basr %r14,%w1 */ + EMIT2(0x0d00, REG_14, REG_W1); + } /* lgr %b0,%r2: load return value into %b0 */ EMIT4(0xb9040000, BPF_REG_0, REG_2); if ((jit->seen & SEEN_SKB) && diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index c001f782c5f1c6320c89011d1bc3627f10bfb3ea..28cc61216b649773045d2eb16bdd997602ac9087 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -255,7 +255,7 @@ debug_trap: mov.l @r8, r8 jsr @r8 nop - bra __restore_all + bra ret_from_exception nop CFI_ENDPROC diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index abad97edf736bfc1f1d456e32dfe3922c60f2624..28db058d471b14809a3efb775489e92a0b816f12 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -83,7 +83,11 @@ ATOMIC_OPS(xor) #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static inline int atomic_xchg(atomic_t *v, int new) +{ + return xchg(&v->counter, new); +} static inline int __atomic_add_unless(atomic_t *v, int a, int u) { diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h index 6f17528356b2f71c8a71764a0ac2715a3398a33f..ea53e418f6c045763ff030a1dc2fa73d524c38b8 100644 --- a/arch/sparc/include/asm/bug.h +++ b/arch/sparc/include/asm/bug.h @@ -9,10 +9,14 @@ void do_BUG(const char *file, int line); #define BUG() do { \ do_BUG(__FILE__, 
__LINE__); \ + barrier_before_unreachable(); \ __builtin_trap(); \ } while (0) #else -#define BUG() __builtin_trap() +#define BUG() do { \ + barrier_before_unreachable(); \ + __builtin_trap(); \ +} while (0) #endif #define HAVE_ARCH_BUG diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1a0fa10cb6b721747b479651119b095d05893de0..32bae68e34c1b617c2e1547cbea9f609dc285d8f 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, if (err) { printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", dev_name(&vdev->dev), err); - kfree(vdev); + put_device(&vdev->dev); return NULL; } if (vdev->dp) diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index e56dbc67e8378e891fa8aa64192d0875237f4aba..97c57b5f8d577cd926db00a68eb61bccc7908e0b 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom) if (status != EFI_SUCCESS) goto free_struct; - memcpy(rom->romdata, pci->romimage, pci->romsize); + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, + pci->romsize); return status; free_struct: @@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom) if (status != EFI_SUCCESS) goto free_struct; - memcpy(rom->romdata, pci->romimage, pci->romsize); + memcpy(rom->romdata, (void *)(unsigned long)pci->romimage, + pci->romsize); return status; free_struct: diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index 3de85deeb44b58fb4e9d5a9ef803086de43608e6..26a42b91140b39d5ad5a4547d38d5cd3ab73a6fc 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -32,6 +32,7 @@ CONFIG_OPROFILE=y CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_CC_STACKPROTECTOR_STRONG=y 
+CONFIG_REFCOUNT_FULL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y @@ -89,8 +90,8 @@ CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y CONFIG_INET_ESP=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_BEET is not set CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y @@ -105,6 +106,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_NETLABEL=y CONFIG_NETFILTER=y @@ -131,6 +133,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y CONFIG_NETFILTER_XT_MATCH_COMMENT=y CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y @@ -144,14 +147,17 @@ CONFIG_NETFILTER_XT_MATCH_MAC=y CONFIG_NETFILTER_XT_MATCH_MARK=y CONFIG_NETFILTER_XT_MATCH_POLICY=y CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y CONFIG_NETFILTER_XT_MATCH_STATE=y CONFIG_NETFILTER_XT_MATCH_STATISTIC=y CONFIG_NETFILTER_XT_MATCH_STRING=y CONFIG_NETFILTER_XT_MATCH_TIME=y CONFIG_NETFILTER_XT_MATCH_U32=y CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_MATCH_AH=y CONFIG_IP_NF_MATCH_ECN=y @@ -169,6 +175,7 @@ CONFIG_IP_NF_ARPTABLES=y CONFIG_IP_NF_ARPFILTER=y CONFIG_IP_NF_ARP_MANGLE=y CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y CONFIG_IP6_NF_IPTABLES=y CONFIG_IP6_NF_MATCH_IPV6HEADER=y CONFIG_IP6_NF_MATCH_RPFILTER=y @@ -263,6 +270,7 @@ CONFIG_TABLET_USB_GTCO=y CONFIG_TABLET_USB_HANWANG=y CONFIG_TABLET_USB_KBTAB=y CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y CONFIG_INPUT_UINPUT=y CONFIG_INPUT_GPIO=y # CONFIG_SERIO_I8042 is not set @@ -303,7 +311,6 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_HIDRAW=y CONFIG_UHID=y -# 
CONFIG_HID_GENERIC is not set CONFIG_HID_A4TECH=y CONFIG_HID_ACRUX=y CONFIG_HID_ACRUX_FF=y @@ -391,6 +398,9 @@ CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set @@ -423,15 +433,13 @@ CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_PANIC_TIMEOUT=5 -# CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_ENABLE_DEFAULT_TRACERS=y -# CONFIG_KPROBE_EVENTS is not set -# CONFIG_UPROBE_EVENTS is not set CONFIG_IO_DELAY_NONE=y CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_UNWINDER_FRAME_POINTER=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -440,4 +448,5 @@ CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_DEV_VIRTIO=y diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index c366c0adeb40da1ae311a2906087c4a73f730af4..f5c1007e2c408bf484ac816fd8ffe1f6bd506723 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -172,7 +172,8 @@ quiet_cmd_vdso = VDSO $@ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \ - $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) + $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS) \ + $(filter --target=% --gcc-toolchain=%,$(KBUILD_CFLAGS)) GCOV_PROFILE := n # diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 011a47b4587ca88f70c3db56b1c6f3e69aab820d..717c9219d00ec3fbdc6ebea65277bfc6025dc8fa 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1162,16 +1162,13 @@ int 
x86_perf_event_set_period(struct perf_event *event) per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; - if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) || - local64_read(&hwc->prev_count) != (u64)-left) { - /* - * The hw event starts counting from this event offset, - * mark it to be able to extra future deltas: - */ - local64_set(&hwc->prev_count, (u64)-left); + /* + * The hw event starts counting from this event offset, + * mark it to be able to extra future deltas: + */ + local64_set(&hwc->prev_count, (u64)-left); - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); - } + wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* * Due to erratum on certan cpu we need diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9b18a227fff7310a44f398189d1c68c54b2d3175..6965ee8c4b8aa8521c5dd686037f600a7baeff83 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) int bit, loops; u64 status; int handled; + int pmu_enabled; cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * Save the PMU state. + * It needs to be restored when leaving the handler. + */ + pmu_enabled = cpuc->enabled; /* * No known reason to not always do late ACK, * but just in case do it opt-in. @@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) if (!x86_pmu.late_ack) apic_write(APIC_LVTPC, APIC_DM_NMI); intel_bts_disable_local(); + cpuc->enabled = 0; __intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); handled += intel_bts_interrupt(); @@ -2320,7 +2327,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) done: /* Only restore PMU state when it's active. See x86_pmu_disable(). 
*/ - if (cpuc->enabled) + cpuc->enabled = pmu_enabled; + if (pmu_enabled) __intel_pmu_enable_all(0, true); intel_bts_enable_local(); @@ -3188,7 +3196,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, * Therefore the effective (average) period matches the requested period, * despite coarser hardware granularity. */ -static unsigned bdw_limit_period(struct perf_event *event, unsigned left) +static u64 bdw_limit_period(struct perf_event *event, u64 left) { if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xc0, .umask=0x01)) { diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 8156e47da7ba4c5a31e9f9436b27a96cfb7da4c3..10b39d44981c69a2002dfc9608e7a705eaa562f0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1150,6 +1150,7 @@ static void setup_pebs_sample_data(struct perf_event *event, if (pebs == NULL) return; + regs->flags &= ~PERF_EFLAGS_EXACT; sample_type = event->attr.sample_type; dsrc = sample_type & PERF_SAMPLE_DATA_SRC; @@ -1194,7 +1195,6 @@ static void setup_pebs_sample_data(struct perf_event *event, */ *regs = *iregs; regs->flags = pebs->flags; - set_linear_ip(regs, pebs->ip); if (sample_type & PERF_SAMPLE_REGS_INTR) { regs->ax = pebs->ax; @@ -1230,13 +1230,22 @@ static void setup_pebs_sample_data(struct perf_event *event, #endif } - if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { - regs->ip = pebs->real_ip; - regs->flags |= PERF_EFLAGS_EXACT; - } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs)) - regs->flags |= PERF_EFLAGS_EXACT; - else - regs->flags &= ~PERF_EFLAGS_EXACT; + if (event->attr.precise_ip > 1) { + /* Haswell and later have the eventing IP, so use it: */ + if (x86_pmu.intel_cap.pebs_format >= 2) { + set_linear_ip(regs, pebs->real_ip); + regs->flags |= PERF_EFLAGS_EXACT; + } else { + /* Otherwise use PEBS off-by-1 IP: */ + set_linear_ip(regs, pebs->ip); + + /* ... 
and try to fix it up using the LBR entries: */ + if (intel_pmu_pebs_fixup_ip(regs)) + regs->flags |= PERF_EFLAGS_EXACT; + } + } else + set_linear_ip(regs, pebs->ip); + if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) && x86_pmu.intel_cap.pebs_format >= 1) @@ -1303,17 +1312,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; } +/* + * Special variant of intel_pmu_save_and_restart() for auto-reload. + */ +static int +intel_pmu_save_and_restart_reload(struct perf_event *event, int count) +{ + struct hw_perf_event *hwc = &event->hw; + int shift = 64 - x86_pmu.cntval_bits; + u64 period = hwc->sample_period; + u64 prev_raw_count, new_raw_count; + s64 new, old; + + WARN_ON(!period); + + /* + * drain_pebs() only happens when the PMU is disabled. + */ + WARN_ON(this_cpu_read(cpu_hw_events.enabled)); + + prev_raw_count = local64_read(&hwc->prev_count); + rdpmcl(hwc->event_base_rdpmc, new_raw_count); + local64_set(&hwc->prev_count, new_raw_count); + + /* + * Since the counter increments a negative counter value and + * overflows on the sign switch, giving the interval: + * + * [-period, 0] + * + * the difference between two consecutive reads is: + * + * A) value2 - value1; + * when no overflows have happened in between, + * + * B) (0 - value1) + (value2 - (-period)); + * when one overflow happened in between, + * + * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); + * when @n overflows happened in between. + * + * Here A) is the obvious difference, B) is the extension to the + * discrete interval, where the first term is to the top of the + * interval and the second term is from the bottom of the next + * interval and C) the extension to multiple intervals, where the + * middle term is the whole intervals covered.
+ * + * An equivalent of C, by reduction, is: + * + * value2 - value1 + n * period + */ + new = ((s64)(new_raw_count << shift) >> shift); + old = ((s64)(prev_raw_count << shift) >> shift); + local64_add(new - old + count * period, &event->count); + + perf_event_update_userpage(event); + + return 0; +} + static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *base, void *top, int bit, int count) { + struct hw_perf_event *hwc = &event->hw; struct perf_sample_data data; struct pt_regs regs; void *at = get_next_pebs_record_by_bit(base, top, bit); - if (!intel_pmu_save_and_restart(event) && - !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)) + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { + /* + * Now, auto-reload is only enabled in fixed period mode. + * The reload value is always hwc->sample_period. + * May need to change it, if auto-reload is enabled in + * freq mode later. + */ + intel_pmu_save_and_restart_reload(event, count); + } else if (!intel_pmu_save_and_restart(event)) return; while (count > 1) { @@ -1365,8 +1441,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) return; n = top - at; - if (n <= 0) + if (n <= 0) { + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); return; + } __intel_pmu_pebs_event(event, iregs, at, top, 0, n); } @@ -1389,8 +1468,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) ds->pebs_index = ds->pebs_buffer_base; - if (unlikely(base >= top)) + if (unlikely(base >= top)) { + /* + * The drain_pebs() could be called twice in a short period + * for auto-reload event in pmu::read(). No overflows + * have happened in between. + * It needs to call intel_pmu_save_and_restart_reload() to + * update the event->count for this case.
+ */ + for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, + x86_pmu.max_pebs_events) { + event = cpuc->events[bit]; + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); + } return; + } for (at = base; at < top; at += x86_pmu.pebs_record_size) { struct pebs_record_nhm *p = at; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 8e4ea143ed96403d275bf6727801961db9a053d7..dc4728eccfd86c4a2a97894f53b05b658a6e0155 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -556,7 +556,7 @@ struct x86_pmu { struct x86_pmu_quirk *quirks; int perfctr_second_write; bool late_ack; - unsigned (*limit_period)(struct perf_event *event, unsigned l); + u64 (*limit_period)(struct perf_event *event, u64 l); /* * sysfs attrs diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index cf5961ca867746ae2eadb4a2b6f2c9068cec48c4..4cd6a3b71824293ae3edb664bc5ed6e48ca5a459 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -218,13 +218,11 @@ static inline int alternatives_text_reserved(void *start, void *end) */ #define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ output, input...) 
\ -{ \ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ "call %P[new2]", feature2) \ : output, ASM_CALL_CONSTRAINT \ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ - [new2] "i" (newfunc2), ## input); \ -} + [new2] "i" (newfunc2), ## input) /* * use this macro(s) if you need more than one output parameter diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 23a65439c37c22fb4a46c143546bf149f7d874da..403e97d5e24322775dc01953ef32f8f4e3dd9276 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -198,7 +198,6 @@ #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ #define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ - #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ @@ -207,11 +206,19 @@ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ +#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ +#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ +#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. 
*/ +#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */ +#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ +#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ +#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ @@ -272,9 +279,10 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ -#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ -#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ -#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ +#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ +#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ @@ -331,6 +339,7 @@ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ +#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ /* * BUG word(s) @@ -360,5 +369,6 @@ #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by 
meltdown attack and needs kernel page table isolation */ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ +#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4f8b801996722df5d1ebdcbe0e849a9bfbbc955f..174b9c41efce00d67d034616afcc974a7e51fe2f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -908,7 +908,7 @@ struct kvm_x86_ops { int (*hardware_setup)(void); /* __init */ void (*hardware_unsetup)(void); /* __exit */ bool (*cpu_has_accelerated_tpr)(void); - bool (*cpu_has_high_real_mode_segbase)(void); + bool (*has_emulated_msr)(int index); void (*cpuid_update)(struct kvm_vcpu *vcpu); int (*vm_init)(struct kvm *kvm); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 1de72ce514cd5561dbafea43996d909f449f8766..ed97ef3b48a75c7396997497e964019481f87f1b 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -192,7 +192,7 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { - /* pkey 0 is the default and always allocated */ + /* pkey 0 is the default and allocated implicitly */ mm->context.pkey_allocation_map = 0x1; /* -1 means unallocated or invalid */ mm->context.execute_only_pkey = -1; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index eb83ff1bae8f1a709039975aa383ebcb29eafc40..504b21692d3277d37ee1d22bf1367faec9d4db97 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -42,6 +42,8 @@ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ 
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ +#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ +#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ @@ -68,6 +70,11 @@ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a #define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ #define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ +#define ARCH_CAP_SSB_NO (1 << 4) /* + * Not susceptible to Speculative Store Bypass + * attack, so no Speculative Store Bypass + * control required. + */ #define MSR_IA32_BBL_CR_CTL 0x00000119 #define MSR_IA32_BBL_CR_CTL3 0x0000011e @@ -337,6 +344,8 @@ #define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ +#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f + /* Fam 17h MSRs */ #define MSR_F17H_IRPERF 0xc00000e9 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index f928ad9b143fedea1085dedc508658fa745b4ceb..8b38df98548e8dfd1176f564b02379f6a728da49 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -217,6 +217,14 @@ enum spectre_v2_mitigation { SPECTRE_V2_IBRS, }; +/* The Speculative Store Bypass disable variants */ +enum ssb_mitigation { + SPEC_STORE_BYPASS_NONE, + SPEC_STORE_BYPASS_DISABLE, + SPEC_STORE_BYPASS_PRCTL, + SPEC_STORE_BYPASS_SECCOMP, +}; + extern char __indirect_thunk_start[]; extern char __indirect_thunk_end[]; @@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void) #endif } -#define alternative_msr_write(_msr, _val, _feature) \ - asm volatile(ALTERNATIVE("", \ - "movl %[msr], %%ecx\n\t" \ - "movl %[val], %%eax\n\t" \ - "movl $0, %%edx\n\t" \ - "wrmsr", \ - _feature) 
\ - : : [msr] "i" (_msr), [val] "i" (_val) \ - : "eax", "ecx", "edx", "memory") +static __always_inline +void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) +{ + asm volatile(ALTERNATIVE("", "wrmsr", %c[feature]) + : : "c" (msr), + "a" ((u32)val), + "d" ((u32)(val >> 32)), + [feature] "i" (feature) + : "memory"); +} static inline void indirect_branch_prediction_barrier(void) { - alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, - X86_FEATURE_USE_IBPB); + u64 val = PRED_CMD_IBPB; + + alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); } +/* The Intel SPEC CTRL MSR base value cache */ +extern u64 x86_spec_ctrl_base; + /* * With retpoline, we must use IBRS to restrict branch prediction * before calling into firmware. @@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void) */ #define firmware_restrict_branch_speculation_start() \ do { \ + u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ + \ preempt_disable(); \ - alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ X86_FEATURE_USE_IBRS_FW); \ } while (0) #define firmware_restrict_branch_speculation_end() \ do { \ - alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ + u64 val = x86_spec_ctrl_base; \ + \ + alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ X86_FEATURE_USE_IBRS_FW); \ preempt_enable(); \ } while (0) diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index a0ba1ffda0dfd3dc9ee335f04cee58aafdea6fb8..851c04b7a0922cd3ea79f3a574246b6a94262d40 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -2,6 +2,8 @@ #ifndef _ASM_X86_PKEYS_H #define _ASM_X86_PKEYS_H +#define ARCH_DEFAULT_PKEY 0 + #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 
16 : 1) extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, @@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm); static inline int execute_only_pkey(struct mm_struct *mm) { if (!boot_cpu_has(X86_FEATURE_OSPKE)) - return 0; + return ARCH_DEFAULT_PKEY; return __execute_only_pkey(mm); } @@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) { /* * "Allocated" pkeys are those that have been returned - * from pkey_alloc(). pkey 0 is special, and never - * returned from pkey_alloc(). + * from pkey_alloc() or pkey 0 which is allocated + * implicitly when the mm is created. */ - if (pkey <= 0) + if (pkey < 0) return false; if (pkey >= arch_max_pkey()) return false; + /* + * The exec-only pkey is set in the allocation map, but + * is not available to any of the user interfaces like + * mprotect_pkey(). + */ + if (pkey == mm->context.execute_only_pkey) + return false; + return mm_pkey_allocation_map(mm) & (1U << pkey); } diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..ae7c2c5cd7f0e2e9f2becb438a1366461f5725c6 --- /dev/null +++ b/arch/x86/include/asm/spec-ctrl.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_SPECCTRL_H_ +#define _ASM_X86_SPECCTRL_H_ + +#include +#include + +/* + * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR + * the guest has, while on VMEXIT we restore the host view. This + * would be easier if SPEC_CTRL were architecturally maskable or + * shadowable for guests but this is not (currently) the case. + * Takes the guest view of SPEC_CTRL MSR as a parameter and also + * the guest's version of VIRT_SPEC_CTRL, if emulated. 
+ */ +extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest); + +/** + * x86_spec_ctrl_set_guest - Set speculation control registers for the guest + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL + * (may get translated to MSR_AMD64_LS_CFG bits) + * + * Avoids writing to the MSR if the content/bits are the same + */ +static inline +void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) +{ + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true); +} + +/** + * x86_spec_ctrl_restore_host - Restore host speculation control registers + * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL + * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL + * (may get translated to MSR_AMD64_LS_CFG bits) + * + * Avoids writing to the MSR if the content/bits are the same + */ +static inline +void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) +{ + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false); +} + +/* AMD specific Speculative Store Bypass MSR data */ +extern u64 x86_amd_ls_cfg_base; +extern u64 x86_amd_ls_cfg_ssbd_mask; + +static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) +{ + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); + return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); +} + +static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) +{ + BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); + return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); +} + +static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) +{ + return (tifn & _TIF_SSBD) ? 
x86_amd_ls_cfg_ssbd_mask : 0ULL; +} + +#ifdef CONFIG_SMP +extern void speculative_store_bypass_ht_init(void); +#else +static inline void speculative_store_bypass_ht_init(void) { } +#endif + +extern void speculative_store_bypass_update(unsigned long tif); + +static inline void speculative_store_bypass_update_current(void) +{ + speculative_store_bypass_update(current_thread_info()->flags); +} + +#endif diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index eda3b6823ca4323d542f435854c658324ead17bd..95ff2d7f553f32f220d6856bd0fc3fbc9467711d 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -81,6 +81,7 @@ struct thread_info { #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ +#define TIF_SSBD 5 /* Reduced data speculation */ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ @@ -107,6 +108,7 @@ struct thread_info { #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_SSBD (1 << TIF_SSBD) #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) @@ -146,7 +148,7 @@ struct thread_info { /* flags to check in __switch_to() */ #define _TIF_WORK_CTXSW \ - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP) + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 704f31315ddeb3b82b702c57d4c33c3bed9e303b..875ca99b82eef003781464af111214392fdd0f5c 100644 --- 
a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) { VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); - VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID)); + /* + * Use boot_cpu_has() instead of this_cpu_has() as this function + * might be called during early boot. This should work even after + * boot because all CPU's the have same capabilities: + */ + VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID)); return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; } diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 5942aa5f569bf3359dbc7247f91e9215132370cd..ebdcc368a2d3dc34c0f81b86c1afd2ac457bf1c3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1481,7 +1481,7 @@ void setup_local_APIC(void) * TODO: set up through-local-APIC from through-I/O-APIC? --macro */ value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; - if (!cpu && (pic_mode || !value)) { + if (!cpu && (pic_mode || !value || skip_ioapic_setup)) { value = APIC_DM_EXTINT; apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); } else { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e7d5a7883632caa73b6631c2e203f48d93a9458a..90574f731c05062bea029310560f853489575ee5 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) rdmsrl(MSR_FAM10H_NODE_ID, value); nodes_per_socket = ((value >> 3) & 7) + 1; } + + if (c->x86 >= 0x15 && c->x86 <= 0x17) { + unsigned int bit; + + switch (c->x86) { + case 0x15: bit = 54; break; + case 0x16: bit = 33; break; + case 0x17: bit = 10; break; + default: return; + } + /* + * Try to cache the base value so further operations can + * avoid RMW. If that faults, do not enable SSBD. 
+ */ + if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) { + setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD); + setup_force_cpu_cap(X86_FEATURE_SSBD); + x86_amd_ls_cfg_ssbd_mask = 1ULL << bit; + } + } } static void early_init_amd(struct cpuinfo_x86 *c) @@ -765,6 +786,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c) static void init_amd_zn(struct cpuinfo_x86 *c) { + set_cpu_cap(c, X86_FEATURE_ZEN); /* * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects * all up to and including B1. diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index bfca937bdcc36ce8d9523f03dcc92e93d3c39d5c..7416fc206b4a0e3f17be821e932d9fd840c03079 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -12,8 +12,10 @@ #include #include #include +#include +#include -#include +#include #include #include #include @@ -27,6 +29,27 @@ #include static void __init spectre_v2_select_mitigation(void); +static void __init ssb_select_mitigation(void); + +/* + * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any + * writes to SPEC_CTRL contain whatever reserved bits have been set. + */ +u64 __ro_after_init x86_spec_ctrl_base; +EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); + +/* + * The vendor and possibly platform specific bits which can be modified in + * x86_spec_ctrl_base. + */ +static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; + +/* + * AMD specific MSR info for Speculative Store Bypass control. + * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). + */ +u64 __ro_after_init x86_amd_ls_cfg_base; +u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; void __init check_bugs(void) { @@ -37,9 +60,27 @@ void __init check_bugs(void) print_cpu_info(&boot_cpu_data); } + /* + * Read the SPEC_CTRL MSR to account for reserved bits which may + * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD + * init code as it is not enumerated and depends on the family. 
+ */ + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + + /* Allow STIBP in MSR_SPEC_CTRL if supported */ + if (boot_cpu_has(X86_FEATURE_STIBP)) + x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; + /* Select the proper spectre mitigation before patching alternatives */ spectre_v2_select_mitigation(); + /* + * Select proper mitigation for any exposure to the Speculative Store + * Bypass vulnerability. + */ + ssb_select_mitigation(); + #ifdef CONFIG_X86_32 /* * Check whether we are able to run this kernel safely on SMP. @@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = { #undef pr_fmt #define pr_fmt(fmt) "Spectre V2 : " fmt -static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = + SPECTRE_V2_NONE; + +void +x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) +{ + u64 msrval, guestval, hostval = x86_spec_ctrl_base; + struct thread_info *ti = current_thread_info(); + + /* Is MSR_SPEC_CTRL implemented ? */ + if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { + /* + * Restrict guest_spec_ctrl to supported values. Clear the + * modifiable bits in the host base value and or the + * modifiable bits from the guest value. + */ + guestval = hostval & ~x86_spec_ctrl_mask; + guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; + + /* SSBD controlled in MSR_SPEC_CTRL */ + if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) + hostval |= ssbd_tif_to_spec_ctrl(ti->flags); + + if (hostval != guestval) { + msrval = setguest ? guestval : hostval; + wrmsrl(MSR_IA32_SPEC_CTRL, msrval); + } + } + + /* + * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update + * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported. + */ + if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && + !static_cpu_has(X86_FEATURE_VIRT_SSBD)) + return; + + /* + * If the host has SSBD mitigation enabled, force it in the host's + * virtual MSR value.
If its not permanently enabled, evaluate + * current's TIF_SSBD thread flag. + */ + if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) + hostval = SPEC_CTRL_SSBD; + else + hostval = ssbd_tif_to_spec_ctrl(ti->flags); + + /* Sanitize the guest value */ + guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; + + if (hostval != guestval) { + unsigned long tif; + + tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : + ssbd_spec_ctrl_to_tif(hostval); + + speculative_store_bypass_update(tif); + } +} +EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); + +static void x86_amd_ssb_disable(void) +{ + u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; + + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); + else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) + wrmsrl(MSR_AMD64_LS_CFG, msrval); +} #ifdef RETPOLINE static bool spectre_v2_bad_module; @@ -312,32 +422,289 @@ static void __init spectre_v2_select_mitigation(void) } #undef pr_fmt +#define pr_fmt(fmt) "Speculative Store Bypass: " fmt + +static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; + +/* The kernel command line selection */ +enum ssb_mitigation_cmd { + SPEC_STORE_BYPASS_CMD_NONE, + SPEC_STORE_BYPASS_CMD_AUTO, + SPEC_STORE_BYPASS_CMD_ON, + SPEC_STORE_BYPASS_CMD_PRCTL, + SPEC_STORE_BYPASS_CMD_SECCOMP, +}; + +static const char *ssb_strings[] = { + [SPEC_STORE_BYPASS_NONE] = "Vulnerable", + [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", + [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", + [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", +}; + +static const struct { + const char *option; + enum ssb_mitigation_cmd cmd; +} ssb_mitigation_options[] = { + { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ + { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ + { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* 
Don't touch Speculative Store Bypass */ + { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ + { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ +}; + +static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) +{ + enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; + char arg[20]; + int ret, i; + + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { + return SPEC_STORE_BYPASS_CMD_NONE; + } else { + ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", + arg, sizeof(arg)); + if (ret < 0) + return SPEC_STORE_BYPASS_CMD_AUTO; + + for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { + if (!match_option(arg, ret, ssb_mitigation_options[i].option)) + continue; + + cmd = ssb_mitigation_options[i].cmd; + break; + } + + if (i >= ARRAY_SIZE(ssb_mitigation_options)) { + pr_err("unknown option (%s). Switching to AUTO select\n", arg); + return SPEC_STORE_BYPASS_CMD_AUTO; + } + } + + return cmd; +} + +static enum ssb_mitigation __init __ssb_select_mitigation(void) +{ + enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; + enum ssb_mitigation_cmd cmd; + + if (!boot_cpu_has(X86_FEATURE_SSBD)) + return mode; + + cmd = ssb_parse_cmdline(); + if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && + (cmd == SPEC_STORE_BYPASS_CMD_NONE || + cmd == SPEC_STORE_BYPASS_CMD_AUTO)) + return mode; + + switch (cmd) { + case SPEC_STORE_BYPASS_CMD_AUTO: + case SPEC_STORE_BYPASS_CMD_SECCOMP: + /* + * Choose prctl+seccomp as the default mode if seccomp is + * enabled. 
+ */ + if (IS_ENABLED(CONFIG_SECCOMP)) + mode = SPEC_STORE_BYPASS_SECCOMP; + else + mode = SPEC_STORE_BYPASS_PRCTL; + break; + case SPEC_STORE_BYPASS_CMD_ON: + mode = SPEC_STORE_BYPASS_DISABLE; + break; + case SPEC_STORE_BYPASS_CMD_PRCTL: + mode = SPEC_STORE_BYPASS_PRCTL; + break; + case SPEC_STORE_BYPASS_CMD_NONE: + break; + } + + /* + * We have three CPU feature flags that are in play here: + * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. + * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass + * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation + */ + if (mode == SPEC_STORE_BYPASS_DISABLE) { + setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); + /* + * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses + * a completely different MSR and bit dependent on family. + */ + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_INTEL: + x86_spec_ctrl_base |= SPEC_CTRL_SSBD; + x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + break; + case X86_VENDOR_AMD: + x86_amd_ssb_disable(); + break; + } + } + + return mode; +} + +static void ssb_select_mitigation(void) +{ + ssb_mode = __ssb_select_mitigation(); + + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + pr_info("%s\n", ssb_strings[ssb_mode]); +} + +#undef pr_fmt +#define pr_fmt(fmt) "Speculation prctl: " fmt + +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + bool update; + + if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && + ssb_mode != SPEC_STORE_BYPASS_SECCOMP) + return -ENXIO; + + switch (ctrl) { + case PR_SPEC_ENABLE: + /* If speculation is force disabled, enable is not allowed */ + if (task_spec_ssb_force_disable(task)) + return -EPERM; + task_clear_spec_ssb_disable(task); + update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_DISABLE: + task_set_spec_ssb_disable(task); + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_FORCE_DISABLE: + 
task_set_spec_ssb_disable(task); + task_set_spec_ssb_force_disable(task); + update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); + break; + default: + return -ERANGE; + } + + /* + * If being set on non-current task, delay setting the CPU + * mitigation until it is next scheduled. + */ + if (task == current && update) + speculative_store_bypass_update_current(); + + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssb_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +#ifdef CONFIG_SECCOMP +void arch_seccomp_spec_mitigate(struct task_struct *task) +{ + if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) + ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +} +#endif + +static int ssb_prctl_get(struct task_struct *task) +{ + switch (ssb_mode) { + case SPEC_STORE_BYPASS_DISABLE: + return PR_SPEC_DISABLE; + case SPEC_STORE_BYPASS_SECCOMP: + case SPEC_STORE_BYPASS_PRCTL: + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + default: + if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) + return PR_SPEC_ENABLE; + return PR_SPEC_NOT_AFFECTED; + } +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssb_prctl_get(task); + default: + return -ENODEV; + } +} + +void x86_spec_ctrl_setup_ap(void) +{ + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) + x86_amd_ssb_disable(); +} #ifdef CONFIG_SYSFS -ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) + +static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) { - if 
(!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + if (!boot_cpu_has_bug(bug)) return sprintf(buf, "Not affected\n"); - if (boot_cpu_has(X86_FEATURE_PTI)) - return sprintf(buf, "Mitigation: PTI\n"); + + switch (bug) { + case X86_BUG_CPU_MELTDOWN: + if (boot_cpu_has(X86_FEATURE_PTI)) + return sprintf(buf, "Mitigation: PTI\n"); + + break; + + case X86_BUG_SPECTRE_V1: + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + + case X86_BUG_SPECTRE_V2: + return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + spectre_v2_module_string()); + + case X86_BUG_SPEC_STORE_BYPASS: + return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); + + default: + break; + } + return sprintf(buf, "Vulnerable\n"); } +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); +} + ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) - return sprintf(buf, "Not affected\n"); - return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); } ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { - if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) - return sprintf(buf, "Not affected\n"); + return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); +} - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? 
", IBRS_FW" : "", - spectre_v2_module_string()); +ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); } #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cf6380200dc29f897d1e6317d3445fda909ee95c..48e98964ecadb71754c32f300d2e99544e733400 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -733,17 +733,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c) * and they also have a different bit for STIBP support. Also, * a hypervisor might have set the individual AMD bits even on * Intel CPUs, for finer-grained selection of what's available. - * - * We use the AMD bits in 0x8000_0008 EBX as the generic hardware - * features, which are visible in /proc/cpuinfo and used by the - * kernel. So set those accordingly from the Intel bits. */ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { set_cpu_cap(c, X86_FEATURE_IBRS); set_cpu_cap(c, X86_FEATURE_IBPB); + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); } + if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) set_cpu_cap(c, X86_FEATURE_STIBP); + + if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || + cpu_has(c, X86_FEATURE_VIRT_SSBD)) + set_cpu_cap(c, X86_FEATURE_SSBD); + + if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { + set_cpu_cap(c, X86_FEATURE_IBRS); + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); + } + + if (cpu_has(c, X86_FEATURE_AMD_IBPB)) + set_cpu_cap(c, X86_FEATURE_IBPB); + + if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { + set_cpu_cap(c, X86_FEATURE_STIBP); + set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); + } } void get_cpu_cap(struct cpuinfo_x86 *c) @@ -894,21 +909,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { {} }; -static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c) +static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW }, + { X86_VENDOR_INTEL, 6, 
INTEL_FAM6_ATOM_LINCROFT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + { X86_VENDOR_CENTAUR, 5, }, + { X86_VENDOR_INTEL, 5, }, + { X86_VENDOR_NSC, 5, }, + { X86_VENDOR_AMD, 0x12, }, + { X86_VENDOR_AMD, 0x11, }, + { X86_VENDOR_AMD, 0x10, }, + { X86_VENDOR_AMD, 0xf, }, + { X86_VENDOR_ANY, 4, }, + {} +}; + +static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = 0; - if (x86_match_cpu(cpu_no_meltdown)) - return false; - if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + if (!x86_match_cpu(cpu_no_spec_store_bypass) && + !(ia32_cap & ARCH_CAP_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + + if (x86_match_cpu(cpu_no_speculation)) + return; + + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); + setup_force_cpu_bug(X86_BUG_SPECTRE_V2); + + if (x86_match_cpu(cpu_no_meltdown)) + return; + /* Rogue Data Cache Load? No! 
*/ if (ia32_cap & ARCH_CAP_RDCL_NO) - return false; + return; - return true; + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); } /* @@ -958,12 +1007,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) setup_force_cpu_cap(X86_FEATURE_ALWAYS); - if (!x86_match_cpu(cpu_no_speculation)) { - if (cpu_vulnerable_to_meltdown(c)) - setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); - setup_force_cpu_bug(X86_BUG_SPECTRE_V1); - setup_force_cpu_bug(X86_BUG_SPECTRE_V2); - } + cpu_set_bug_bits(c); fpu__init_system(c); @@ -1322,6 +1366,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) #endif mtrr_ap_init(); validate_apic_and_package_id(c); + x86_spec_ctrl_setup_ap(); } static __init int setup_noclflush(char *arg) diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index e806b11a99af4c72c5868731c7a8555cfb3957d9..37672d299e357430f2d16941905e352e9e89f648 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); +extern void x86_spec_ctrl_setup_ap(void); + #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index c3af167d0a70c8e0220d3ae81383b8f8dee046de..c895f38a7a5eb42b8d51d13f9dff50d520ffc376 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c) setup_clear_cpu_cap(X86_FEATURE_IBPB); setup_clear_cpu_cap(X86_FEATURE_STIBP); setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); + setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL); setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); + setup_clear_cpu_cap(X86_FEATURE_SSBD); + setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD); } /* diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 7be35b60029981b71f56c3068f4a6337e27f85e2..2dae1b3c42fccee965ce244582f2fcc0e99aab6a 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c 
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1657,6 +1657,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, if (ret < 0) goto out_common_fail; closid = ret; + ret = 0; rdtgrp->closid = closid; list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 259c75d7a2a0b3d19cf8cd0f340a039c306c12f5..dbcb010067496358a975fe4807114e3a47fe6559 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = { [SMCA_SMU] = { "smu", "System Management Unit" }, }; +static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = +{ + [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 } +}; + const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) @@ -429,52 +434,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) wrmsr(MSR_CU_DEF_ERR, low, high); } -static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, - unsigned int bank, unsigned int block) +static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, + unsigned int block) { - u32 addr = 0, offset = 0; + u32 low, high; + u32 addr = 0; - if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) + if (smca_get_bank_type(bank) == SMCA_RESERVED) return addr; - /* Get address from already initialized block. 
*/ - if (per_cpu(threshold_banks, cpu)) { - struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank]; + if (!block) + return MSR_AMD64_SMCA_MCx_MISC(bank); - if (bankp && bankp->blocks) { - struct threshold_block *blockp = &bankp->blocks[block]; + /* Check our cache first: */ + if (smca_bank_addrs[bank][block] != -1) + return smca_bank_addrs[bank][block]; - if (blockp) - return blockp->address; - } - } + /* + * For SMCA enabled processors, BLKPTR field of the first MISC register + * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). + */ + if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) + goto out; - if (mce_flags.smca) { - if (smca_get_bank_type(bank) == SMCA_RESERVED) - return addr; + if (!(low & MCI_CONFIG_MCAX)) + goto out; - if (!block) { - addr = MSR_AMD64_SMCA_MCx_MISC(bank); - } else { - /* - * For SMCA enabled processors, BLKPTR field of the - * first MISC register (MCx_MISC0) indicates presence of - * additional MISC register set (MISC1-4). 
- */ - u32 low, high; + if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && + (low & MASK_BLKPTR_LO)) + addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) - return addr; +out: + smca_bank_addrs[bank][block] = addr; + return addr; +} - if (!(low & MCI_CONFIG_MCAX)) - return addr; +static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, + unsigned int bank, unsigned int block) +{ + u32 addr = 0, offset = 0; - if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && - (low & MASK_BLKPTR_LO)) - addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); - } + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) return addr; - } + + if (mce_flags.smca) + return smca_get_block_address(cpu, bank, block); /* Fall back to method we used for older processors: */ switch (block) { diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 76e07698e6d1524fe7c55ba73acbfe8e9911e68a..7fa0855e4b9aa2c71a8eebd4b4e46af8a687448a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -200,19 +201,22 @@ static struct of_ioapic_type of_ioapic_type[] = static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { - struct of_phandle_args *irq_data = (void *)arg; + struct irq_fwspec *fwspec = (struct irq_fwspec *)arg; struct of_ioapic_type *it; struct irq_alloc_info tmp; + int type_index; - if (WARN_ON(irq_data->args_count < 2)) + if (WARN_ON(fwspec->param_count < 2)) return -EINVAL; - if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type)) + + type_index = fwspec->param[1]; + if (type_index >= ARRAY_SIZE(of_ioapic_type)) return -EINVAL; - it = &of_ioapic_type[irq_data->args[1]]; + it = &of_ioapic_type[type_index]; ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); 
tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); - tmp.ioapic_pin = irq_data->args[0]; + tmp.ioapic_pin = fwspec->param[0]; return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp); } @@ -276,14 +280,15 @@ static void __init x86_flattree_get_config(void) map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128); - initial_boot_params = dt = early_memremap(initial_dtb, map_len); - size = of_get_flat_dt_size(); + dt = early_memremap(initial_dtb, map_len); + size = fdt_totalsize(dt); if (map_len < size) { early_memunmap(dt, map_len); - initial_boot_params = dt = early_memremap(initial_dtb, size); + dt = early_memremap(initial_dtb, size); map_len = size; } + early_init_dt_verify(dt); unflatten_and_copy_device_tree(); early_memunmap(dt, map_len); } diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index edfede76868870e4cf86fed553377fd6bd0cac06..5167f3f7413673b218309fc91a4d1a8486405189 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -57,12 +57,17 @@ static void load_segments(void) static void machine_kexec_free_page_tables(struct kimage *image) { free_page((unsigned long)image->arch.pgd); + image->arch.pgd = NULL; #ifdef CONFIG_X86_PAE free_page((unsigned long)image->arch.pmd0); + image->arch.pmd0 = NULL; free_page((unsigned long)image->arch.pmd1); + image->arch.pmd1 = NULL; #endif free_page((unsigned long)image->arch.pte0); + image->arch.pte0 = NULL; free_page((unsigned long)image->arch.pte1); + image->arch.pte1 = NULL; } static int machine_kexec_alloc_page_tables(struct kimage *image) @@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image) !image->arch.pmd0 || !image->arch.pmd1 || #endif !image->arch.pte0 || !image->arch.pte1) { - machine_kexec_free_page_tables(image); return -ENOMEM; } return 0; diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 
3b7427aa7d850675905aaf636590b34107132874..5bce2a88e8a3a83c75f5ff2b42318797059a8976 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -38,9 +38,13 @@ static struct kexec_file_ops *kexec_file_loaders[] = { static void free_transition_pgtable(struct kimage *image) { free_page((unsigned long)image->arch.p4d); + image->arch.p4d = NULL; free_page((unsigned long)image->arch.pud); + image->arch.pud = NULL; free_page((unsigned long)image->arch.pmd); + image->arch.pmd = NULL; free_page((unsigned long)image->arch.pte); + image->arch.pte = NULL; } static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) @@ -90,7 +94,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC)); return 0; err: - free_transition_pgtable(image); return result; } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 8bd1d8292cf761ea70db7b10576841508462149b..988a98f34c6646f37124314c834dcfb8671e783a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -39,6 +39,7 @@ #include #include #include +#include /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, @@ -279,6 +280,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss, } } +#ifdef CONFIG_SMP + +struct ssb_state { + struct ssb_state *shared_state; + raw_spinlock_t lock; + unsigned int disable_state; + unsigned long local_state; +}; + +#define LSTATE_SSB 0 + +static DEFINE_PER_CPU(struct ssb_state, ssb_state); + +void speculative_store_bypass_ht_init(void) +{ + struct ssb_state *st = this_cpu_ptr(&ssb_state); + unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; + + st->local_state = 0; + + /* + * Shared state setup happens once on the first bringup + * of the CPU. It's not destroyed on CPU hotunplug. 
+ */ + if (st->shared_state) + return; + + raw_spin_lock_init(&st->lock); + + /* + * Go over HT siblings and check whether one of them has set up the + * shared state pointer already. + */ + for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) { + if (cpu == this_cpu) + continue; + + if (!per_cpu(ssb_state, cpu).shared_state) + continue; + + /* Link it to the state of the sibling: */ + st->shared_state = per_cpu(ssb_state, cpu).shared_state; + return; + } + + /* + * First HT sibling to come up on the core. Link shared state of + * the first HT sibling to itself. The siblings on the same core + * which come up later will see the shared state pointer and link + * themself to the state of this CPU. + */ + st->shared_state = st; +} + +/* + * Logic is: First HT sibling enables SSBD for both siblings in the core + * and last sibling to disable it, disables it for the whole core. This how + * MSR_SPEC_CTRL works in "hardware": + * + * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL + */ +static __always_inline void amd_set_core_ssb_state(unsigned long tifn) +{ + struct ssb_state *st = this_cpu_ptr(&ssb_state); + u64 msr = x86_amd_ls_cfg_base; + + if (!static_cpu_has(X86_FEATURE_ZEN)) { + msr |= ssbd_tif_to_amd_ls_cfg(tifn); + wrmsrl(MSR_AMD64_LS_CFG, msr); + return; + } + + if (tifn & _TIF_SSBD) { + /* + * Since this can race with prctl(), block reentry on the + * same CPU. 
+ */ + if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) + return; + + msr |= x86_amd_ls_cfg_ssbd_mask; + + raw_spin_lock(&st->shared_state->lock); + /* First sibling enables SSBD: */ + if (!st->shared_state->disable_state) + wrmsrl(MSR_AMD64_LS_CFG, msr); + st->shared_state->disable_state++; + raw_spin_unlock(&st->shared_state->lock); + } else { + if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) + return; + + raw_spin_lock(&st->shared_state->lock); + st->shared_state->disable_state--; + if (!st->shared_state->disable_state) + wrmsrl(MSR_AMD64_LS_CFG, msr); + raw_spin_unlock(&st->shared_state->lock); + } +} +#else +static __always_inline void amd_set_core_ssb_state(unsigned long tifn) +{ + u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn); + + wrmsrl(MSR_AMD64_LS_CFG, msr); +} +#endif + +static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) +{ + /* + * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL, + * so ssbd_tif_to_spec_ctrl() just works. 
+ */ + wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); +} + +static __always_inline void intel_set_ssb_state(unsigned long tifn) +{ + u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); + + wrmsrl(MSR_IA32_SPEC_CTRL, msr); +} + +static __always_inline void __speculative_store_bypass_update(unsigned long tifn) +{ + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) + amd_set_ssb_virt_state(tifn); + else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) + amd_set_core_ssb_state(tifn); + else + intel_set_ssb_state(tifn); +} + +void speculative_store_bypass_update(unsigned long tif) +{ + preempt_disable(); + __speculative_store_bypass_update(tif); + preempt_enable(); +} + void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, struct tss_struct *tss) { @@ -310,6 +453,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, if ((tifp ^ tifn) & _TIF_NOCPUID) set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); + + if ((tifp ^ tifn) & _TIF_SSBD) + __speculative_store_bypass_update(tifn); } /* diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9eb448c7859d2469ba63e6112f129bb2926c4bb0..fa093b77689f82ee547481067ba85026ebdfd29c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -528,6 +528,7 @@ void set_personality_64bit(void) clear_thread_flag(TIF_X32); /* Pretend that this comes from a 64bit execve */ task_pt_regs(current)->orig_ax = __NR_execve; + current_thread_info()->status &= ~TS_COMPAT; /* Ensure the corresponding mm is not marked. 
*/ if (current->mm) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6b841262b7905efb84eabc07749254cababfeab1..344d3c160f8d779773a7c25ff4f971d5c2273622 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -77,6 +77,7 @@ #include #include #include +#include /* Number of siblings per CPU package */ int smp_num_siblings = 1; @@ -245,6 +246,8 @@ static void notrace start_secondary(void *unused) */ check_tsc_sync_target(); + speculative_store_bypass_ht_init(); + /* * Lock vector_lock and initialize the vectors on this cpu * before setting the cpu online. We must set it online with @@ -1349,6 +1352,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) set_mtrr_aps_delayed_init(); smp_quirk_init_udelay(); + + speculative_store_bypass_ht_init(); } void arch_enable_nonboot_cpus_begin(void) @@ -1516,6 +1521,7 @@ static void remove_siblinginfo(int cpu) cpumask_clear(topology_core_cpumask(cpu)); c->phys_proc_id = 0; c->cpu_core_id = 0; + c->booted_cores = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); recompute_smt_state(); } diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 4f544f2a7b06b38fb38d22c1bf52bdb847d92bdd..d1f5c744142b24da997442139c213f3b3d0276cb 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = - F(IBPB) | F(IBRS); + F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD); /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = @@ -395,7 +395,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | - F(ARCH_CAPABILITIES); + F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); @@ -481,6 +481,11 @@ static 
inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ecx &= ~F(PKU); entry->edx &= kvm_cpuid_7_0_edx_x86_features; cpuid_mask(&entry->edx, CPUID_7_EDX); + /* + * We emulate ARCH_CAPABILITIES in software even + * if the host doesn't support it. + */ + entry->edx |= F(ARCH_CAPABILITIES); } else { entry->ebx = 0; entry->ecx = 0; @@ -632,13 +637,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, g_phys_as = phys_as; entry->eax = g_phys_as | (virt_as << 8); entry->edx = 0; - /* IBRS and IBPB aren't necessarily present in hardware cpuid */ - if (boot_cpu_has(X86_FEATURE_IBPB)) - entry->ebx |= F(IBPB); - if (boot_cpu_has(X86_FEATURE_IBRS)) - entry->ebx |= F(IBRS); + /* + * IBRS, IBPB and VIRT_SSBD aren't necessarily present in + * hardware cpuid + */ + if (boot_cpu_has(X86_FEATURE_AMD_IBPB)) + entry->ebx |= F(AMD_IBPB); + if (boot_cpu_has(X86_FEATURE_AMD_IBRS)) + entry->ebx |= F(AMD_IBRS); + if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) + entry->ebx |= F(VIRT_SSBD); entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); + if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) + entry->ebx |= F(VIRT_SSBD); break; } case 0x80000019: diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index ab8993fe58cc5b85cc429cd0636b88de5b526a16..6d0fbff71d7a56a8d0a59412512b3a8a01cfdfb2 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -321,8 +321,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu) if (!lapic_in_kernel(vcpu)) return; + /* + * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation) + * which doesn't have EOI register; Some buggy OSes (e.g. Windows with + * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC + * version first and level-triggered interrupts never get EOIed in + * IOAPIC. 
+ */ feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); - if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31)))) + if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) && + !ioapic_in_kernel(vcpu->kvm)) v |= APIC_LVR_DIRECTED_EOI; kvm_lapic_set_reg(apic, APIC_LVR, v); } @@ -1467,11 +1475,23 @@ static bool set_target_expiration(struct kvm_lapic *apic) static void advance_periodic_target_expiration(struct kvm_lapic *apic) { - apic->lapic_timer.tscdeadline += - nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); + ktime_t now = ktime_get(); + u64 tscl = rdtsc(); + ktime_t delta; + + /* + * Synchronize both deadlines to the same time source or + * differences in the periods (caused by differences in the + * underlying clocks or numerical approximation errors) will + * cause the two to drift apart over time as the errors + * accumulate. + */ apic->lapic_timer.target_expiration = ktime_add_ns(apic->lapic_timer.target_expiration, apic->lapic_timer.period); + delta = ktime_sub(apic->lapic_timer.target_expiration, now); + apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + + nsec_to_cycles(apic->vcpu, delta); } static void start_sw_period(struct kvm_lapic *apic) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9fb0daf628cbcfa558db8856495f6c0097cedc85..029aa13188749561c9f630c8781f90fbe7491a51 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include "trace.h" @@ -186,6 +186,12 @@ struct vcpu_svm { } host; u64 spec_ctrl; + /* + * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be + * translated into the appropriate L2_CFG bits on the host to + * perform speculative control. 
+ */ + u64 virt_spec_ctrl; u32 *msrpm; @@ -1611,6 +1617,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u32 eax = 1; svm->spec_ctrl = 0; + svm->virt_spec_ctrl = 0; if (!init_event) { svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | @@ -3618,11 +3625,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS)) return 1; msr_info->data = svm->spec_ctrl; break; + case MSR_AMD64_VIRT_SPEC_CTRL: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) + return 1; + + msr_info->data = svm->virt_spec_ctrl; + break; case MSR_IA32_UCODE_REV: msr_info->data = 0x01000065; break; @@ -3716,7 +3730,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; case MSR_IA32_SPEC_CTRL: if (!msr->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS)) return 1; /* The STIBP bit doesn't fault even if it's not advertised */ @@ -3743,7 +3757,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; case MSR_IA32_PRED_CMD: if (!msr->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_IBPB)) + !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)) return 1; if (data & ~PRED_CMD_IBPB) @@ -3757,6 +3771,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); break; + case MSR_AMD64_VIRT_SPEC_CTRL: + if (!msr->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) + return 1; + + if (data & ~SPEC_CTRL_SSBD) + return 1; + + svm->virt_spec_ctrl = data; + break; case MSR_STAR: svm->vmcb->save.star = data; break; @@ -5015,8 +5039,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) * is no need to worry about the conditional branch over the wrmsr * being speculatively taken. 
*/ - if (svm->spec_ctrl) - native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl); + x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); asm volatile ( "push %%" _ASM_BP "; \n\t" @@ -5110,6 +5133,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) #endif ); + /* Eliminate branch target predictions from guest mode */ + vmexit_fill_RSB(); + +#ifdef CONFIG_X86_64 + wrmsrl(MSR_GS_BASE, svm->host.gs_base); +#else + loadsegment(fs, svm->host.fs); +#ifndef CONFIG_X86_32_LAZY_GS + loadsegment(gs, svm->host.gs); +#endif +#endif + /* * We do not use IBRS in the kernel. If this vCPU has used the * SPEC_CTRL MSR it may have left it on; save the value and @@ -5128,20 +5163,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - if (svm->spec_ctrl) - native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); - - /* Eliminate branch target predictions from guest mode */ - vmexit_fill_RSB(); - -#ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, svm->host.gs_base); -#else - loadsegment(fs, svm->host.fs); -#ifndef CONFIG_X86_32_LAZY_GS - loadsegment(gs, svm->host.gs); -#endif -#endif + x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); reload_tss(vcpu); @@ -5244,7 +5266,7 @@ static bool svm_cpu_has_accelerated_tpr(void) return false; } -static bool svm_has_high_real_mode_segbase(void) +static bool svm_has_emulated_msr(int index) { return true; } @@ -5551,7 +5573,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .hardware_enable = svm_hardware_enable, .hardware_disable = svm_hardware_disable, .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, - .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase, + .has_emulated_msr = svm_has_emulated_msr, .vcpu_create = svm_create_vcpu, .vcpu_free = svm_free_vcpu, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index bdd84ce4491e5375ffab668b2d9fe01207173fc7..4c88572d2b81ad7726a8e321a828b4bcb2bfb273 100644 
--- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -52,7 +52,7 @@ #include #include #include -#include +#include #include "trace.h" #include "pmu.h" @@ -2583,6 +2583,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) return; } + WARN_ON_ONCE(vmx->emulation_required); + if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); @@ -3293,7 +3295,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) return 1; @@ -3414,12 +3415,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) return 1; /* The STIBP bit doesn't fault even if it's not advertised */ - if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) + if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD)) return 1; vmx->spec_ctrl = data; @@ -3445,7 +3445,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_PRED_CMD: if (!msr_info->host_initiated && - !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) && !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) return 1; @@ -6832,12 +6831,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) goto out; } - if (err != EMULATE_DONE) { - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; - vcpu->run->internal.ndata = 0; - return 0; - } + if (err != EMULATE_DONE) + goto emulation_error; + + if (vmx->emulation_required && !vmx->rmode.vm86_active && + vcpu->arch.exception.pending) + goto emulation_error; if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; @@ -6853,6 +6852,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) out: 
return ret; + +emulation_error: + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; } static int __grow_ple_window(int val) @@ -9217,9 +9222,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) } STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); -static bool vmx_has_high_real_mode_segbase(void) +static bool vmx_has_emulated_msr(int index) { - return enable_unrestricted_guest || emulate_invalid_guest_state; + switch (index) { + case MSR_IA32_SMBASE: + /* + * We cannot do SMM unless we can run the guest in big + * real mode. + */ + return enable_unrestricted_guest || emulate_invalid_guest_state; + case MSR_AMD64_VIRT_SPEC_CTRL: + /* This is AMD only. */ + return false; + default: + return true; + } } static bool vmx_mpx_supported(void) @@ -9452,10 +9469,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) * is no need to worry about the conditional branch over the wrmsr * being speculatively taken. */ - if (vmx->spec_ctrl) - native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl); + x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); vmx->__launched = vmx->loaded_vmcs->launched; + asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" @@ -9591,8 +9608,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); - if (vmx->spec_ctrl) - native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); + x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); /* Eliminate branch target predictions from guest mode */ vmexit_fill_RSB(); @@ -11166,7 +11182,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (ret) return ret; - if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) + /* + * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken + * by event injection, halt vcpu. 
+ */ + if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) return kvm_vcpu_halt(vcpu); vmx->nested.nested_run_pending = 1; @@ -12182,7 +12203,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, - .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, + .has_emulated_msr = vmx_has_emulated_msr, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3b2c3aa2cd076bdc7c4e23bc6befe4a6e8ff40b3..adac01d0181a28e2b63f11210164c42882015c7d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1036,6 +1036,7 @@ static u32 emulated_msrs[] = { MSR_IA32_SMBASE, MSR_PLATFORM_INFO, MSR_MISC_FEATURES_ENABLES, + MSR_AMD64_VIRT_SPEC_CTRL, }; static unsigned num_emulated_msrs; @@ -2721,7 +2722,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * fringe case that is not enabled except via specific settings * of the module parameters. 
*/ - r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); + r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE); break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); @@ -4324,14 +4325,8 @@ static void kvm_init_msr_list(void) num_msrs_to_save = j; for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { - switch (emulated_msrs[i]) { - case MSR_IA32_SMBASE: - if (!kvm_x86_ops->cpu_has_high_real_mode_segbase()) - continue; - break; - default: - break; - } + if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i])) + continue; if (j < i) emulated_msrs[j] = emulated_msrs[i]; @@ -7510,6 +7505,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, { struct msr_data apic_base_msr; int mmu_reset_needed = 0; + int cpuid_update_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; @@ -7547,8 +7543,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, vcpu->arch.cr0 = sregs->cr0; mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; + cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & + (X86_CR4_OSXSAVE | X86_CR4_PKE)); kvm_x86_ops->set_cr4(vcpu, sregs->cr4); - if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) + if (cpuid_update_needed) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 3ed9a08885c5601a8bd69893b4e56bfe42d84857..4085897fef648700f669fea03907828296076fb8 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, /* * The .rodata section needs to be read-only. Using the pfn - * catches all aliases. + * catches all aliases. This also includes __ro_after_init, + * so do not enforce until kernel_set_to_readonly is true. 
*/ - if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, + if (kernel_set_to_readonly && + within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, __pa_symbol(__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 34cda7e0551b4a8809bb4a1a9fedef22ce4a4e28..c03c85e4fb6a6cb5fe35479a9e1cb63d65b7984c 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include @@ -636,6 +637,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) (mtrr != MTRR_TYPE_WRBACK)) return 0; + /* Bail out if we are we on a populated non-leaf entry: */ + if (pud_present(*pud) && !pud_huge(*pud)) + return 0; + prot = pgprot_4k_2_large(prot); set_pte((pte_t *)pud, pfn_pte( @@ -664,6 +669,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) return 0; } + /* Bail out if we are we on a populated non-leaf entry: */ + if (pmd_present(*pmd) && !pmd_huge(*pmd)) + return 0; + prot = pgprot_4k_2_large(prot); set_pte((pte_t *)pmd, pfn_pte( diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c index d7bc0eea20a5ed2fc8ec43ebc06429517cbb362b..6e98e0a7c92315c2a819ee396bf78ae3104688c4 100644 --- a/arch/x86/mm/pkeys.c +++ b/arch/x86/mm/pkeys.c @@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey */ if (pkey != -1) return pkey; - /* - * Look for a protection-key-drive execute-only mapping - * which is now being given permissions that are not - * execute-only. Move it back to the default pkey. - */ - if (vma_is_pkey_exec_only(vma) && - (prot & (PROT_READ|PROT_WRITE))) { - return 0; - } + /* * The mapping is execute-only. Go try to get the * execute-only protection key. If we fail to do that, * fall through as if we do not have execute-only - * support. + * support in this mm. 
*/ if (prot == PROT_EXEC) { pkey = execute_only_pkey(vma->vm_mm); if (pkey > 0) return pkey; + } else if (vma_is_pkey_exec_only(vma)) { + /* + * Protections are *not* PROT_EXEC, but the mapping + * is using the exec-only pkey. This mapping was + * PROT_EXEC and will no longer be. Move back to + * the default pkey. + */ + return ARCH_DEFAULT_PKEY; } + /* * This is a vanilla, non-pkey mprotect (or we failed to * setup execute-only), inherit the pkey from the VMA we diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 3e15345abfe70db670ba1d8376ca267915d0437a..de0263348f2d5149e015aa2def52c3a4619b2ca5 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr) } EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); -static void xen_flush_tlb_all(void) +static noinline void xen_flush_tlb_all(void) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb_all(0); - preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 042e9c422b2112406e13f9b795f408bdfb9b568d..b3526a98a5a51151f95d26932fe612a79373af7c 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1280,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void) return this_cpu_read(xen_vcpu_info.arch.cr2); } -static void xen_flush_tlb(void) +static noinline void xen_flush_tlb(void) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb(0); - preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); diff --git a/block/partition-generic.c b/block/partition-generic.c index 91622db9aedffd997947642a5872802312346869..08dabcd8b6aefc6844bbb9d9e9c001e6ff71fb33 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf) EXPORT_SYMBOL(bdevname); +const char *bio_devname(struct bio *bio, char *buf) +{ + return disk_name(bio->bi_disk, bio->bi_partno, buf); +} 
+EXPORT_SYMBOL(bio_devname); + /* * There's very little reason to use this, you should really * have a struct block_device just about everywhere and use diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64 index 83344f38d1042c126fc6e92502ad7ae0b14c7660..694ed56a5f47ce8abc1c6d5500d69bab928a92ae 100644 --- a/build.config.cuttlefish.x86_64 +++ b/build.config.cuttlefish.x86_64 @@ -6,7 +6,7 @@ DEFCONFIG=x86_64_cuttlefish_defconfig EXTRA_CMDS='' KERNEL_DIR=common POST_DEFCONFIG_CMDS="check_defconfig" -CLANG_PREBUILT_BIN=prebuilts/clang/host/linux-x86/clang-4630689/bin +CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r328903/bin LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin FILES=" arch/x86/boot/bzImage diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c index f6a009d88a33fb550654b11d2cfc4460c733a049..52e5ea3b8e40771ea68f506658ffed991c54c730 100644 --- a/crypto/asymmetric_keys/pkcs7_trust.c +++ b/crypto/asymmetric_keys/pkcs7_trust.c @@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7, pr_devel("sinfo %u: Direct signer is key %x\n", sinfo->index, key_serial(key)); x509 = NULL; + sig = sinfo->sig; goto matched; } if (PTR_ERR(key) != -ENOKEY) diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 7544310312825507ae010262c35a031ce9262850..552c1f725b6cf5ab8d4a86ba556c37a053d91b1d 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -110,6 +110,7 @@ static void round_robin_cpu(unsigned int tsk_index) cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); if (cpumask_empty(tmp)) { mutex_unlock(&round_robin_lock); + free_cpumask_var(tmp); return; } for_each_cpu(cpu, tmp) { @@ -127,6 +128,8 @@ static void round_robin_cpu(unsigned int tsk_index) mutex_unlock(&round_robin_lock); set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); + + free_cpumask_var(tmp); } static void 
exit_round_robin(unsigned int tsk_index) diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index d3b6b314fa5070012b65ac6431c0ee137e2966f9..37b0b4c04220da4ad5d81975e75c37ad169a405a 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void) u32 fixed_status; u32 fixed_enable; u32 i; + acpi_status status; ACPI_FUNCTION_NAME(ev_fixed_event_detect); @@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void) * Read the fixed feature status and enable registers, as all the cases * depend on their values. Ignore errors here. */ - (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); - (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); + status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status); + status |= + acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable); + if (ACPI_FAILURE(status)) { + return (int_status); + } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, "Fixed Event Block: Enable %08X Status %08X\n", diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index d22167cbd0ca65d8b758ffcc2c7d0afa40e5cc36..f13d3cfa74e163466c06c2e25d70a0c06c4c9ab3 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */ status = AE_OK; + } else if (ACPI_FAILURE(status)) { + + /* If return_object exists, delete it */ + + if (info->return_object) { + acpi_ut_remove_reference(info->return_object); + info->return_object = NULL; + } } ACPI_DEBUG_PRINT((ACPI_DB_NAMES, diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index eb9dfaca555fe31d0a0c318c682268fa3ffece11..11ce4e5d10e2e42e33796b38da6853e2704c14bf 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -890,6 +890,10 @@ acpi_ps_get_next_arg(struct 
acpi_walk_state *walk_state, ACPI_POSSIBLE_METHOD_CALL); if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { + + /* Free method call op and corresponding namestring sub-ob */ + + acpi_ps_free_op(arg->common.value.arg); acpi_ps_free_op(arg); arg = NULL; walk_state->arg_count = 1; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 473f150d6b22f0df2e62fa895420ffc71be43b16..71008dbabe98ffef68b54047d9eafedc6b9e4774 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4483,6 +4483,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on + SD7SN6S256G and SD8SN8U256G */ + { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, @@ -4543,6 +4547,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 4ff69f508e959b1850bdaa5eb2d64e3acc453ab8..6b0440a12c5198264311165cc9a42217bf4c3076 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -4287,7 +4287,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, #ifdef ATA_DEBUG struct scsi_device *scsidev = cmd->device; - DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", + DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n", ap->print_id, scsidev->channel, scsidev->id, scsidev->lun, cmd->cmnd); diff 
--git a/drivers/base/cpu.c b/drivers/base/cpu.c index 4e1c60c16104e4ed2b085bb21d62478dd4474e36..b56ca174f582829f4f3a6b47e87155e868a97fe5 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -557,14 +557,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev, return sprintf(buf, "Not affected\n"); } +ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, &dev_attr_spectre_v1.attr, &dev_attr_spectre_v2.attr, + &dev_attr_spec_store_bypass.attr, NULL }; diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 65c15832f5678e1a05f8935b9c2b2be23b51a35f..c5963415c1c55e5fbd7d30756ab534b89a02f1bd 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -294,11 +294,12 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, unsigned long vm_flags, pgprot_t prot, const void *caller) { - int i; + unsigned long i; struct page **pages; struct vm_struct *area; - pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + pages = kvmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + if (!pages) return NULL; @@ -307,7 +308,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); - kfree(pages); + kvfree(pages); if (!area) return NULL; diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c index 09e77d5995a4e4ea7f2f6e00e7e66abaf763489e..8ab35101747f9de663ed488060c4c8bab708f539 100644 --- a/drivers/base/dma-removed.c +++ b/drivers/base/dma-removed.c @@ 
-7,12 +7,10 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#include #include #include #include #include -#include #include #include #include @@ -26,14 +24,11 @@ #include #include #include -#include -#include struct removed_region { phys_addr_t base; int nr_pages; unsigned long *bitmap; - int fixup; struct mutex lock; }; @@ -77,145 +72,6 @@ static int dma_assign_removed_region(struct device *dev, return 0; } -static void adapt_iomem_resource(unsigned long base_pfn, unsigned long end_pfn) -{ - struct resource *res, *conflict; - resource_size_t cstart, cend; - - res = kzalloc(sizeof(*res), GFP_KERNEL); - if (!res) - return; - - res->name = "System RAM"; - res->start = __pfn_to_phys(base_pfn); - res->end = __pfn_to_phys(end_pfn) - 1; - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - - conflict = request_resource_conflict(&iomem_resource, res); - if (!conflict) { - pr_err("Removed memory: no conflict resource found\n"); - kfree(res); - goto done; - } - - cstart = conflict->start; - cend = conflict->end; - if ((cstart == res->start) && (cend == res->end)) { - release_resource(conflict); - } else if ((res->start >= cstart) && (res->start <= cend)) { - if (res->start == cstart) { - adjust_resource(conflict, res->end + 1, - cend - res->end); - } else if (res->end == cend) { - adjust_resource(conflict, cstart, - res->start - cstart); - } else { - adjust_resource(conflict, cstart, - res->start - cstart); - res->start = res->end + 1; - res->end = cend; - request_resource(&iomem_resource, res); - goto done; - } - } else { - pr_err("Removed memory: incorrect resource conflict start=%llx end=%llx\n", - (unsigned long long) conflict->start, - (unsigned long long) conflict->end); - } - - kfree(res); -done: - return; -} - -#ifdef CONFIG_FLATMEM -static void free_memmap(unsigned long start_pfn, unsigned long end_pfn) -{ - struct page *start_pg, *end_pg; - unsigned long pg, pgend; - - start_pfn = 
ALIGN(start_pfn, pageblock_nr_pages); - end_pfn = round_down(end_pfn, pageblock_nr_pages); - /* - * Convert start_pfn/end_pfn to a struct page pointer. - */ - start_pg = pfn_to_page(start_pfn - 1) + 1; - end_pg = pfn_to_page(end_pfn - 1) + 1; - - /* - * Convert to physical addresses, and round start upwards and end - * downwards. - */ - pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); - pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; - - /* - * If there are free pages between these, free the section of the - * memmap array. - */ - if (pg < pgend) - free_bootmem_late(pg, pgend - pg); -} -#else -static void free_memmap(unsigned long start_pfn, unsigned long end_pfn) -{ -} -#endif - -static int _clear_pte(pte_t *pte, pgtable_t token, unsigned long addr, - void *data) -{ - pte_clear(&init_mm, addr, pte); - return 0; -} - -static void clear_mapping(unsigned long addr, unsigned long size) -{ - apply_to_page_range(&init_mm, addr, size, _clear_pte, NULL); - /* ensure ptes are updated */ - mb(); - flush_tlb_kernel_range(addr, addr + size); -} - -static void removed_region_fixup(struct removed_region *dma_mem, int index) -{ - unsigned long fixup_size; - unsigned long base_pfn; - unsigned long flags; - - if (index > dma_mem->nr_pages) - return; - - /* carve-out */ - flags = memblock_region_resize_late_begin(); - memblock_free(dma_mem->base, dma_mem->nr_pages * PAGE_SIZE); - memblock_remove(dma_mem->base, index * PAGE_SIZE); - memblock_region_resize_late_end(flags); - - /* clear page-mappings */ - base_pfn = dma_mem->base >> PAGE_SHIFT; - if (!PageHighMem(pfn_to_page(base_pfn))) { - clear_mapping((unsigned long) phys_to_virt(dma_mem->base), - index * PAGE_SIZE); - } - - /* free page objects */ - free_memmap(base_pfn, base_pfn + index); - - /* return remaining area to system */ - fixup_size = (dma_mem->nr_pages - index) * PAGE_SIZE; - free_bootmem_late(dma_mem->base + index * PAGE_SIZE, fixup_size); - - /* - * release freed resource region so as to show up under iomem resource 
- * list - */ - adapt_iomem_resource(base_pfn, base_pfn + index); - - /* limit the fixup region */ - dma_mem->nr_pages = index; -} - void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { @@ -251,11 +107,6 @@ void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle, bitmap_set(dma_mem->bitmap, pageno, nbits); - if (dma_mem->fixup) { - removed_region_fixup(dma_mem, pageno + nbits); - dma_mem->fixup = 0; - } - if (no_kernel_mapping && skip_zeroing) { addr = (void *)NO_KERNEL_MAPPING_DUMMY; goto out; @@ -397,7 +248,6 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) &rmem->base, (unsigned long)rmem->size / SZ_1M); return -EINVAL; } - mem->fixup = rmem->fixup; set_dma_ops(dev, &removed_dma_ops); rmem->priv = mem; dma_assign_removed_region(dev, mem); @@ -417,25 +267,6 @@ static const struct reserved_mem_ops removed_mem_ops = { static int __init removed_dma_setup(struct reserved_mem *rmem) { - unsigned long node = rmem->fdt_node; - int nomap, fixup; - - nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; - fixup = of_get_flat_dt_prop(node, "no-map-fixup", NULL) != NULL; - - if (nomap && fixup) { - pr_err("Removed memory: nomap & nomap-fixup can't co-exist\n"); - return -EINVAL; - } - - rmem->fixup = fixup; - if (rmem->fixup) { - /* Architecture specific contiguous memory fixup only for - * no-map-fixup to split mappings - */ - dma_contiguous_early_fixup(rmem->base, rmem->size); - } - rmem->ops = &removed_mem_ops; pr_info("Removed memory: created DMA memory pool at %pa, size %ld MiB\n", &rmem->base, (unsigned long)rmem->size / SZ_1M); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 8ccb67c3c1e6797fcd05050ab456f1babd478d5b..44fc47263413c9010355f69bdda53d762267f388 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -447,10 +447,62 @@ static ssize_t show_valid_zones(struct device *dev, static DEVICE_ATTR(valid_zones, 0444, 
show_valid_zones, NULL); #endif +#ifdef CONFIG_MEMORY_HOTPLUG +static int count_num_free_block_pages(struct zone *zone, int bid) +{ + int order, type; + unsigned long freecount = 0; + unsigned long flags; + + spin_lock_irqsave(&zone->lock, flags); + for (type = 0; type < MIGRATE_TYPES; type++) { + for (order = 0; order < MAX_ORDER; ++order) { + struct free_area *area; + struct page *page; + + area = &(zone->free_area[order]); + list_for_each_entry(page, &area->free_list[type], lru) { + unsigned long pfn = page_to_pfn(page); + int section_nr = pfn_to_section_nr(pfn); + + if (bid == base_memory_block_id(section_nr)) + freecount += (1 << order); + } + + } + } + spin_unlock_irqrestore(&zone->lock, flags); + + return freecount; +} + +static ssize_t show_allocated_bytes(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + int block_id, free_pages; + struct zone *movable_zone = + &NODE_DATA(numa_node_id())->node_zones[ZONE_MOVABLE]; + unsigned long used, block_sz = get_memory_block_size(); + + if (mem->state != MEM_ONLINE) + return snprintf(buf, 100, "0\n"); + + block_id = base_memory_block_id(mem->start_section_nr); + free_pages = count_num_free_block_pages(movable_zone, block_id); + used = block_sz - (free_pages * PAGE_SIZE); + + return snprintf(buf, 100, "%lu\n", used); +} +#endif + static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); +#ifdef CONFIG_MEMORY_HOTPLUG +static DEVICE_ATTR(allocated_bytes, 0444, show_allocated_bytes, NULL); +#endif /* * Block size attribute stuff @@ -655,6 +707,9 @@ static struct attribute *memory_memblk_attrs[] = { &dev_attr_removable.attr, #ifdef CONFIG_MEMORY_HOTREMOVE &dev_attr_valid_zones.attr, +#endif +#ifdef CONFIG_MEMORY_HOTPLUG + &dev_attr_allocated_bytes.attr, 
#endif NULL }; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index efdadd153abef7f41c9a5d6319557ebc9e8ff298..8fd08023c0f5fae351800ab27d30a8a244eabaaa 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -98,7 +98,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg) int ret; unsigned int val; - if (map->cache == REGCACHE_NONE) + if (map->cache_type == REGCACHE_NONE) return false; if (!map->cache_ops) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 7548521566223bc76206ab68d67bd73f031113b4..1a87f87c88d03bccd30b7c9ea74966938785ccea 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1166,21 +1166,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { - struct file *file = lo->lo_backing_file; + struct file *file; struct kstat stat; - int error; + int ret; - if (lo->lo_state != Lo_bound) + if (lo->lo_state != Lo_bound) { + mutex_unlock(&lo->lo_ctl_mutex); return -ENXIO; - error = vfs_getattr(&file->f_path, &stat, - STATX_INO, AT_STATX_SYNC_AS_STAT); - if (error) - return error; + } + memset(info, 0, sizeof(*info)); info->lo_number = lo->lo_number; - info->lo_device = huge_encode_dev(stat.dev); - info->lo_inode = stat.ino; - info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); info->lo_offset = lo->lo_offset; info->lo_sizelimit = lo->lo_sizelimit; info->lo_flags = lo->lo_flags; @@ -1193,7 +1189,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); } - return 0; + + /* Drop lo_ctl_mutex while we call into the filesystem. 
*/ + file = get_file(lo->lo_backing_file); + mutex_unlock(&lo->lo_ctl_mutex); + ret = vfs_getattr(&file->f_path, &stat, STATX_INO, + AT_STATX_SYNC_AS_STAT); + if (!ret) { + info->lo_device = huge_encode_dev(stat.dev); + info->lo_inode = stat.ino; + info->lo_rdevice = huge_encode_dev(stat.rdev); + } + fput(file); + return ret; } static void @@ -1274,12 +1282,13 @@ static int loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; - int err = 0; + int err; - if (!arg) - err = -EINVAL; - if (!err) - err = loop_get_status(lo, &info64); + if (!arg) { + mutex_unlock(&lo->lo_ctl_mutex); + return -EINVAL; + } + err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_old(&info64, &info); if (!err && copy_to_user(arg, &info, sizeof(info))) @@ -1291,12 +1300,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { static int loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct loop_info64 info64; - int err = 0; + int err; - if (!arg) - err = -EINVAL; - if (!err) - err = loop_get_status(lo, &info64); + if (!arg) { + mutex_unlock(&lo->lo_ctl_mutex); + return -EINVAL; + } + err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; @@ -1373,7 +1383,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, break; case LOOP_GET_STATUS: err = loop_get_status_old(lo, (struct loop_info __user *) arg); - break; + /* loop_get_status() unlocks lo_ctl_mutex */ + goto out_unlocked; case LOOP_SET_STATUS64: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) @@ -1382,7 +1393,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, break; case LOOP_GET_STATUS64: err = loop_get_status64(lo, (struct loop_info64 __user *) arg); - break; + /* loop_get_status() unlocks lo_ctl_mutex */ + goto out_unlocked; case LOOP_SET_CAPACITY: err = -EPERM; if ((mode & FMODE_WRITE) || 
capable(CAP_SYS_ADMIN)) @@ -1515,12 +1527,13 @@ loop_get_status_compat(struct loop_device *lo, struct compat_loop_info __user *arg) { struct loop_info64 info64; - int err = 0; + int err; - if (!arg) - err = -EINVAL; - if (!err) - err = loop_get_status(lo, &info64); + if (!arg) { + mutex_unlock(&lo->lo_ctl_mutex); + return -EINVAL; + } + err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); return err; @@ -1543,7 +1556,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, mutex_lock(&lo->lo_ctl_mutex); err = loop_get_status_compat( lo, (struct compat_loop_info __user *) arg); - mutex_unlock(&lo->lo_ctl_mutex); + /* loop_get_status() unlocks lo_ctl_mutex */ break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5f2a4240a204d54fc6fe87e569dc6165d5190530..86258b00a1d4d1960a67fb021babc668884a72a7 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1591,7 +1591,7 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) if (new_index < 0) { mutex_unlock(&nbd_index_mutex); printk(KERN_ERR "nbd: failed to add new device\n"); - return ret; + return new_index; } nbd = idr_find(&nbd_index_idr, new_index); } diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 69dfa1d3f45353662f96dfa7357f35cde562b93a..f01d4a8a783ace1d03157d64c419e289d62f8920 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -68,6 +68,7 @@ enum nullb_device_flags { NULLB_DEV_FL_CACHE = 3, }; +#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2) /* * nullb_page is a page in memory for nullb devices. 
* @@ -82,10 +83,10 @@ enum nullb_device_flags { */ struct nullb_page { struct page *page; - unsigned long bitmap; + DECLARE_BITMAP(bitmap, MAP_SZ); }; -#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1) -#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2) +#define NULLB_PAGE_LOCK (MAP_SZ - 1) +#define NULLB_PAGE_FREE (MAP_SZ - 2) struct nullb_device { struct nullb *nullb; @@ -725,7 +726,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) if (!t_page->page) goto out_freepage; - t_page->bitmap = 0; + memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); return t_page; out_freepage: kfree(t_page); @@ -735,13 +736,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) static void null_free_page(struct nullb_page *t_page) { - __set_bit(NULLB_PAGE_FREE, &t_page->bitmap); - if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap)) + __set_bit(NULLB_PAGE_FREE, t_page->bitmap); + if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) return; __free_page(t_page->page); kfree(t_page); } +static bool null_page_empty(struct nullb_page *page) +{ + int size = MAP_SZ - 2; + + return find_first_bit(page->bitmap, size) == size; +} + static void null_free_sector(struct nullb *nullb, sector_t sector, bool is_cache) { @@ -756,9 +764,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector, t_page = radix_tree_lookup(root, idx); if (t_page) { - __clear_bit(sector_bit, &t_page->bitmap); + __clear_bit(sector_bit, t_page->bitmap); - if (!t_page->bitmap) { + if (null_page_empty(t_page)) { ret = radix_tree_delete_item(root, idx, t_page); WARN_ON(ret != t_page); null_free_page(ret); @@ -829,7 +837,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb, t_page = radix_tree_lookup(root, idx); WARN_ON(t_page && t_page->page->index != idx); - if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap))) + if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) return t_page; return NULL; @@ -892,10 +900,10 @@ static int 
null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); - __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap); - if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) { + __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); + if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { null_free_page(c_page); - if (t_page && t_page->bitmap == 0) { + if (t_page && null_page_empty(t_page)) { ret = radix_tree_delete_item(&nullb->dev->data, idx, t_page); null_free_page(t_page); @@ -911,11 +919,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) for (i = 0; i < PAGE_SECTORS; i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { - if (test_bit(i, &c_page->bitmap)) { + if (test_bit(i, c_page->bitmap)) { offset = (i << SECTOR_SHIFT); memcpy(dst + offset, src + offset, nullb->dev->blocksize); - __set_bit(i, &t_page->bitmap); + __set_bit(i, t_page->bitmap); } } @@ -952,10 +960,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n) * We found the page which is being flushed to disk by other * threads */ - if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap)) + if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) c_pages[i] = NULL; else - __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap); + __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); } one_round = 0; @@ -1008,7 +1016,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source, kunmap_atomic(dst); kunmap_atomic(src); - __set_bit(sector & SECTOR_MASK, &t_page->bitmap); + __set_bit(sector & SECTOR_MASK, t_page->bitmap); if (is_fua) null_free_sector(nullb, sector, true); @@ -1922,10 +1930,6 @@ static int __init null_init(void) struct nullb *nullb; struct nullb_device *dev; - /* check for nullb_page.bitmap */ - if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT)) - return -EINVAL; - if (g_bs > PAGE_SIZE) { pr_warn("null_blk: invalid block size\n"); pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); 
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 7b8c6368beb7920ddc765737f8540141e6d2f0f0..a026211afb51fb904d7d58b27d15e6d5f879b2ab 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode) struct pcd_unit *cd = bdev->bd_disk->private_data; int ret; + check_disk_change(bdev); + mutex_lock(&pcd_mutex); ret = cdrom_open(&cd->info, bdev, mode); mutex_unlock(&pcd_mutex); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 891265acb10ec3c0af6a2ddc9e76c14ccc70cfb2..7d23225f79ed3e5975ade384cfde7bdae6097224 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock); static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); static void blkfront_gather_backend_features(struct blkfront_info *info); +static int negotiate_mq(struct blkfront_info *info); static int get_id_from_freelist(struct blkfront_ring_info *rinfo) { @@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev, unsigned int i, max_page_order; unsigned int ring_page_order; + if (!info) + return -ENODEV; + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, "max-ring-page-order", 0); ring_page_order = min(xen_blkif_max_ring_order, max_page_order); info->nr_ring_pages = 1 << ring_page_order; + err = negotiate_mq(info); + if (err) + goto destroy_blkring; + for (i = 0; i < info->nr_rings; i++) { struct blkfront_ring_info *rinfo = &info->rinfo[i]; @@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev, } info->xbdev = dev; - err = negotiate_mq(info); - if (err) { - kfree(info); - return err; - } mutex_init(&info->mutex); info->vdevice = vdevice; @@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev) blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); - err = negotiate_mq(info); - if (err) - return err; - 
err = talk_to_blkback(dev, info); if (!err) blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7fcc4d7f49091dea22967a7912a689ab09fd9dd6..86d7975afaeb84c20f94ebab1c7ed6cac8cd044c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -370,6 +370,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8723BU Bluetooth devices */ + { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, @@ -377,6 +380,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8822BE Bluetooth devices */ + { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c index a5d723f059270219a57304cd4480ba42acc6617f..c2c88d443ec7be52c633c8f655a34f804cda30be 100644 --- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c +++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c @@ -21,14 +21,12 @@ #include #include #include -#include #include #include #include #include "mhi_qcom.h" struct arch_info { - bool initialized; struct mhi_dev *mhi_dev; struct esoc_desc *esoc_client; struct esoc_client_hook esoc_ops; @@ -82,24 +80,6 @@ static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify) } } -int mhi_arch_platform_init(struct mhi_dev *mhi_dev) -{ - struct arch_info *arch_info = kzalloc(sizeof(*arch_info), GFP_KERNEL); - - if 
(!arch_info) - return -ENOMEM; - - mhi_dev->arch_info = arch_info; - - return 0; -} - -void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) -{ - kfree(mhi_dev->arch_info); - mhi_dev->arch_info = NULL; -} - static int mhi_arch_esoc_ops_power_on(void *priv, bool mdm_state) { struct mhi_controller *mhi_cntrl = priv; @@ -169,9 +149,16 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) char node[32]; int ret; - if (!arch_info->initialized) { + if (!arch_info) { struct msm_pcie_register_event *reg_event; + arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev, + sizeof(*arch_info), GFP_KERNEL); + if (!arch_info) + return -ENOMEM; + + mhi_dev->arch_info = arch_info; + snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u", mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, mhi_cntrl->slot); @@ -180,7 +167,8 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) mhi_cntrl->log_lvl = mhi_ipc_log_lvl; /* register bus scale */ - arch_info->msm_bus_pdata = msm_bus_cl_get_pdata(mhi_dev->pdev); + arch_info->msm_bus_pdata = msm_bus_cl_get_pdata_from_dev( + &mhi_dev->pci_dev->dev); if (!arch_info->msm_bus_pdata) return -EINVAL; @@ -200,7 +188,7 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) MHI_LOG("Failed to reg. 
for link up notification\n"); arch_info->esoc_client = devm_register_esoc_client( - &mhi_dev->pdev->dev, "mdm"); + &mhi_dev->pci_dev->dev, "mdm"); if (IS_ERR_OR_NULL(arch_info->esoc_client)) { MHI_ERR("Failed to register esoc client\n"); } else { @@ -224,8 +212,6 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) /* save reference state for pcie config space */ arch_info->ref_pcie_state = pci_store_saved_state( mhi_dev->pci_dev); - - arch_info->initialized = true; } return mhi_arch_set_bus_request(mhi_cntrl, 1); diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c index 642981a7f7aeb2b67d47bf6b7eee7f91ad3a3f2e..f97833224b727f2c6cdf4cac545b79f5ee13c40c 100644 --- a/drivers/bus/mhi/controllers/mhi_qcom.c +++ b/drivers/bus/mhi/controllers/mhi_qcom.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -341,144 +340,111 @@ DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, mhi_debugfs_trigger_m3, "%llu\n"); -static int mhi_init_debugfs_trigger_go(void *data, u64 val) +static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev) { - struct mhi_controller *mhi_cntrl = data; + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + struct device_node *of_node = pci_dev->dev.of_node; + bool use_bb; + u64 addr_win[2]; + int ret; - MHI_LOG("Trigger power up sequence\n"); + if (!of_node) + return ERR_PTR(-ENODEV); - mhi_async_power_up(mhi_cntrl); + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) + return ERR_PTR(-ENOMEM); - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL, - mhi_init_debugfs_trigger_go, "%llu\n"); + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); -int 
mhi_init_debugfs_debug_show(struct seq_file *m, void *d) -{ - seq_puts(m, "Enable debug mode to debug external soc\n"); - seq_puts(m, - "Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n"); - seq_puts(m, "No spaces between parameters\n"); - seq_puts(m, "\t1. devid : 0 or pci device id to register\n"); - seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n"); - seq_puts(m, "\t3. domain: Rootcomplex\n"); - seq_puts(m, "\t4. smmu_cfg: smmu configuration mask:\n"); - seq_puts(m, "\t\t- BIT0: ATTACH\n"); - seq_puts(m, "\t\t- BIT1: S1 BYPASS\n"); - seq_puts(m, "\t\t-BIT2: FAST_MAP\n"); - seq_puts(m, "\t\t-BIT3: ATOMIC\n"); - seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n"); - seq_puts(m, "\t\t-BIT5: GEOMETRY\n"); - seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n"); - seq_puts(m, "Examples inputs: '0x307,10000'\n"); - seq_puts(m, "\techo '0,10000,1'\n"); - seq_puts(m, "\techo '0x307,10000,0,0x3d'\n"); - seq_puts(m, "firmware image name will be changed to debug.mbn\n"); + ret = of_property_read_u32(of_node, "qcom,smmu-cfg", + &mhi_dev->smmu_cfg); + if (ret) + goto error_register; - return 0; -} + use_bb = of_property_read_bool(of_node, "mhi,use-bb"); -static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file) -{ - return single_open(file, mhi_init_debugfs_debug_show, NULL); -} + /* + * if s1 translation enabled or using bounce buffer pull iova addr + * from dt + */ + if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && + !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) { + ret = of_property_count_elems_of_size(of_node, "qcom,addr-win", + sizeof(addr_win)); + if (ret != 1) + goto error_register; + ret = of_property_read_u64_array(of_node, "qcom,addr-win", + addr_win, 2); + if (ret) + goto error_register; + } else { + addr_win[0] = memblock_start_of_DRAM(); + addr_win[1] = memblock_end_of_DRAM(); + } -static ssize_t mhi_init_debugfs_debug_write(struct file *fp, - const char __user *ubuf, - size_t count, - loff_t *pos) -{ - char *buf 
= kmalloc(count + 1, GFP_KERNEL); - /* #,devid,timeout,domain,smmu-cfg */ - int args[5] = {0}; - static char const *dbg_fw = "debug.mbn"; - int ret; - struct mhi_controller *mhi_cntrl = fp->f_inode->i_private; - struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); - struct pci_device_id *id; + mhi_dev->iova_start = addr_win[0]; + mhi_dev->iova_stop = addr_win[1]; - if (!buf) - return -ENOMEM; + /* + * If S1 is enabled, set MHI_CTRL start address to 0 so we can use low + * level mapping api to map buffers outside of smmu domain + */ + if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && + !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS)) + mhi_cntrl->iova_start = 0; + else + mhi_cntrl->iova_start = addr_win[0]; + + mhi_cntrl->iova_stop = mhi_dev->iova_stop; + mhi_cntrl->of_node = of_node; - ret = copy_from_user(buf, ubuf, count); + mhi_dev->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->link_status = mhi_link_status; + + ret = of_register_mhi_controller(mhi_cntrl); if (ret) - goto error_read; - buf[count] = 0; - get_options(buf, ARRAY_SIZE(args), args); - kfree(buf); - - /* override default parameters */ - mhi_cntrl->fw_image = dbg_fw; - mhi_cntrl->edl_image = dbg_fw; - - if (args[0] >= 2 && args[2]) - mhi_cntrl->timeout_ms = args[2]; - - if (args[0] >= 3 && args[3]) - mhi_cntrl->domain = args[3]; - - if (args[0] >= 4 && args[4]) - mhi_dev->smmu_cfg = args[4]; - - /* If it's a new device id register it */ - if (args[0] && args[1]) { - /* find the debug_id and overwrite it */ - for (id = mhi_pcie_device_id; id->vendor; id++) - if (id->device == MHI_PCIE_DEBUG_ID) { - id->device = args[1]; - pci_unregister_driver(&mhi_pcie_driver); - ret = pci_register_driver(&mhi_pcie_driver); - } - } + goto error_register; - mhi_dev->debug_mode = true; - debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl, - 
&mhi_init_debugfs_trigger_go_fops); - pr_info( - "%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n", - __func__, ret, args[1], mhi_dev->smmu_cfg, - mhi_cntrl->timeout_ms); - return count; - -error_read: - kfree(buf); - return ret; -} + return mhi_cntrl; -static const struct file_operations debugfs_debug_ops = { - .open = mhi_init_debugfs_debug_open, - .release = single_release, - .read = seq_read, - .write = mhi_init_debugfs_debug_write, -}; +error_register: + mhi_free_controller(mhi_cntrl); + + return ERR_PTR(-EINVAL); +} int mhi_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *device_id) { - struct mhi_controller *mhi_cntrl = NULL; + struct mhi_controller *mhi_cntrl; u32 domain = pci_domain_nr(pci_dev->bus); u32 bus = pci_dev->bus->number; - /* first match to exact DT node, if not match to any free DT */ - u32 dev_id[] = {pci_dev->device, PCI_ANY_ID}; + u32 dev_id = pci_dev->device; u32 slot = PCI_SLOT(pci_dev->devfn); struct mhi_dev *mhi_dev; - int i, ret; - - /* find a matching controller */ - for (i = 0; i < ARRAY_SIZE(dev_id); i++) { - mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id[i]); - if (mhi_cntrl) - break; - } + int ret; + /* see if we already registered */ + mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id); if (!mhi_cntrl) - return -EPROBE_DEFER; + mhi_cntrl = mhi_register_controller(pci_dev); + + if (IS_ERR(mhi_cntrl)) + return PTR_ERR(mhi_cntrl); - mhi_cntrl->dev_id = pci_dev->device; mhi_dev = mhi_controller_get_devdata(mhi_cntrl); - mhi_dev->pci_dev = pci_dev; mhi_dev->powered_on = true; ret = mhi_arch_pcie_init(mhi_cntrl); @@ -493,12 +459,10 @@ int mhi_pci_probe(struct pci_dev *pci_dev, if (ret) goto error_init_pci; - /* start power up sequence if not in debug mode */ - if (!mhi_dev->debug_mode) { - ret = mhi_async_power_up(mhi_cntrl); - if (ret) - goto error_power_up; - } + /* start power up sequence */ + ret = mhi_async_power_up(mhi_cntrl); + if (ret) + goto error_power_up; 
pm_runtime_mark_last_busy(&pci_dev->dev); pm_runtime_allow(&pci_dev->dev); @@ -526,124 +490,6 @@ int mhi_pci_probe(struct pci_dev *pci_dev, return ret; } -static const struct of_device_id mhi_plat_match[] = { - { .compatible = "qcom,mhi" }, - {}, -}; - -static int mhi_platform_probe(struct platform_device *pdev) -{ - struct mhi_controller *mhi_cntrl; - struct mhi_dev *mhi_dev; - struct device_node *of_node = pdev->dev.of_node; - u64 addr_win[2]; - int ret; - - if (!of_node) - return -ENODEV; - - mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); - if (!mhi_cntrl) - return -ENOMEM; - - mhi_dev = mhi_controller_get_devdata(mhi_cntrl); - - /* get pci bus topology for this node */ - ret = of_property_read_u32(of_node, "qcom,pci-dev-id", - &mhi_cntrl->dev_id); - if (ret) - mhi_cntrl->dev_id = PCI_ANY_ID; - - ret = of_property_read_u32(of_node, "qcom,pci-domain", - &mhi_cntrl->domain); - if (ret) - goto error_probe; - - ret = of_property_read_u32(of_node, "qcom,pci-bus", &mhi_cntrl->bus); - if (ret) - goto error_probe; - - ret = of_property_read_u32(of_node, "qcom,pci-slot", &mhi_cntrl->slot); - if (ret) - goto error_probe; - - ret = of_property_read_u32(of_node, "qcom,smmu-cfg", - &mhi_dev->smmu_cfg); - if (ret) - goto error_probe; - - /* if s1 translation enabled pull iova addr from dt */ - if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && - !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS)) { - ret = of_property_count_elems_of_size(of_node, "qcom,addr-win", - sizeof(addr_win)); - if (ret != 1) - goto error_probe; - ret = of_property_read_u64_array(of_node, "qcom,addr-win", - addr_win, 2); - if (ret) - goto error_probe; - } else { - addr_win[0] = memblock_start_of_DRAM(); - addr_win[1] = memblock_end_of_DRAM(); - } - - mhi_dev->iova_start = addr_win[0]; - mhi_dev->iova_stop = addr_win[1]; - - /* - * if S1 is enabled, set MHI_CTRL start address to 0 so we can use low - * level mapping api to map buffers outside of smmu domain - */ - if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && - 
!(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS)) - mhi_cntrl->iova_start = 0; - else - mhi_cntrl->iova_start = addr_win[0]; - - mhi_cntrl->iova_stop = mhi_dev->iova_stop; - mhi_cntrl->of_node = of_node; - - /* setup power management apis */ - mhi_cntrl->status_cb = mhi_status_cb; - mhi_cntrl->runtime_get = mhi_runtime_get; - mhi_cntrl->runtime_put = mhi_runtime_put; - mhi_cntrl->link_status = mhi_link_status; - - mhi_dev->pdev = pdev; - - ret = mhi_arch_platform_init(mhi_dev); - if (ret) - goto error_probe; - - ret = of_register_mhi_controller(mhi_cntrl); - if (ret) - goto error_register; - - if (mhi_cntrl->parent) - debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent, - mhi_cntrl, &debugfs_debug_ops); - - return 0; - -error_register: - mhi_arch_platform_deinit(mhi_dev); - -error_probe: - mhi_free_controller(mhi_cntrl); - - return -EINVAL; -}; - -static struct platform_driver mhi_platform_driver = { - .probe = mhi_platform_probe, - .driver = { - .name = "mhi", - .owner = THIS_MODULE, - .of_match_table = mhi_plat_match, - }, -}; - static const struct dev_pm_ops pm_ops = { SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, @@ -660,26 +506,7 @@ static struct pci_driver mhi_pcie_driver = { } }; -static int __init mhi_init(void) -{ - int ret; - - ret = platform_driver_register(&mhi_platform_driver); - if (ret) - return ret; - - ret = pci_register_driver(&mhi_pcie_driver); - if (ret) - goto pci_reg_error; - - return ret; - -pci_reg_error: - platform_driver_unregister(&mhi_platform_driver); - - return ret; -}; -module_init(mhi_init); +module_pci_driver(mhi_pcie_driver); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("MHI_CORE"); diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h index 252f83662fb94d3c1914f0b3634bfae9bf512e7b..1d06dfeeb1383daeda3955f7454078d1af152183 100644 --- a/drivers/bus/mhi/controllers/mhi_qcom.h +++ b/drivers/bus/mhi/controllers/mhi_qcom.h @@ -25,13 +25,11 @@ #define MHI_PCI_BAR_NUM (0) struct mhi_dev { - 
struct platform_device *pdev; struct pci_dev *pci_dev; u32 smmu_cfg; int resn; void *arch_info; bool powered_on; - bool debug_mode; dma_addr_t iova_start; dma_addr_t iova_stop; }; @@ -44,8 +42,6 @@ int mhi_pci_probe(struct pci_dev *pci_dev, int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl); void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl); -int mhi_arch_platform_init(struct mhi_dev *mhi_dev); -void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev); int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl); void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl); int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful); @@ -75,15 +71,6 @@ static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) { } -static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) -{ - return 0; -} - -static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) -{ -} - static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful) { diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c index a70ec2553ba02a91107199d3a005952eb72d2b8e..af57b942a315cf3c082e319c5e38e6dd913a036c 100644 --- a/drivers/bus/mhi/core/mhi_dtr.c +++ b/drivers/bus/mhi/core/mhi_dtr.c @@ -34,18 +34,30 @@ struct __packed dtr_ctrl_msg { #define CTRL_MAGIC (0x4C525443) #define CTRL_MSG_DTR BIT(0) -#define CTRL_MSG_ID (0x10) +#define CTRL_MSG_RTS BIT(1) +#define CTRL_MSG_DCD BIT(0) +#define CTRL_MSG_DSR BIT(1) +#define CTRL_MSG_RI BIT(2) +#define CTRL_HOST_STATE (0x10) +#define CTRL_DEVICE_STATE (0x11) +#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF) static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, + struct mhi_device *mhi_dev, u32 tiocm) { struct dtr_ctrl_msg *dtr_msg = NULL; struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + spinlock_t *res_lock = &mhi_dev->dev.devres_lock; + u32 cur_tiocm; int ret = 0; - tiocm &= TIOCM_DTR; - if (mhi_chan->tiocm == tiocm) + 
cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + tiocm &= (TIOCM_DTR | TIOCM_RTS); + + /* state did not changed */ + if (cur_tiocm == tiocm) return 0; mutex_lock(&dtr_chan->mutex); @@ -57,11 +69,13 @@ static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, } dtr_msg->preamble = CTRL_MAGIC; - dtr_msg->msg_id = CTRL_MSG_ID; - dtr_msg->dest_id = mhi_chan->chan; + dtr_msg->msg_id = CTRL_HOST_STATE; + dtr_msg->dest_id = mhi_dev->ul_chan_id; dtr_msg->size = sizeof(u32); if (tiocm & TIOCM_DTR) dtr_msg->msg |= CTRL_MSG_DTR; + if (tiocm & TIOCM_RTS) + dtr_msg->msg |= CTRL_MSG_RTS; reinit_completion(&dtr_chan->completion); ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg, @@ -71,7 +85,6 @@ static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, ret = wait_for_completion_timeout(&dtr_chan->completion, msecs_to_jiffies(mhi_cntrl->timeout_ms)); - if (!ret) { MHI_ERR("Failed to receive transfer callback\n"); ret = -EIO; @@ -79,7 +92,10 @@ static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, } ret = 0; - mhi_chan->tiocm = tiocm; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS); + mhi_dev->tiocm |= tiocm; + spin_unlock_irq(res_lock); tiocm_exit: kfree(dtr_msg); @@ -91,7 +107,6 @@ static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg) { struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan = mhi_dev->ul_chan; int ret; /* ioctl not supported by this controller */ @@ -100,7 +115,7 @@ long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg) switch (cmd) { case TIOCMGET: - return mhi_chan->tiocm; + return mhi_dev->tiocm; case TIOCMSET: { u32 tiocm; @@ -109,7 +124,7 @@ long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg) if (ret) return ret; - return mhi_dtr_tiocmset(mhi_cntrl, mhi_chan, tiocm); + return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, 
tiocm); } default: break; @@ -122,6 +137,42 @@ EXPORT_SYMBOL(mhi_ioctl); static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) { + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr; + u32 chan; + spinlock_t *res_lock; + + if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) { + MHI_ERR("Unexpected length %zu received\n", + mhi_result->bytes_xferd); + return; + } + + MHI_VERB("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n", + dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id, + dtr_msg->msg); + + chan = CTRL_GET_CHID(dtr_msg); + if (chan >= mhi_cntrl->max_chan) + return; + + mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev; + if (!mhi_dev) + return; + + res_lock = &mhi_dev->dev.devres_lock; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + if (dtr_msg->msg & CTRL_MSG_DCD) + mhi_dev->tiocm |= TIOCM_CD; + + if (dtr_msg->msg & CTRL_MSG_DSR) + mhi_dev->tiocm |= TIOCM_DSR; + + if (dtr_msg->msg & CTRL_MSG_RI) + mhi_dev->tiocm |= TIOCM_RI; + spin_unlock_irq(res_lock); } static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev, diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c index b30143823631a0b9f098fbc997d4d5c10f86a6d4..cd455571b599e17173b47bd2aa32bf2a8dc25197 100644 --- a/drivers/bus/mhi/core/mhi_init.c +++ b/drivers/bus/mhi/core/mhi_init.c @@ -414,6 +414,77 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) return ret; } +int mhi_init_timesync(struct mhi_controller *mhi_cntrl) +{ + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + u32 time_offset, db_offset; + int ret; + + reinit_completion(&mhi_tsync->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI host is not in active state\n"); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + 
mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG); + if (ret) { + MHI_ERR("Failed to send time sync cfg cmd\n"); + goto error_send_cmd; + } + + ret = wait_for_completion_timeout(&mhi_tsync->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive cmd completion for time_sync_cfg\n"); + ret = -EIO; + goto error_send_cmd; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + + ret = -EIO; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto error_sync_cap; + + ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID, + &time_offset); + if (ret) { + MHI_ERR("could not find timesync capability\n"); + goto error_sync_cap; + } + + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + time_offset + TIMESYNC_DB_OFFSET, &db_offset); + if (ret) + goto error_sync_cap; + + MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset); + + mhi_tsync->db = mhi_cntrl->regs + db_offset; + +error_sync_cap: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; + +error_send_cmd: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + int mhi_init_mmio(struct mhi_controller *mhi_cntrl) { u32 val; @@ -669,148 +740,180 @@ int mhi_device_configure(struct mhi_device *mhi_dev, static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, struct device_node *of_node) { - struct { - u32 ev_cfg[MHI_EV_CFG_MAX]; - } *ev_cfg; - int num, i, ret; + int i, ret, num = 0; struct mhi_event *mhi_event; - u32 bit_cfg; + struct device_node *child; - num = of_property_count_elems_of_size(of_node, "mhi,ev-cfg", - sizeof(*ev_cfg)); - if (num <= 0) - return -EINVAL; + for_each_available_child_of_node(of_node, child) { + if (!strcmp(child->name, "mhi_event")) + num++; + } - ev_cfg = kcalloc(num, 
sizeof(*ev_cfg), GFP_KERNEL); - if (!ev_cfg) - return -ENOMEM; + if (!num) + return -EINVAL; mhi_cntrl->total_ev_rings = num; mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), GFP_KERNEL); - if (!mhi_cntrl->mhi_event) { - kfree(ev_cfg); + if (!mhi_cntrl->mhi_event) return -ENOMEM; - } - - ret = of_property_read_u32_array(of_node, "mhi,ev-cfg", (u32 *)ev_cfg, - num * sizeof(*ev_cfg) / sizeof(u32)); - if (ret) - goto error_ev_cfg; /* populate ev ring */ mhi_event = mhi_cntrl->mhi_event; - for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { - mhi_event->er_index = i; - mhi_event->ring.elements = - ev_cfg[i].ev_cfg[MHI_EV_CFG_ELEMENTS]; - mhi_event->intmod = ev_cfg[i].ev_cfg[MHI_EV_CFG_INTMOD]; - mhi_event->msi = ev_cfg[i].ev_cfg[MHI_EV_CFG_MSI]; - mhi_event->chan = ev_cfg[i].ev_cfg[MHI_EV_CFG_CHAN]; - if (mhi_event->chan >= mhi_cntrl->max_chan) + i = 0; + for_each_available_child_of_node(of_node, child) { + if (strcmp(child->name, "mhi_event")) + continue; + + mhi_event->er_index = i++; + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_event->ring.elements); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,intmod", + &mhi_event->intmod); + if (ret) goto error_ev_cfg; - /* this event ring has a dedicated channel */ - if (mhi_event->chan) + ret = of_property_read_u32(child, "mhi,msi", + &mhi_event->msi); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,chan", + &mhi_event->chan); + if (!ret) { + if (mhi_event->chan >= mhi_cntrl->max_chan) + goto error_ev_cfg; + /* this event ring has a dedicated channel */ mhi_event->mhi_chan = &mhi_cntrl->mhi_chan[mhi_event->chan]; + } - mhi_event->priority = ev_cfg[i].ev_cfg[MHI_EV_CFG_PRIORITY]; - mhi_event->db_cfg.brstmode = - ev_cfg[i].ev_cfg[MHI_EV_CFG_BRSTMODE]; - if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + ret = of_property_read_u32(child, "mhi,priority", + &mhi_event->priority); + if (ret) + goto error_ev_cfg; + + ret 
= of_property_read_u32(child, "mhi,brstmode", + &mhi_event->db_cfg.brstmode); + if (ret || MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) goto error_ev_cfg; mhi_event->db_cfg.process_db = (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ? mhi_db_brstmode : mhi_db_brstmode_disable; - bit_cfg = ev_cfg[i].ev_cfg[MHI_EV_CFG_BITCFG]; - if (bit_cfg & MHI_EV_CFG_BIT_HW_EV) { - mhi_event->hw_ring = true; + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_event->data_type); + if (ret) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; + + if (mhi_event->data_type > MHI_ER_DATA_TYPE_MAX) + goto error_ev_cfg; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev"); + if (mhi_event->hw_ring) mhi_cntrl->hw_ev_rings++; - } else + else mhi_cntrl->sw_ev_rings++; - - mhi_event->cl_manage = !!(bit_cfg & MHI_EV_CFG_BIT_CL_MANAGE); - mhi_event->offload_ev = !!(bit_cfg & MHI_EV_CFG_BIT_OFFLOAD_EV); - mhi_event->ctrl_ev = !!(bit_cfg & MHI_EV_CFG_BIT_CTRL_EV); + mhi_event->cl_manage = of_property_read_bool(child, + "mhi,client-manage"); + mhi_event->offload_ev = of_property_read_bool(child, + "mhi,offload"); + mhi_event++; } - kfree(ev_cfg); - /* we need msi for each event ring + additional one for BHI */ mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1; return 0; - error_ev_cfg: - kfree(ev_cfg); +error_ev_cfg: + kfree(mhi_cntrl->mhi_event); return -EINVAL; } static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, struct device_node *of_node) { - int num, i, ret; - struct { - u32 chan_cfg[MHI_CH_CFG_MAX]; - } *chan_cfg; + int ret; + struct device_node *child; + u32 chan; ret = of_property_read_u32(of_node, 
"mhi,max-channels", &mhi_cntrl->max_chan); if (ret) return ret; - num = of_property_count_elems_of_size(of_node, "mhi,chan-cfg", - sizeof(*chan_cfg)); - if (num <= 0 || num >= mhi_cntrl->max_chan) - return -EINVAL; - if (of_property_count_strings(of_node, "mhi,chan-names") != num) - return -EINVAL; mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); if (!mhi_cntrl->mhi_chan) return -ENOMEM; - chan_cfg = kcalloc(num, sizeof(*chan_cfg), GFP_KERNEL); - if (!chan_cfg) { - kfree(mhi_cntrl->mhi_chan); - return -ENOMEM; - } - - ret = of_property_read_u32_array(of_node, "mhi,chan-cfg", - (u32 *)chan_cfg, - num * sizeof(*chan_cfg) / sizeof(u32)); - if (ret) - goto error_chan_cfg; INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); /* populate channel configurations */ - for (i = 0; i < num; i++) { + for_each_available_child_of_node(of_node, child) { struct mhi_chan *mhi_chan; - int chan = chan_cfg[i].chan_cfg[MHI_CH_CFG_CHAN_ID]; - u32 bit_cfg = chan_cfg[i].chan_cfg[MHI_CH_CFG_BITCFG]; - if (chan >= mhi_cntrl->max_chan) + if (strcmp(child->name, "mhi_chan")) + continue; + + ret = of_property_read_u32(child, "reg", &chan); + if (ret || chan >= mhi_cntrl->max_chan) goto error_chan_cfg; mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + ret = of_property_read_string(child, "label", + &mhi_chan->name); + if (ret) + goto error_chan_cfg; + mhi_chan->chan = chan; - mhi_chan->buf_ring.elements = - chan_cfg[i].chan_cfg[MHI_CH_CFG_ELEMENTS]; + + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_chan->buf_ring.elements); + if (!ret && !mhi_chan->buf_ring.elements) + goto error_chan_cfg; + mhi_chan->tre_ring.elements = mhi_chan->buf_ring.elements; - mhi_chan->er_index = chan_cfg[i].chan_cfg[MHI_CH_CFG_ER_INDEX]; - mhi_chan->dir = chan_cfg[i].chan_cfg[MHI_CH_CFG_DIRECTION]; - mhi_chan->db_cfg.pollcfg = - chan_cfg[i].chan_cfg[MHI_CH_CFG_POLLCFG]; - mhi_chan->ee = chan_cfg[i].chan_cfg[MHI_CH_CFG_EE]; - if (mhi_chan->ee >= MHI_EE_MAX_SUPPORTED) + ret 
= of_property_read_u32(child, "mhi,event-ring", + &mhi_chan->er_index); + if (ret) + goto error_chan_cfg; + + ret = of_property_read_u32(child, "mhi,chan-dir", + &mhi_chan->dir); + if (ret) + goto error_chan_cfg; + + ret = of_property_read_u32(child, "mhi,ee", &mhi_chan->ee); + if (ret || mhi_chan->ee >= MHI_EE_MAX_SUPPORTED) goto error_chan_cfg; - mhi_chan->xfer_type = - chan_cfg[i].chan_cfg[MHI_CH_CFG_XFER_TYPE]; + of_property_read_u32(child, "mhi,pollcfg", + &mhi_chan->db_cfg.pollcfg); + + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_chan->xfer_type); + if (ret) + goto error_chan_cfg; switch (mhi_chan->xfer_type) { case MHI_XFER_BUFFER: @@ -831,12 +934,16 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, goto error_chan_cfg; } - mhi_chan->lpm_notify = !!(bit_cfg & MHI_CH_CFG_BIT_LPM_NOTIFY); - mhi_chan->offload_ch = !!(bit_cfg & MHI_CH_CFG_BIT_OFFLOAD_CH); - mhi_chan->db_cfg.reset_req = - !!(bit_cfg & MHI_CH_CFG_BIT_DBMODE_RESET_CH); - mhi_chan->pre_alloc = !!(bit_cfg & MHI_CH_CFG_BIT_PRE_ALLOC); - mhi_chan->auto_start = !!(bit_cfg & MHI_CH_CFG_BIT_AUTO_START); + mhi_chan->lpm_notify = of_property_read_bool(child, + "mhi,lpm-notify"); + mhi_chan->offload_ch = of_property_read_bool(child, + "mhi,offload-chan"); + mhi_chan->db_cfg.reset_req = of_property_read_bool(child, + "mhi,db-mode-switch"); + mhi_chan->pre_alloc = of_property_read_bool(child, + "mhi,auto-queue"); + mhi_chan->auto_start = of_property_read_bool(child, + "mhi,auto-start"); if (mhi_chan->pre_alloc && (mhi_chan->dir != DMA_FROM_DEVICE || @@ -852,15 +959,11 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, if (mhi_chan->pre_alloc) mhi_chan->queue_xfer = mhi_queue_nop; - ret = of_property_read_string_index(of_node, "mhi,chan-names", - i, &mhi_chan->name); - if (ret) - goto error_chan_cfg; - if (!mhi_chan->offload_ch) { - mhi_chan->db_cfg.brstmode = - chan_cfg[i].chan_cfg[MHI_CH_CFG_BRSTMODE]; - if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) + ret = 
of_property_read_u32(child, "mhi,doorbell-mode", + &mhi_chan->db_cfg.brstmode); + if (ret || + MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) goto error_chan_cfg; mhi_chan->db_cfg.process_db = @@ -868,19 +971,17 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, MHI_BRSTMODE_ENABLE) ? mhi_db_brstmode : mhi_db_brstmode_disable; } + mhi_chan->configured = true; if (mhi_chan->lpm_notify) list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); } - kfree(chan_cfg); - return 0; error_chan_cfg: kfree(mhi_cntrl->mhi_chan); - kfree(chan_cfg); return -EINVAL; } @@ -889,6 +990,7 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl, struct device_node *of_node) { int ret; + struct mhi_timesync *mhi_tsync; /* parse firmware image info (optional parameters) */ of_property_read_string(of_node, "mhi,fw-name", &mhi_cntrl->fw_image); @@ -914,9 +1016,41 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl, if (ret) mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->time_sync = of_property_read_bool(of_node, "mhi,time-sync"); + + if (mhi_cntrl->time_sync) { + mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL); + if (!mhi_tsync) { + ret = -ENOMEM; + goto error_time_sync; + } + + ret = of_property_read_u32(of_node, "mhi,tsync-er", + &mhi_tsync->er_index); + if (ret) + goto error_time_sync; + + if (mhi_tsync->er_index >= mhi_cntrl->total_ev_rings) { + ret = -EINVAL; + goto error_time_sync; + } + + mhi_cntrl->mhi_tsync = mhi_tsync; + } + + mhi_cntrl->bounce_buf = of_property_read_bool(of_node, "mhi,use-bb"); + ret = of_property_read_u32(of_node, "mhi,buffer-len", + (u32 *)&mhi_cntrl->buffer_len); + if (ret) + mhi_cntrl->buffer_len = MHI_MAX_MTU; + return 0; - error_ev_cfg: +error_time_sync: + kfree(mhi_tsync); + kfree(mhi_cntrl->mhi_event); + +error_ev_cfg: kfree(mhi_cntrl->mhi_chan); return ret; @@ -950,6 +1084,11 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) if (ret) return -EINVAL; + if (mhi_cntrl->time_sync && + (!mhi_cntrl->time_get 
|| !mhi_cntrl->lpm_disable || + !mhi_cntrl->lpm_enable)) + return -EINVAL; + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); if (!mhi_cntrl->mhi_cmd) @@ -977,7 +1116,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) mhi_event->mhi_cntrl = mhi_cntrl; spin_lock_init(&mhi_event->lock); - if (mhi_event->ctrl_ev) + if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE) tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, (ulong)mhi_event); else @@ -992,6 +1131,22 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) rwlock_init(&mhi_chan->lock); } + if (mhi_cntrl->mhi_tsync) { + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + + spin_lock_init(&mhi_tsync->lock); + INIT_LIST_HEAD(&mhi_tsync->head); + init_completion(&mhi_tsync->completion); + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + mhi_cntrl->parent = mhi_bus.dentry; mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; @@ -1015,6 +1170,7 @@ void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl) kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_event); kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_tsync); mutex_lock(&mhi_bus.lock); list_del(&mhi_cntrl->node); @@ -1248,6 +1404,10 @@ static int mhi_driver_remove(struct device *dev) mutex_unlock(&mhi_chan->mutex); } + + if (mhi_cntrl->tsync_dev == mhi_dev) + mhi_cntrl->tsync_dev = NULL; + /* relinquish any pending votes */ read_lock_bh(&mhi_cntrl->pm_lock); while (atomic_read(&mhi_dev->dev_wake)) diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h index bd26ad6b257cb8558cdf0ca1557706b4ecfa5c9d..df085301cac4424780b53d5821832f7dd51f86d9 100644 --- a/drivers/bus/mhi/core/mhi_internal.h +++ b/drivers/bus/mhi/core/mhi_internal.h @@ -130,6 +130,30 @@ 
extern struct bus_type mhi_bus_type; #define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) #define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) +/* MHI misc capability registers */ +#define MISC_OFFSET (0x24) +#define MISC_CAP_MASK (0xFFFFFFFF) +#define MISC_CAP_SHIFT (0) + +#define CAP_CAPID_MASK (0xFF000000) +#define CAP_CAPID_SHIFT (24) +#define CAP_NEXT_CAP_MASK (0x00FFF000) +#define CAP_NEXT_CAP_SHIFT (12) + +/* MHI Timesync offsets */ +#define TIMESYNC_CFG_OFFSET (0x00) +#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK) +#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT) +#define TIMESYNC_CFG_NEXT_OFF_MASK (CAP_NEXT_CAP_MASK) +#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT) +#define TIMESYNC_CFG_NUMCMD_MASK (0xFF) +#define TIMESYNC_CFG_NUMCMD_SHIFT (0) +#define TIMESYNC_TIME_LOW_OFFSET (0x4) +#define TIMESYNC_TIME_HIGH_OFFSET (0x8) +#define TIMESYNC_DB_OFFSET (0xC) + +#define TIMESYNC_CAP_ID (2) + /* MHI BHI offfsets */ #define BHI_BHIVERSION_MINOR (0x00) #define BHI_BHIVERSION_MAJOR (0x04) @@ -238,27 +262,44 @@ struct __packed bhi_vec_entry { u64 size; }; +enum mhi_cmd_type { + MHI_CMD_TYPE_NOP = 1, + MHI_CMD_TYPE_RESET = 16, + MHI_CMD_TYPE_STOP = 17, + MHI_CMD_TYPE_START = 18, + MHI_CMD_TYPE_TSYNC = 24, +}; + /* no operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (1 << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16) /* channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | (16 << 16)) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_RESET << 16)) /* channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (17 << 16)) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16)) /* channel start command */ #define 
MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | (18 << 16)) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_START << 16)) + +/* time sync cfg command */ +#define MHI_TRE_CMD_TSYNC_CFG_PTR (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \ + (er << 24)) #define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) /* event descriptor macros */ #define MHI_TRE_EV_PTR(ptr) (ptr) @@ -271,7 +312,8 @@ struct __packed bhi_vec_entry { #define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) #define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) #define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) - +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) /* transfer descriptor macros */ #define MHI_TRE_DATA_PTR(ptr) (ptr) @@ -280,11 +322,9 @@ struct __packed bhi_vec_entry { | (ieot << 9) | (ieob << 8) | chain) enum MHI_CMD { - MHI_CMD_NOOP = 0x0, - MHI_CMD_RESET_CHAN = 0x1, - MHI_CMD_STOP_CHAN = 0x2, - MHI_CMD_START_CHAN = 0x3, - MHI_CMD_RESUME_CHAN = 0x4, + MHI_CMD_RESET_CHAN, + MHI_CMD_START_CHAN, + MHI_CMD_TIMSYNC_CFG, }; enum MHI_PKT_TYPE { @@ -298,6 +338,7 @@ enum MHI_PKT_TYPE { MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, MHI_PKT_TYPE_TX_EVENT = 0x22, MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, MHI_PKT_TYPE_STALE_EVENT, /* internal event */ }; @@ -323,41 +364,6 @@ enum MHI_CH_STATE { MHI_CH_STATE_ERROR = 0x5, }; -enum MHI_CH_CFG { - MHI_CH_CFG_CHAN_ID = 0, - MHI_CH_CFG_ELEMENTS = 1, - MHI_CH_CFG_ER_INDEX = 2, - MHI_CH_CFG_DIRECTION = 3, - MHI_CH_CFG_BRSTMODE = 4, - MHI_CH_CFG_POLLCFG = 5, - MHI_CH_CFG_EE = 6, - MHI_CH_CFG_XFER_TYPE = 7, - MHI_CH_CFG_BITCFG = 8, - MHI_CH_CFG_MAX -}; - -#define MHI_CH_CFG_BIT_LPM_NOTIFY BIT(0) /* require 
LPM notification */ -#define MHI_CH_CFG_BIT_OFFLOAD_CH BIT(1) /* satellite mhi devices */ -#define MHI_CH_CFG_BIT_DBMODE_RESET_CH BIT(2) /* require db mode to reset */ -#define MHI_CH_CFG_BIT_PRE_ALLOC BIT(3) /* host allocate buffers for DL */ -#define MHI_CH_CFG_BIT_AUTO_START BIT(4) /* host auto start channels */ - -enum MHI_EV_CFG { - MHI_EV_CFG_ELEMENTS = 0, - MHI_EV_CFG_INTMOD = 1, - MHI_EV_CFG_MSI = 2, - MHI_EV_CFG_CHAN = 3, - MHI_EV_CFG_PRIORITY = 4, - MHI_EV_CFG_BRSTMODE = 5, - MHI_EV_CFG_BITCFG = 6, - MHI_EV_CFG_MAX -}; - -#define MHI_EV_CFG_BIT_HW_EV BIT(0) /* hw event ring */ -#define MHI_EV_CFG_BIT_CL_MANAGE BIT(1) /* client manages the event ring */ -#define MHI_EV_CFG_BIT_OFFLOAD_EV BIT(2) /* satellite driver manges it */ -#define MHI_EV_CFG_BIT_CTRL_EV BIT(3) /* ctrl event ring */ - enum MHI_BRSTMODE { MHI_BRSTMODE_DISABLE = 0x2, MHI_BRSTMODE_ENABLE = 0x3, @@ -441,8 +447,10 @@ enum MHI_PM_STATE { #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) #define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) #define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1)) -#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ - MHI_PM_M1 | MHI_PM_M2)) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M1 | MHI_PM_M2)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) #define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ MHI_PM_IN_ERROR_STATE(pm_state)) #define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ @@ -468,6 +476,13 @@ enum MHI_ER_TYPE { MHI_ER_TYPE_VALID = 0x1, }; +enum mhi_er_data_type { + MHI_ER_DATA_ELEMENT_TYPE, + MHI_ER_CTRL_ELEMENT_TYPE, + MHI_ER_TSYNC_ELEMENT_TYPE, + MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE, +}; + struct db_cfg { bool reset_req; bool db_mode; @@ -521,6 +536,7 @@ struct mhi_cmd { struct mhi_buf_info { 
dma_addr_t p_addr; void *v_addr; + void *bb_addr; void *wp; size_t len; void *cb_buf; @@ -533,15 +549,18 @@ struct mhi_event { u32 msi; int chan; /* this event ring is dedicated to a channel */ u32 priority; + enum mhi_er_data_type data_type; struct mhi_ring ring; struct db_cfg db_cfg; bool hw_ring; bool cl_manage; bool offload_ev; /* managed by a device driver */ - bool ctrl_ev; spinlock_t lock; struct mhi_chan *mhi_chan; /* dedicated to channel */ struct tasklet_struct task; + int (*process_event)(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota); struct mhi_controller *mhi_cntrl; }; @@ -557,7 +576,6 @@ struct mhi_chan { struct mhi_ring tre_ring; u32 er_index; u32 intmod; - u32 tiocm; enum dma_data_direction dir; struct db_cfg db_cfg; enum MHI_EE ee; @@ -583,6 +601,25 @@ struct mhi_chan { struct list_head node; }; +struct tsync_node { + struct list_head node; + u32 sequence; + u64 local_time; + u64 remote_time; + struct mhi_device *mhi_dev; + void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time); +}; + +struct mhi_timesync { + u32 er_index; + void __iomem *db; + enum MHI_EV_CCS ccs; + struct completion completion; + spinlock_t lock; + struct list_head head; +}; + struct mhi_bus { struct list_head controller_list; struct mutex lock; @@ -623,6 +660,15 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, 
struct mhi_chan *mhi_chan, + enum MHI_CMD cmd); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); /* queue transfer buffer */ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, @@ -659,6 +705,9 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum MHI_STATE state); +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability, + u32 *offset); +int mhi_init_timesync(struct mhi_controller *mhi_cntrl); /* memory allocation methods */ static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, @@ -694,6 +743,15 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, struct image_info *image_info); +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + /* initialization methods */ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index 4695fe7605ebc6e0f6e710e2fdfe9f5a91d2c9e7..e80d5790f15cd206e138de88b940ea965592fd62 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -62,6 +62,40 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, return 0; } +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, + u32 capability, + u32 *offset) +{ + u32 cur_cap, next_offset; + int ret; + + /* get the 1st supported capability offset */ + ret = mhi_read_reg_field(mhi_cntrl, 
mhi_cntrl->regs, MISC_OFFSET, + MISC_CAP_MASK, MISC_CAP_SHIFT, offset); + if (ret) + return ret; + do { + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_CAPID_MASK, CAP_CAPID_SHIFT, + &cur_cap); + if (ret) + return ret; + + if (cur_cap == capability) + return 0; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT, + &next_offset); + if (ret) + return ret; + + *offset += next_offset; + } while (next_offset); + + return -ENXIO; +} + void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, @@ -264,6 +298,51 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, return (tmp == ring->rp); } +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr, + buf_info->len, buf_info->dir); + if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + return -ENOMEM; + + return 0; +} + +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len, + &buf_info->p_addr, GFP_ATOMIC); + + if (!buf) + return -ENOMEM; + + if (buf_info->dir == DMA_TO_DEVICE) + memcpy(buf, buf_info->v_addr, buf_info->len); + + buf_info->bb_addr = buf; + + return 0; +} + +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len, + buf_info->dir); +} + +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info) +{ + if (buf_info->dir == DMA_FROM_DEVICE) + memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); + + mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr, + buf_info->p_addr); +} + int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, void *buf, @@ -276,6 +355,8 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, 
struct mhi_ring *buf_ring = &mhi_chan->buf_ring; struct mhi_buf_info *buf_info; struct mhi_tre *mhi_tre; + bool assert_wake = false; + int ret; if (mhi_is_ring_full(mhi_cntrl, tre_ring)) return -ENOMEM; @@ -294,7 +375,17 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); } - mhi_cntrl->wake_get(mhi_cntrl, false); + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } /* generate the tre */ buf_info = buf_ring->wp; @@ -303,10 +394,8 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, buf_info->wp = tre_ring->wp; buf_info->dir = mhi_chan->dir; buf_info->len = len; - buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr, len, - buf_info->dir); - - if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) goto map_error; mhi_tre = tre_ring->wp; @@ -328,21 +417,19 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, read_unlock_bh(&mhi_chan->lock); } - if (mhi_chan->dir == DMA_FROM_DEVICE) { - bool override = (mhi_cntrl->pm_state != MHI_PM_M0); - - mhi_cntrl->wake_put(mhi_cntrl, override); - } + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); read_unlock_bh(&mhi_cntrl->pm_lock); return 0; map_error: - mhi_cntrl->wake_put(mhi_cntrl, false); + if (assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, false); read_unlock_bh(&mhi_cntrl->pm_lock); - return -ENOMEM; + return ret; } int mhi_gen_tre(struct mhi_controller *mhi_cntrl, @@ -356,6 +443,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_tre *mhi_tre; struct mhi_buf_info *buf_info; int eot, eob, chain, bei; + int ret; buf_ring = 
&mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; @@ -366,11 +454,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, buf_info->wp = tre_ring->wp; buf_info->dir = mhi_chan->dir; buf_info->len = buf_len; - buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf, buf_len, - buf_info->dir); - if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr)) - return -ENOMEM; + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; eob = !!(flags & MHI_EOB); eot = !!(flags & MHI_EOT); @@ -402,13 +489,17 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ring *tre_ring; unsigned long flags; + bool assert_wake = false; int ret; - read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { MHI_ERR("MHI is not in active state, pm_state:%s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state)); - read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); return -EIO; } @@ -419,18 +510,27 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); } - mhi_cntrl->wake_get(mhi_cntrl, false); - read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); - tre_ring = &mhi_chan->tre_ring; if (mhi_is_ring_full(mhi_cntrl, tre_ring)) - goto error_queue; + return -ENOMEM; ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); if (unlikely(ret)) - goto error_queue; + return ret; read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + if 
(likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { unsigned long flags; @@ -439,22 +539,12 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, read_unlock_irqrestore(&mhi_chan->lock, flags); } - if (mhi_chan->dir == DMA_FROM_DEVICE) { - bool override = (mhi_cntrl->pm_state != MHI_PM_M0); - - mhi_cntrl->wake_put(mhi_cntrl, override); - } + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); return 0; - -error_queue: - read_lock_irqsave(&mhi_cntrl->pm_lock, flags); - mhi_cntrl->wake_put(mhi_cntrl, false); - read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); - - return -ENOMEM; } /* destroy specific device */ @@ -491,16 +581,76 @@ void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason) mhi_drv->status_cb(mhi_dev, cb_reason); } +static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + struct device_node *controller, *node; + const char *dt_name; + int ret; + + controller = mhi_cntrl->of_node; + for_each_available_child_of_node(controller, node) { + ret = of_property_read_string(node, "mhi,chan", &dt_name); + if (ret) + continue; + if (!strcmp(mhi_dev->chan_name, dt_name)) { + mhi_dev->dev.of_node = node; + break; + } + } +} + +static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + int ret; + + if (!mhi_tsync || !mhi_tsync->db) + return; + + if (mhi_cntrl->ee != MHI_EE_AMSS) + return; + + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_TIMESYNC_TYPE; + mhi_dev->chan_name = "TIME_SYNC"; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + ret = device_add(&mhi_dev->dev); + if (ret) { + 
MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + return; + } + + mhi_cntrl->tsync_dev = mhi_dev; +} + /* bind mhi channels into mhi devices */ void mhi_create_devices(struct mhi_controller *mhi_cntrl) { int i; struct mhi_chan *mhi_chan; struct mhi_device *mhi_dev; - struct device_node *controller, *node; - const char *dt_name; int ret; + /* + * we need to create time sync device before creating other + * devices, because client may try to capture time during + * client probe. + */ + mhi_create_time_sync_dev(mhi_cntrl); + mhi_chan = mhi_cntrl->mhi_chan; for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { if (!mhi_chan->configured || mhi_chan->ee != mhi_cntrl->ee) @@ -509,6 +659,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) if (!mhi_dev) return; + mhi_dev->dev_type = MHI_XFER_TYPE; switch (mhi_chan->dir) { case DMA_TO_DEVICE: mhi_dev->ul_chan = mhi_chan; @@ -557,15 +708,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) mhi_dev->slot, mhi_dev->chan_name); /* add if there is a matching DT node */ - controller = mhi_cntrl->of_node; - for_each_available_child_of_node(controller, node) { - ret = of_property_read_string(node, "mhi,chan", - &dt_name); - if (ret) - continue; - if (!strcmp(mhi_dev->chan_name, dt_name)) - mhi_dev->dev.of_node = node; - } + mhi_assign_of_node(mhi_cntrl, mhi_dev); ret = device_add(&mhi_dev->dev); if (ret) { @@ -637,8 +780,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, else xfer_len = buf_info->len; - dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, - buf_info->len, buf_info->dir); + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); result.buf_addr = buf_info->cb_buf; result.bytes_xferd = xfer_len; @@ -706,9 +848,44 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, return 0; } -static int mhi_process_event_ring(struct mhi_controller *mhi_cntrl, - struct mhi_event *mhi_event, - u32 event_quota) +static void 
mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + struct mhi_timesync *mhi_tsync; + enum mhi_cmd_type type; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + /* out of order completion received */ + MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion"); + + type = MHI_TRE_GET_CMD_TYPE(cmd_pkt); + + if (type == MHI_CMD_TYPE_TSYNC) { + mhi_tsync = mhi_cntrl->mhi_tsync; + mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_tsync->completion); + } else { + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) { struct mhi_tre *dev_rp, *local_rp; struct mhi_ring *ev_ring = &mhi_event->ring; @@ -716,38 +893,27 @@ static int mhi_process_event_ring(struct mhi_controller *mhi_cntrl, &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; int count = 0; - read_lock_bh(&mhi_cntrl->pm_lock); + /* + * this is a quick check to avoid unnecessary event processing + * in case we already in error state, but it's still possible + * to transition to error state while processing events + */ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { MHI_ERR("No EV access, PM_STATE:%s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state)); - read_unlock_bh(&mhi_cntrl->pm_lock); return -EIO; } - mhi_cntrl->wake_get(mhi_cntrl, false); - read_unlock_bh(&mhi_cntrl->pm_lock); - dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); local_rp = ev_ring->rp; - while (dev_rp != local_rp && 
event_quota > 0) { + while (dev_rp != local_rp) { enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); switch (type) { - case MHI_PKT_TYPE_TX_EVENT: - { - u32 chan; - struct mhi_chan *mhi_chan; - - chan = MHI_TRE_GET_EV_CHID(local_rp); - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; - break; - } case MHI_PKT_TYPE_STATE_CHANGE_EVENT: { enum MHI_STATE new_state; @@ -789,31 +955,8 @@ static int mhi_process_event_ring(struct mhi_controller *mhi_cntrl, break; } case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: - { - dma_addr_t ptr = MHI_TRE_GET_EV_PTR(local_rp); - struct mhi_cmd *cmd_ring = - &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; - struct mhi_ring *mhi_ring = &cmd_ring->ring; - struct mhi_tre *cmd_pkt; - struct mhi_chan *mhi_chan; - u32 chan; - - cmd_pkt = mhi_to_virtual(mhi_ring, ptr); - - /* out of order completion received */ - MHI_ASSERT(cmd_pkt != mhi_ring->rp, - "Out of order cmd completion"); - - chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); - - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - write_lock_bh(&mhi_chan->lock); - mhi_chan->ccs = MHI_TRE_GET_EV_CODE(local_rp); - complete(&mhi_chan->completion); - write_unlock_bh(&mhi_chan->lock); - mhi_del_ring_element(mhi_cntrl, mhi_ring); + mhi_process_cmd_completion(mhi_cntrl, local_rp); break; - } case MHI_PKT_TYPE_EE_EVENT: { enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX; @@ -848,12 +991,8 @@ static int mhi_process_event_ring(struct mhi_controller *mhi_cntrl, break; } - case MHI_PKT_TYPE_STALE_EVENT: - MHI_VERB("Stale Event received for chan:%u\n", - MHI_TRE_GET_EV_CHID(local_rp)); - break; default: - MHI_ERR("Unsupported packet type code 0x%x\n", type); + MHI_ASSERT(1, "Unsupported ev type"); break; } @@ -862,13 +1001,141 @@ static int mhi_process_event_ring(struct mhi_controller *mhi_cntrl, dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); count++; } + 
read_lock_bh(&mhi_cntrl->pm_lock); if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) mhi_ring_er_db(mhi_event); - mhi_cntrl->wake_put(mhi_cntrl, false); read_unlock_bh(&mhi_cntrl->pm_lock); MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_timesync 
*mhi_tsync = mhi_cntrl->mhi_tsync; + int count = 0; + u32 sequence; + u64 remote_time; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + struct tsync_node *tsync_node; + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event"); + + sequence = MHI_TRE_GET_EV_SEQ(local_rp); + remote_time = MHI_TRE_GET_EV_TIME(local_rp); + + do { + spin_lock_irq(&mhi_tsync->lock); + tsync_node = list_first_entry_or_null(&mhi_tsync->head, + struct tsync_node, node); + MHI_ASSERT(!tsync_node, "Unexpected Event"); + + if (unlikely(!tsync_node)) + break; + + list_del(&tsync_node->node); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * device may not be able to process all time sync commands + * the host issues and only processes the last command it receives + */ + if (tsync_node->sequence == sequence) { + tsync_node->cb_func(tsync_node->mhi_dev, + sequence, + tsync_node->local_time, + remote_time); + kfree(tsync_node); + } else { + kfree(tsync_node); + } + } while (true); + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + return count; } @@ -881,7 +1148,7 @@ void mhi_ev_task(unsigned long data) /* process all pending events */ spin_lock_bh(&mhi_event->lock); - mhi_process_event_ring(mhi_cntrl, mhi_event, U32_MAX); + mhi_event->process_event(mhi_cntrl, 
mhi_event, U32_MAX); spin_unlock_bh(&mhi_event->lock); } @@ -896,7 +1163,7 @@ void mhi_ctrl_ev_task(unsigned long data) MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index); /* process ctrl events events */ - ret = mhi_process_event_ring(mhi_cntrl, mhi_event, U32_MAX); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); /* * we received a MSI but no events to process maybe device went to @@ -982,23 +1249,22 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev) return IRQ_WAKE_THREAD; } -static int mhi_send_cmd(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, - enum MHI_CMD cmd) +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum MHI_CMD cmd) { struct mhi_tre *cmd_tre = NULL; struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; struct mhi_ring *ring = &mhi_cmd->ring; - int chan = mhi_chan->chan; + int chan = 0; MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), TO_MHI_STATE_STR(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); - /* MHI host currently handles RESET and START cmd */ - if (cmd != MHI_CMD_START_CHAN && cmd != MHI_CMD_RESET_CHAN) - return -EINVAL; + if (mhi_chan) + chan = mhi_chan->chan; spin_lock_bh(&mhi_cmd->lock); if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { @@ -1008,16 +1274,26 @@ static int mhi_send_cmd(struct mhi_controller *mhi_cntrl, /* prepare the cmd tre */ cmd_tre = ring->wp; - if (cmd == MHI_CMD_START_CHAN) { - cmd_tre->ptr = MHI_TRE_CMD_START_PTR; - cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; - cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); - } else { + switch (cmd) { + case MHI_CMD_RESET_CHAN: cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = 
MHI_TRE_CMD_START_DWORD1(chan); + break; + case MHI_CMD_TIMSYNC_CFG: + cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1 + (mhi_cntrl->mhi_tsync->er_index); + break; + } + MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr, cmd_tre->dword[0], cmd_tre->dword[1]); @@ -1095,11 +1371,12 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, if (mhi_chan->pre_alloc) { int nr_el = get_nr_avail_ring_elements(mhi_cntrl, &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; while (nr_el--) { void *buf; - buf = kmalloc(MHI_MAX_MTU, GFP_KERNEL); + buf = kmalloc(len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto error_pre_alloc; @@ -1107,7 +1384,7 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, /* prepare transfer descriptors */ ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, - MHI_MAX_MTU, MHI_EOT); + len, MHI_EOT); if (ret) { MHI_ERR("Chan:%d error prepare buffer\n", mhi_chan->chan); @@ -1222,8 +1499,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) if (mhi_chan->dir == DMA_TO_DEVICE) mhi_cntrl->wake_put(mhi_cntrl, false); - dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, - buf_info->len, buf_info->dir); + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); mhi_del_ring_element(mhi_cntrl, buf_ring); mhi_del_ring_element(mhi_cntrl, tre_ring); @@ -1471,9 +1747,103 @@ int mhi_poll(struct mhi_device *mhi_dev, int ret; spin_lock_bh(&mhi_event->lock); - ret = mhi_process_event_ring(mhi_cntrl, mhi_event, budget); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); spin_unlock_bh(&mhi_event->lock); return ret; } EXPORT_SYMBOL(mhi_poll); + + +/** + * mhi_get_remote_time - Get external modem time relative to host time + * Trigger event to capture modem time, also capture host time so client + * can do a relative drift comparison. 
+ * Recommended only tsync device calls this method and do not call this + * from atomic context + * @mhi_dev: Device associated with the channels + * @sequence:unique sequence id track event + * @cb_func: callback function to call back + */ +int mhi_get_remote_time(struct mhi_device *mhi_dev, + u32 sequence, + void (*cb_func)(struct mhi_device *mhi_dev, + u32 sequence, + u64 local_time, + u64 remote_time)) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + struct tsync_node *tsync_node; + int ret; + + /* not all devices support time feature */ + if (!mhi_tsync) + return -EIO; + + /* tsync db can only be rung in M0 state */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + /* + * technically we can use GFP_KERNEL, but wants to avoid + * # of times scheduling out + */ + tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC); + if (!tsync_node) { + ret = -ENOMEM; + goto error_no_mem; + } + + tsync_node->sequence = sequence; + tsync_node->cb_func = cb_func; + tsync_node->mhi_dev = mhi_dev; + + /* disable link level low power modes */ + mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_invalid_state; + } + + spin_lock_irq(&mhi_tsync->lock); + list_add_tail(&tsync_node->node, &mhi_tsync->head); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * time critical code, delay between these two steps should be + * deterministic as possible. 
+ */ + preempt_disable(); + local_irq_disable(); + + tsync_node->local_time = + mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); + writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db); + /* write must go thru immediately */ + wmb(); + + local_irq_enable(); + preempt_enable(); + + ret = 0; + +error_invalid_state: + if (ret) + kfree(tsync_node); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data); + +error_no_mem: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time); diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c index c476a204f28f8db2ce907eec00ebf6e4b27db9c3..5ed4e2a0856edd263aadb3c3ac1af2b4468e5a26 100644 --- a/drivers/bus/mhi/core/mhi_pm.c +++ b/drivers/bus/mhi/core/mhi_pm.c @@ -181,7 +181,7 @@ void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) if (unlikely(force)) { spin_lock_irqsave(&mhi_cntrl->wlock, flags); atomic_inc(&mhi_cntrl->dev_wake); - if (MHI_WAKE_DB_ACCESS_VALID(mhi_cntrl->pm_state) && + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && !mhi_cntrl->wake_set) { mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); mhi_cntrl->wake_set = true; @@ -194,7 +194,7 @@ void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) spin_lock_irqsave(&mhi_cntrl->wlock, flags); if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && - MHI_WAKE_DB_ACCESS_VALID(mhi_cntrl->pm_state) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && !mhi_cntrl->wake_set) { mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); mhi_cntrl->wake_set = true; @@ -216,7 +216,7 @@ void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override) spin_lock_irqsave(&mhi_cntrl->wlock, flags); if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && - MHI_WAKE_DB_ACCESS_VALID(mhi_cntrl->pm_state) && !override && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && 
!override && mhi_cntrl->wake_set) { mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); mhi_cntrl->wake_set = false; @@ -329,7 +329,7 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) } mhi_cntrl->M0++; read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_get(mhi_cntrl, false); /* ring all event rings and CMD ring only if we're in AMSS */ if (mhi_cntrl->ee == MHI_EE_AMSS) { @@ -523,8 +523,13 @@ static int mhi_pm_amss_transition(struct mhi_controller *mhi_cntrl) spin_unlock_irq(&mhi_event->lock); } + read_unlock_bh(&mhi_cntrl->pm_lock); + /* setup support for time sync */ + if (mhi_cntrl->time_sync) + mhi_init_timesync(mhi_cntrl); + MHI_LOG("Adding new devices\n"); /* add supported devices */ @@ -901,6 +906,8 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) mhi_deinit_dev_ctxt(mhi_cntrl); } + if (mhi_cntrl->mhi_tsync) + mhi_cntrl->mhi_tsync->db = NULL; } EXPORT_SYMBOL(mhi_power_down); @@ -1064,12 +1071,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl) return 0; } -static int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) { int ret; read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_get(mhi_cntrl, false); + mhi_cntrl->wake_get(mhi_cntrl, true); if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); @@ -1100,7 +1107,7 @@ void mhi_device_get(struct mhi_device *mhi_dev) atomic_inc(&mhi_dev->dev_wake); read_lock_bh(&mhi_cntrl->pm_lock); - mhi_cntrl->wake_get(mhi_cntrl, false); + mhi_cntrl->wake_get(mhi_cntrl, true); read_unlock_bh(&mhi_cntrl->pm_lock); } EXPORT_SYMBOL(mhi_device_get); diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c index af5c5db4fc05e8913c3dcb5bf5386c89d9765fb5..8b4ba70800367d517a2e003281c3a622c6646a75 100644 --- a/drivers/bus/mhi/devices/mhi_netdev.c +++ 
b/drivers/bus/mhi/devices/mhi_netdev.c @@ -320,8 +320,6 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget) return 0; } - mhi_device_get(mhi_dev); - rx_work = mhi_poll(mhi_dev, budget); if (rx_work < 0) { MSG_ERR("Error polling ret:%d\n", rx_work); @@ -346,7 +344,6 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget) mhi_netdev->stats.rx_budget_overflow++; exit_poll: - mhi_device_put(mhi_dev); read_unlock_bh(&mhi_netdev->pm_lock); MSG_VERB("polled %d pkts\n", rx_work); diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c index c8a3fdbfa9178c58d2afefa49b2a9de437bd4301..dd7581e36d03803eaf85c43a920b4f0bafd48309 100644 --- a/drivers/bus/mhi/devices/mhi_uci.c +++ b/drivers/bus/mhi/devices/mhi_uci.c @@ -251,7 +251,7 @@ static ssize_t mhi_uci_write(struct file *file, struct mhi_device *mhi_dev = uci_dev->mhi_dev; struct uci_chan *uci_chan = &uci_dev->ul_chan; size_t bytes_xfered = 0; - int ret; + int ret, nr_avail; if (!buf || !count) return -EINVAL; @@ -275,8 +275,8 @@ static ssize_t mhi_uci_write(struct file *file, /* wait for free descriptors */ ret = wait_event_interruptible(uci_chan->wq, (!uci_dev->enabled) || - mhi_get_no_free_descriptors - (mhi_dev, DMA_TO_DEVICE) > 0); + (nr_avail = mhi_get_no_free_descriptors(mhi_dev, + DMA_TO_DEVICE)) > 0); if (ret == -ERESTARTSYS) { MSG_LOG("Exit signal caught for node\n"); @@ -297,7 +297,13 @@ static ssize_t mhi_uci_write(struct file *file, } spin_lock_bh(&uci_chan->lock); - flags = (count - xfer_size) ? 
MHI_EOB : MHI_EOT; + + /* if ring is full after this force EOT */ + if (nr_avail > 1 && (count - xfer_size)) + flags = MHI_CHAIN; + else + flags = MHI_EOT; + if (uci_dev->enabled) ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf, xfer_size, flags); @@ -586,7 +592,7 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev, INIT_LIST_HEAD(&uci_chan->pending); }; - uci_dev->mtu = id->driver_data; + uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); mhi_device_set_devdata(mhi_dev, uci_dev); uci_dev->enabled = true; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 5f7d86509f2f523fc48c7c5fe3dc616cdfafadec..bfc566d3f31a40cf5b89d9284b2538644af68dee 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, cd_dbg(CD_OPEN, "entering cdrom_open\n"); - /* open is event synchronization point, check events first */ - check_disk_change(bdev); - /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. 
*/ cdi->use_count++; diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 6495b03f576ca1a5867b000e210cf2d803fa18d4..ae3a7537cf0fbce1f85d76446f3f2ca2087b151a 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = { static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) { int ret; + + check_disk_change(bdev); + mutex_lock(&gdrom_mutex); ret = cdrom_open(gd.cd_info, bdev, mode); mutex_unlock(&gdrom_mutex); diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index d5b76c3cd9bf8eb5702972b4e4551fb3746863d8..39a0bdd4404a288305671c42f47a20464263e949 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -54,6 +54,8 @@ #define TZ_PIL_AUTH_QDSP6_PROC 1 #define ADSP_MMAP_HEAP_ADDR 4 #define ADSP_MMAP_REMOTE_HEAP_ADDR 8 +#define FASTRPC_DMAHANDLE_NOMAP (16) + #define FASTRPC_ENOSUCH 39 #define VMID_SSC_Q6 5 #define VMID_ADSP_Q6 6 @@ -336,6 +338,7 @@ struct fastrpc_file { struct pm_qos_request pm_qos_req; int qos_request; struct mutex map_mutex; + struct mutex internal_map_mutex; }; static struct fastrpc_apps gfa; @@ -471,15 +474,11 @@ static void fastrpc_mmap_add(struct fastrpc_mmap *map) map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { struct fastrpc_apps *me = &gfa; - spin_lock(&me->hlock); hlist_add_head(&map->hn, &me->maps); - spin_unlock(&me->hlock); } else { struct fastrpc_file *fl = map->fl; - spin_lock(&fl->hlock); hlist_add_head(&map->hn, &fl->maps); - spin_unlock(&fl->hlock); } } @@ -495,7 +494,6 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, return -EOVERFLOW; if (mflags == ADSP_MMAP_HEAP_ADDR || mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - spin_lock(&me->hlock); hlist_for_each_entry_safe(map, n, &me->maps, hn) { if (va >= map->va && va + len <= map->va + map->len && @@ -506,9 +504,7 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, break; } } - spin_unlock(&me->hlock); } else { - spin_lock(&fl->hlock); 
hlist_for_each_entry_safe(map, n, &fl->maps, hn) { if (va >= map->va && va + len <= map->va + map->len && @@ -519,7 +515,6 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, break; } } - spin_unlock(&fl->hlock); } if (match) { *ppmap = match; @@ -552,7 +547,6 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, struct hlist_node *n; struct fastrpc_apps *me = &gfa; - spin_lock(&me->hlock); hlist_for_each_entry_safe(map, n, &me->maps, hn) { if (map->raddr == va && map->raddr + map->len == va + len && @@ -562,12 +556,10 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, break; } } - spin_unlock(&me->hlock); if (match) { *ppmap = match; return 0; } - spin_lock(&fl->hlock); hlist_for_each_entry_safe(map, n, &fl->maps, hn) { if (map->raddr == va && map->raddr + map->len == va + len && @@ -577,7 +569,6 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, break; } } - spin_unlock(&fl->hlock); if (match) { *ppmap = match; return 0; @@ -597,19 +588,15 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) fl = map->fl; if (map->flags == ADSP_MMAP_HEAP_ADDR || map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { - spin_lock(&me->hlock); map->refs--; if (!map->refs) hlist_del_init(&map->hn); - spin_unlock(&me->hlock); if (map->refs > 0) return; } else { - spin_lock(&fl->hlock); map->refs--; if (!map->refs) hlist_del_init(&map->hn); - spin_unlock(&fl->hlock); if (map->refs > 0 && !flags) return; } @@ -624,6 +611,14 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags) dma_free_coherent(me->dev, map->size, (void *)map->va, (dma_addr_t)map->phys); } + } else if (map->flags == FASTRPC_DMAHANDLE_NOMAP) { + if (!IS_ERR_OR_NULL(map->table)) + dma_buf_unmap_attachment(map->attach, map->table, + DMA_BIDIRECTIONAL); + if (!IS_ERR_OR_NULL(map->attach)) + dma_buf_detach(map->buf, map->attach); + if (!IS_ERR_OR_NULL(map->buf)) + dma_buf_put(map->buf); } else { int destVM[1] = 
{VMID_HLOS}; int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; @@ -694,6 +689,33 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, map->phys = (uintptr_t)region_phys; map->size = len; map->va = (uintptr_t)region_vaddr; + } else if (mflags == FASTRPC_DMAHANDLE_NOMAP) { + VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd))); + if (err) + goto bail; + VERIFY(err, !dma_buf_get_flags(map->buf, &flags)); + if (err) + goto bail; + map->secure = flags & ION_FLAG_SECURE; + map->uncached = 1; + map->va = 0; + map->phys = 0; + + VERIFY(err, !IS_ERR_OR_NULL(map->attach = + dma_buf_attach(map->buf, me->dev))); + if (err) + goto bail; + + map->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; + VERIFY(err, !IS_ERR_OR_NULL(map->table = + dma_buf_map_attachment(map->attach, + DMA_BIDIRECTIONAL))); + if (err) + goto bail; + VERIFY(err, map->table->nents == 1); + if (err) + goto bail; + map->phys = sg_dma_address(map->table->sgl); } else { if (map->attr && (map->attr & FASTRPC_ATTR_KEEP_MAP)) { pr_info("adsprpc: buffer mapped with persist attr %x\n", @@ -1071,8 +1093,10 @@ static void context_free(struct smq_invoke_ctx *ctx) spin_lock(&ctx->fl->hlock); hlist_del_init(&ctx->hn); spin_unlock(&ctx->fl->hlock); + mutex_lock(&ctx->fl->map_mutex); for (i = 0; i < nbufs; ++i) fastrpc_mmap_free(ctx->maps[i], 0); + mutex_unlock(&ctx->fl->map_mutex); fastrpc_buf_free(ctx->buf, 1); ctx->magic = 0; ctx->ctxid = 0; @@ -1213,21 +1237,32 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) uintptr_t buf = (uintptr_t)lpra[i].buf.pv; size_t len = lpra[i].buf.len; + mutex_lock(&ctx->fl->map_mutex); if (ctx->fds[i] && (ctx->fds[i] != -1)) fastrpc_mmap_create(ctx->fl, ctx->fds[i], ctx->attrs[i], buf, len, mflags, &ctx->maps[i]); + mutex_unlock(&ctx->fl->map_mutex); ipage += 1; } PERF_END); handles = REMOTE_SCALARS_INHANDLES(sc) + REMOTE_SCALARS_OUTHANDLES(sc); + mutex_lock(&ctx->fl->map_mutex); for (i = bufs; i < bufs + handles; i++) { + int dmaflags = 0; + + 
if (ctx->attrs && (ctx->attrs[i] & FASTRPC_ATTR_NOMAP)) + dmaflags = FASTRPC_DMAHANDLE_NOMAP; VERIFY(err, !fastrpc_mmap_create(ctx->fl, ctx->fds[i], - FASTRPC_ATTR_NOVA, 0, 0, 0, &ctx->maps[i])); - if (err) + FASTRPC_ATTR_NOVA, 0, 0, dmaflags, + &ctx->maps[i])); + if (err) { + mutex_unlock(&ctx->fl->map_mutex); goto bail; + } ipage += 1; } + mutex_unlock(&ctx->fl->map_mutex); metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) + (sizeof(uint32_t) * M_CRCLIST); @@ -1436,10 +1471,13 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, if (err) goto bail; } else { + mutex_lock(&ctx->fl->map_mutex); fastrpc_mmap_free(ctx->maps[i], 0); + mutex_unlock(&ctx->fl->map_mutex); ctx->maps[i] = NULL; } } + mutex_lock(&ctx->fl->map_mutex); if (inbufs + outbufs + handles) { for (i = 0; i < M_FDLIST; i++) { if (!fdlist[i]) @@ -1449,6 +1487,7 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, fastrpc_mmap_free(mmap, 0); } } + mutex_unlock(&ctx->fl->map_mutex); if (ctx->crc && crclist && rpra) K_COPY_TO_USER(err, kernel, ctx->crc, crclist, M_CRCLIST*sizeof(uint32_t)); @@ -1753,8 +1792,10 @@ static int fastrpc_init_process(struct fastrpc_file *fl, if (err) goto bail; if (init->filelen) { + mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0, init->file, init->filelen, mflags, &file)); + mutex_unlock(&fl->map_mutex); if (err) goto bail; } @@ -1763,8 +1804,10 @@ static int fastrpc_init_process(struct fastrpc_file *fl, init->memlen)); if (err) goto bail; + mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0, init->mem, init->memlen, mflags, &mem)); + mutex_unlock(&fl->map_mutex); if (err) goto bail; inbuf.pageslen = 1; @@ -1836,9 +1879,11 @@ static int fastrpc_init_process(struct fastrpc_file *fl, inbuf.pageslen = 0; if (!me->staticpd_flags) { inbuf.pageslen = 1; + mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem, init->memlen, 
ADSP_MMAP_REMOTE_HEAP_ADDR, &mem)); + mutex_unlock(&fl->map_mutex); if (err) goto bail; phys = mem->phys; @@ -1899,10 +1944,15 @@ static int fastrpc_init_process(struct fastrpc_file *fl, me->channel[fl->cid].rhvm.vmid, me->channel[fl->cid].rhvm.vmcount, hlosvm, hlosvmperm, 1); + mutex_lock(&fl->map_mutex); fastrpc_mmap_free(mem, 0); + mutex_unlock(&fl->map_mutex); } - if (file) + if (file) { + mutex_lock(&fl->map_mutex); fastrpc_mmap_free(file, 0); + mutex_unlock(&fl->map_mutex); + } return err; } @@ -2157,18 +2207,25 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl, int err = 0; struct fastrpc_mmap *map = NULL; + mutex_lock(&fl->internal_map_mutex); mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map)); + mutex_unlock(&fl->map_mutex); if (err) goto bail; VERIFY(err, !fastrpc_munmap_on_dsp(fl, map)); if (err) goto bail; + mutex_lock(&fl->map_mutex); fastrpc_mmap_free(map, 0); + mutex_unlock(&fl->map_mutex); bail: - if (err && map) + if (err && map) { + mutex_lock(&fl->map_mutex); fastrpc_mmap_add(map); - mutex_unlock(&fl->map_mutex); + mutex_unlock(&fl->map_mutex); + } + mutex_unlock(&fl->internal_map_mutex); return err; } @@ -2181,16 +2238,18 @@ static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl, VERIFY(err, (fl && ud)); if (err) goto bail; - + mutex_lock(&fl->map_mutex); if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) { pr_err("adsprpc: mapping not found to unmap fd 0x%x, va 0x%llx, len 0x%x\n", ud->fd, (unsigned long long)ud->va, (unsigned int)ud->len); err = -1; + mutex_unlock(&fl->map_mutex); goto bail; } if (map) fastrpc_mmap_free(map, 0); + mutex_unlock(&fl->map_mutex); bail: return err; } @@ -2203,15 +2262,18 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl, struct fastrpc_mmap *map = NULL; int err = 0; + mutex_lock(&fl->internal_map_mutex); mutex_lock(&fl->map_mutex); if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin, - ud->size, ud->flags, 1, &map)){ + 
ud->size, ud->flags, 1, &map)) { mutex_unlock(&fl->map_mutex); + mutex_unlock(&fl->internal_map_mutex); return 0; } VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0, (uintptr_t)ud->vaddrin, ud->size, ud->flags, &map)); + mutex_unlock(&fl->map_mutex); if (err) goto bail; VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map)); @@ -2219,9 +2281,12 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl, goto bail; ud->vaddrout = map->raddr; bail: - if (err && map) + if (err && map) { + mutex_lock(&fl->map_mutex); fastrpc_mmap_free(map, 0); - mutex_unlock(&fl->map_mutex); + mutex_unlock(&fl->map_mutex); + } + mutex_unlock(&fl->internal_map_mutex); return err; } @@ -2409,6 +2474,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl) spin_unlock(&fl->hlock); fastrpc_context_list_dtor(fl); fastrpc_buf_list_free(fl); + mutex_lock(&fl->map_mutex); do { lmap = NULL; hlist_for_each_entry_safe(map, n, &fl->maps, hn) { @@ -2418,6 +2484,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl) } fastrpc_mmap_free(lmap, 1); } while (lmap); + mutex_unlock(&fl->map_mutex); if (fl->sctx) fastrpc_session_free(&fl->apps->channel[cid], fl->sctx); @@ -2438,6 +2505,8 @@ static int fastrpc_file_free(struct fastrpc_file *fl) } while (fperf); mutex_unlock(&fl->perf_mutex); mutex_destroy(&fl->perf_mutex); + mutex_destroy(&fl->map_mutex); + mutex_destroy(&fl->internal_map_mutex); kfree(fl); return 0; } @@ -2451,7 +2520,6 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) pm_qos_remove_request(&fl->pm_qos_req); if (fl->debugfs_file != NULL) debugfs_remove(fl->debugfs_file); - mutex_destroy(&fl->map_mutex); fastrpc_file_free(fl); file->private_data = NULL; } @@ -2607,8 +2675,10 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) if (me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { + mutex_lock(&fl->map_mutex); if (fastrpc_mmap_remove_ssr(fl)) pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n"); + mutex_unlock(&fl->map_mutex); 
me->channel[cid].prevssrcount = me->channel[cid].ssrcount; } @@ -2647,6 +2717,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) memset(&fl->perf, 0, sizeof(fl->perf)); fl->qos_request = 0; filp->private_data = fl; + mutex_init(&fl->internal_map_mutex); mutex_init(&fl->map_mutex); spin_lock(&me->hlock); hlist_add_head(&fl->hn, &me->drivers); @@ -3145,7 +3216,8 @@ static int fastrpc_probe(struct platform_device *pdev) break; } } - if (range.addr) { + if (range.addr && !of_property_read_bool(dev->of_node, + "restrict-access")) { int srcVM[1] = {VMID_HLOS}; int destVM[4] = {VMID_HLOS, VMID_MSS_MSA, VMID_SSC_Q6, VMID_ADSP_Q6}; diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h index b9016e8699aa422817bb221e180c975332276dec..207e1ab08e21d1327c3a3dc094d63d4dd1e6976c 100644 --- a/drivers/char/adsprpc_shared.h +++ b/drivers/char/adsprpc_shared.h @@ -47,6 +47,9 @@ /* Fastrpc attribute for keeping the map persistent */ #define FASTRPC_ATTR_KEEP_MAP 0x8 +/* Fastrpc attribute for no mapping of fd */ +#define FASTRPC_ATTR_NOMAP (16) + /* Driver should operate in parallel with the co-processor */ #define FASTRPC_MODE_PARALLEL 0 diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile index 954b320342870994146314ed949e2e79c9bdc055..0b017c5aca978cb9fa0d475ebebe9f4c93c279b0 100644 --- a/drivers/char/diag/Makefile +++ b/drivers/char/diag/Makefile @@ -1,6 +1,6 @@ -obj-$(CONFIG_DIAG_CHAR) += diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_peripheral.o diagfwd_socket.o diagfwd_rpmsg.o -obj-$(CONFIG_DIAG_CHAR) += diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o +obj-$(CONFIG_DIAG_CHAR) := diagchar.o obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o obj-$(CONFIG_MHI_BUS) += diagfwd_mhi.o +diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o 
diagfwd_peripheral.o diagfwd_socket.o diagfwd_rpmsg.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index fb9cff2c8ee9eb278a3dd585e1d04737272b711b..df93add2f27242cef14c7250fb552351f163e3c7 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -37,6 +37,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = { .ctx = 0, .mempool = POOL_TYPE_MUX_APPS, .num_tbl_entries = 0, + .md_info_inited = 0, .tbl = NULL, .ops = NULL, }, @@ -46,6 +47,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = { .ctx = 0, .mempool = POOL_TYPE_MDM_MUX, .num_tbl_entries = 0, + .md_info_inited = 0, .tbl = NULL, .ops = NULL, }, @@ -54,6 +56,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = { .ctx = 0, .mempool = POOL_TYPE_MDM2_MUX, .num_tbl_entries = 0, + .md_info_inited = 0, .tbl = NULL, .ops = NULL, }, @@ -62,6 +65,7 @@ struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = { .ctx = 0, .mempool = POOL_TYPE_QSC_MUX, .num_tbl_entries = 0, + .md_info_inited = 0, .tbl = NULL, .ops = NULL, } @@ -85,6 +89,8 @@ void diag_md_open_all(void) for (i = 0; i < NUM_DIAG_MD_DEV; i++) { ch = &diag_md[i]; + if (!ch->md_info_inited) + continue; if (ch->ops && ch->ops->open) ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE); } @@ -99,6 +105,8 @@ void diag_md_close_all(void) for (i = 0; i < NUM_DIAG_MD_DEV; i++) { ch = &diag_md[i]; + if (!ch->md_info_inited) + continue; if (ch->ops && ch->ops->close) ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE); @@ -155,7 +163,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) mutex_unlock(&driver->md_session_lock); ch = &diag_md[id]; - if (!ch) + if (!ch || !ch->md_info_inited) return -EINVAL; spin_lock_irqsave(&ch->lock, flags); @@ -232,6 +240,8 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) { ch = 
&diag_md[i]; + if (!ch->md_info_inited) + continue; for (j = 0; j < ch->num_tbl_entries && !err; j++) { entry = &ch->tbl[j]; if (entry->len <= 0 || entry->buf == NULL) @@ -352,6 +362,8 @@ int diag_md_close_peripheral(int id, uint8_t peripheral) return -EINVAL; ch = &diag_md[id]; + if (!ch || !ch->md_info_inited) + return -EINVAL; spin_lock_irqsave(&ch->lock, flags); for (i = 0; i < ch->num_tbl_entries && !found; i++) { @@ -398,6 +410,7 @@ int diag_md_init(void) ch->tbl[j].ctx = 0; } spin_lock_init(&(ch->lock)); + ch->md_info_inited = 1; } return 0; @@ -426,6 +439,7 @@ int diag_md_mdm_init(void) ch->tbl[j].ctx = 0; } spin_lock_init(&(ch->lock)); + ch->md_info_inited = 1; } return 0; diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h index 4f05ece1334e1f00e34d529a5abbe3bfe144af2c..516209c114c1381e27c643bb69636c4f2db9d581 100644 --- a/drivers/char/diag/diag_memorydevice.h +++ b/drivers/char/diag/diag_memorydevice.h @@ -38,6 +38,7 @@ struct diag_md_info { int ctx; int mempool; int num_tbl_entries; + int md_info_inited; spinlock_t lock; struct diag_buf_tbl_t *tbl; struct diag_mux_ops *ops; diff --git a/drivers/char/fastcvpd.c b/drivers/char/fastcvpd.c index ef043bc852734cae42d0136649a505e2b5ed77d7..279fc71b53a7b06683a79e0aee96251858a63af5 100644 --- a/drivers/char/fastcvpd.c +++ b/drivers/char/fastcvpd.c @@ -14,8 +14,12 @@ #include #include #include +#include #include "linux/fastcvpd.h" +#define VMID_CDSP_Q6 (30) +#define SRC_VM_NUM 1 +#define DEST_VM_NUM 2 #define FASTCVPD_VIDEO_SEND_HFI_CMD_QUEUE 0 #define FASTCVPD_VIDEO_SUSPEND 1 #define FASTCVPD_VIDEO_RESUME 2 @@ -28,14 +32,25 @@ struct fastcvpd_cmd_msg { uint32_t msg_ptr_len; }; +struct fastcvpd_cmd_msg_rsp { + int ret_val; +}; + struct fastcvpd_apps { struct rpmsg_device *chan; struct mutex smd_mutex; int rpmsg_register; + spinlock_t hlock; }; +static struct completion work; + static struct fastcvpd_apps gfa_cv; +static struct fastcvpd_cmd_msg cmd_msg; + +static struct 
fastcvpd_cmd_msg_rsp cmd_msg_rsp; + static int fastcvpd_send_cmd(void *msg, uint32_t len) { struct fastcvpd_apps *me = &gfa_cv; @@ -82,6 +97,14 @@ static void fastcvpd_rpmsg_remove(struct rpmsg_device *rpdev) static int fastcvpd_rpmsg_callback(struct rpmsg_device *rpdev, void *data, int len, void *priv, u32 addr) { + int *rpmsg_resp = (int *)data; + struct fastcvpd_apps *me = &gfa_cv; + + spin_lock(&me->hlock); + cmd_msg_rsp.ret_val = *rpmsg_resp; + spin_unlock(&me->hlock); + complete(&work); + return 0; } @@ -89,17 +112,39 @@ int fastcvpd_video_send_cmd_hfi_queue(phys_addr_t *phys_addr, uint32_t size_in_bytes) { int err; - struct fastcvpd_cmd_msg cmd_msg; + struct fastcvpd_cmd_msg local_cmd_msg; + struct fastcvpd_apps *me = &gfa_cv; + int srcVM[SRC_VM_NUM] = {VMID_HLOS}; + int destVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6}; + int destVMperm[DEST_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC, + PERM_READ | PERM_WRITE | PERM_EXEC }; - cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SEND_HFI_CMD_QUEUE; + local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SEND_HFI_CMD_QUEUE; + local_cmd_msg.msg_ptr = (uint64_t)phys_addr; + local_cmd_msg.msg_ptr_len = size_in_bytes; + mutex_lock(&me->smd_mutex); cmd_msg.msg_ptr = (uint64_t)phys_addr; - cmd_msg.msg_ptr_len = size_in_bytes; + cmd_msg.msg_ptr_len = (size_in_bytes); + mutex_unlock(&me->smd_mutex); + + pr_debug("%s :: address of buffer, PA=0x%pK size_buff=%d\n", + __func__, phys_addr, size_in_bytes); + + err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr, + local_cmd_msg.msg_ptr_len, srcVM, SRC_VM_NUM, destVM, + destVMperm, DEST_VM_NUM); + if (err) { + pr_err("%s: Failed in hyp_assign. 
err=%d\n", + __func__, err); + return err; + } err = fastcvpd_send_cmd - (&cmd_msg, sizeof(struct fastcvpd_cmd_msg)); + (&local_cmd_msg, sizeof(struct fastcvpd_cmd_msg)); if (err != 0) pr_err("%s: fastcvpd_send_cmd failed with err=%d\n", __func__, err); + return err; } EXPORT_SYMBOL(fastcvpd_video_send_cmd_hfi_queue); @@ -107,14 +152,15 @@ EXPORT_SYMBOL(fastcvpd_video_send_cmd_hfi_queue); int fastcvpd_video_suspend(uint32_t session_flag) { int err = 0; - struct fastcvpd_cmd_msg cmd_msg; + struct fastcvpd_cmd_msg local_cmd_msg; - cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SUSPEND; + local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SUSPEND; err = fastcvpd_send_cmd - (&cmd_msg, sizeof(struct fastcvpd_cmd_msg)); + (&local_cmd_msg, sizeof(struct fastcvpd_cmd_msg)); if (err != 0) pr_err("%s: fastcvpd_send_cmd failed with err=%d\n", __func__, err); + return err; } EXPORT_SYMBOL(fastcvpd_video_suspend); @@ -122,29 +168,56 @@ EXPORT_SYMBOL(fastcvpd_video_suspend); int fastcvpd_video_resume(uint32_t session_flag) { int err; - struct fastcvpd_cmd_msg cmd_msg; + struct fastcvpd_cmd_msg local_cmd_msg; - cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_RESUME; + local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_RESUME; err = fastcvpd_send_cmd - (&cmd_msg, sizeof(struct fastcvpd_cmd_msg)); + (&local_cmd_msg, sizeof(struct fastcvpd_cmd_msg)); if (err != 0) pr_err("%s: fastcvpd_send_cmd failed with err=%d\n", __func__, err); + return err; } EXPORT_SYMBOL(fastcvpd_video_resume); int fastcvpd_video_shutdown(uint32_t session_flag) { + struct fastcvpd_apps *me = &gfa_cv; int err; - struct fastcvpd_cmd_msg cmd_msg; + struct fastcvpd_cmd_msg local_cmd_msg; + int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6}; + int destVM[SRC_VM_NUM] = {VMID_HLOS}; + int destVMperm[SRC_VM_NUM] = { PERM_READ | PERM_WRITE | PERM_EXEC }; - cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SHUTDOWN; + local_cmd_msg.cmd_msg_type = FASTCVPD_VIDEO_SHUTDOWN; err = fastcvpd_send_cmd - (&cmd_msg, sizeof(struct fastcvpd_cmd_msg)); + (&local_cmd_msg, 
sizeof(struct fastcvpd_cmd_msg)); if (err != 0) pr_err("%s: fastcvpd_send_cmd failed with err=%d\n", __func__, err); + + wait_for_completion(&work); + + spin_lock(&me->hlock); + local_cmd_msg.msg_ptr = cmd_msg.msg_ptr; + local_cmd_msg.msg_ptr_len = cmd_msg.msg_ptr_len; + if (cmd_msg_rsp.ret_val == 0) { + err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr, + local_cmd_msg.msg_ptr_len, srcVM, DEST_VM_NUM, destVM, + destVMperm, SRC_VM_NUM); + if (err) { + pr_err("%s: Failed to hyp_assign. err=%d\n", + __func__, err); + spin_unlock(&me->hlock); + return err; + } + } else { + pr_err("%s: Skipping hyp_assign as CDSP sent invalid response=%d\n", + __func__, cmd_msg_rsp.ret_val); + } + spin_unlock(&me->hlock); + return err; } EXPORT_SYMBOL(fastcvpd_video_shutdown); @@ -169,7 +242,9 @@ static int __init fastcvpd_device_init(void) struct fastcvpd_apps *me = &gfa_cv; int err; + init_completion(&work); mutex_init(&me->smd_mutex); + spin_lock_init(&me->hlock); err = register_rpmsg_driver(&fastcvpd_rpmsg_client); if (err) { pr_err("%s : register_rpmsg_driver failed with err %d\n", @@ -187,6 +262,7 @@ static void __exit fastcvpd_device_exit(void) { struct fastcvpd_apps *me = &gfa_cv; + mutex_destroy(&me->smd_mutex); if (me->rpmsg_register == 1) unregister_rpmsg_driver(&fastcvpd_rpmsg_client); } diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c index 63d84e6f189180098a893d558b25f4e740677e63..83c695938a2d7a08d65a846c0d23b3a4cb5cbd13 100644 --- a/drivers/char/hw_random/stm32-rng.c +++ b/drivers/char/hw_random/stm32-rng.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #define RNG_CR 0x00 @@ -46,6 +47,7 @@ struct stm32_rng_private { struct hwrng rng; void __iomem *base; struct clk *clk; + struct reset_control *rst; }; static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) @@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + 
priv->rst = devm_reset_control_get(&ofdev->dev, NULL); + if (!IS_ERR(priv->rst)) { + reset_control_assert(priv->rst); + udelay(2); + reset_control_deassert(priv->rst); + } + dev_set_drvdata(dev, priv); priv->rng.name = dev_driver_string(dev), diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 0aea3bcb615848f65d9eef06806cbacf2ae92ed3..6f2eaba1cd6a60f17597d6c7426287ccc228f811 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -763,7 +763,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_info->ssif_state = SSIF_NORMAL; ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Error getting flags: %d %d, %x\n", - result, len, data[2]); + result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { /* @@ -785,7 +785,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, if ((result < 0) || (len < 3) || (data[2] != 0)) { /* Error clearing flags */ pr_warn(PFX "Error clearing flags: %d %d, %x\n", - result, len, data[2]); + result, len, (len >= 3) ? 
data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { pr_warn(PFX "Invalid response clearing flags: %x %x\n", diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b2aa0832a34f556184591456a5f636201ae93aad..23fdf3294e0d946060f1044fd080547f670e0356 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2278,6 +2278,9 @@ static int clk_core_get_phase(struct clk_core *core) int ret; clk_prepare_lock(); + /* Always try to update cached phase if possible */ + if (core->ops->get_phase) + core->phase = core->ops->get_phase(core->hw); ret = core->phase; clk_prepare_unlock(); diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c index 2007123832bb3ffce59e2740f572026a304eb8ef..53450b651e4c709b64324dcd7ee0273124cb0e9f 100644 --- a/drivers/clk/hisilicon/crg-hi3516cv300.c +++ b/drivers/clk/hisilicon/crg-hi3516cv300.c @@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = { /* hi3516CV300 sysctrl CRG */ #define HI3516CV300_SYSCTRL_NR_CLKS 16 -static const char *wdt_mux_p[] __initconst = { "3m", "apb" }; +static const char *const wdt_mux_p[] __initconst = { "3m", "apb" }; static u32 wdt_mux_table[] = {0, 1}; static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = { diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 9d5fbacb03e33642c4ff2e20a1cfc45d84c2a3b9..646565f68e9ac935a01c3b067b6543b89d961149 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -332,3 +332,12 @@ config MDM_DEBUGCC_QCS405 Support for the debug clock controller on Qualcomm Technologies, Inc QCS405 devices. Say Y if you want to support the clock measurement functionality. + +config CLOCK_CPU_QCS405 + bool "CPU QCS405 Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the cpu clock controller on QCS405 + based devices. + Say Y if you want to support CPU clock scaling using + CPUfreq drivers for dynamic power management. 
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index b0026c82358e4b3d8bd05551b8af2e2434cb60d8..b7efc3c1e3b0fb519fda0eaefc8c7f9a96f603da 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -20,6 +20,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o +obj-$(CONFIG_CLOCK_CPU_QCS405) += clk-cpu-qcs405.o obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o diff --git a/drivers/clk/qcom/camcc-sm8150.c b/drivers/clk/qcom/camcc-sm8150.c index 65a4ee63d4c4c8ea1baed70bed94f00f48382a92..919bf7cd1e6059ecc719f8e1194f307a9d1d902f 100644 --- a/drivers/clk/qcom/camcc-sm8150.c +++ b/drivers/clk/qcom/camcc-sm8150.c @@ -143,6 +143,17 @@ static const struct alpha_pll_config cam_cc_pll0_config = { .user_ctl_hi1_val = 0x000000D0, }; +static const struct alpha_pll_config cam_cc_pll0_config_sm8150_v2 = { + .l = 0x3E, + .alpha = 0x8000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + static struct clk_alpha_pll cam_cc_pll0 = { .offset = 0x0, .vco_table = trion_vco, @@ -225,6 +236,17 @@ static const struct alpha_pll_config cam_cc_pll1_config = { .user_ctl_hi1_val = 0x000000D0, }; +static const struct alpha_pll_config cam_cc_pll1_config_sm8150_v2 = { + .l = 0x1F, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + static struct clk_alpha_pll cam_cc_pll1 = { .offset = 0x1000, .vco_table = trion_vco, @@ -333,6 +355,17 @@ static const struct alpha_pll_config cam_cc_pll3_config = { .user_ctl_hi1_val = 0x000000D0, }; +static 
const struct alpha_pll_config cam_cc_pll3_config_sm8150_v2 = { + .l = 0x29, + .alpha = 0xAAAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + static struct clk_alpha_pll cam_cc_pll3 = { .offset = 0x3000, .vco_table = trion_vco, @@ -384,6 +417,17 @@ static const struct alpha_pll_config cam_cc_pll4_config = { .user_ctl_hi1_val = 0x000000D0, }; +static const struct alpha_pll_config cam_cc_pll4_config_sm8150_v2 = { + .l = 0x29, + .alpha = 0xAAAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + static struct clk_alpha_pll cam_cc_pll4 = { .offset = 0x4000, .vco_table = trion_vco, @@ -750,6 +794,16 @@ static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = { { } }; +static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src_sm8150_v2[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(558000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(847000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + F(950000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0), + { } +}; + static struct clk_rcg2 cam_cc_ife_0_clk_src = { .cmd_rcgr = 0xa010, .mnd_width = 0, @@ -815,6 +869,16 @@ static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = { { } }; +static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src_sm8150_v2[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(400000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(558000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(847000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + F(950000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0), + { } +}; + static struct clk_rcg2 cam_cc_ife_1_clk_src = { .cmd_rcgr = 0xb010, .mnd_width = 0, @@ -972,6 +1036,15 @@ 
static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = { { } }; +static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src_sm8150_v2[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(475000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0), + { } +}; + static struct clk_rcg2 cam_cc_ipe_0_clk_src = { .cmd_rcgr = 0x8010, .mnd_width = 0, @@ -989,8 +1062,8 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_MIN] = 19200000, - [VDD_LOWER] = 375000000, - [VDD_LOW] = 475000000, + [VDD_LOWER] = 300000000, + [VDD_LOW] = 450000000, [VDD_LOW_L1] = 520000000, [VDD_NOMINAL] = 600000000}, }, @@ -2302,10 +2375,47 @@ static const struct qcom_cc_desc cam_cc_sm8150_desc = { static const struct of_device_id cam_cc_sm8150_match_table[] = { { .compatible = "qcom,camcc-sm8150" }, + { .compatible = "qcom,camcc-sm8150-v2" }, { } }; MODULE_DEVICE_TABLE(of, cam_cc_sm8150_match_table); +static void cam_cc_sm8150_fixup_sm8150v2(struct regmap *regmap) +{ + clk_trion_pll_configure(&cam_cc_pll0, regmap, + &cam_cc_pll0_config_sm8150_v2); + clk_trion_pll_configure(&cam_cc_pll1, regmap, + &cam_cc_pll1_config_sm8150_v2); + clk_trion_pll_configure(&cam_cc_pll3, regmap, + &cam_cc_pll3_config_sm8150_v2); + clk_trion_pll_configure(&cam_cc_pll4, regmap, + &cam_cc_pll4_config_sm8150_v2); + cam_cc_ife_0_clk_src.freq_tbl = ftbl_cam_cc_ife_0_clk_src_sm8150_v2; + cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 847000000; + cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 950000000; + cam_cc_ife_1_clk_src.freq_tbl = ftbl_cam_cc_ife_1_clk_src_sm8150_v2; + cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 847000000; + cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 950000000; + cam_cc_ipe_0_clk_src.freq_tbl = ftbl_cam_cc_ipe_0_clk_src_sm8150_v2; + 
cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 475000000; +} + +static int cam_cc_sm8150_fixup(struct platform_device *pdev, + struct regmap *regmap) +{ + const char *compat = NULL; + int compatlen = 0; + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if (!strcmp(compat, "qcom,camcc-sm8150-v2")) + cam_cc_sm8150_fixup_sm8150v2(regmap); + + return 0; +} + static int cam_cc_sm8150_probe(struct platform_device *pdev) { struct regmap *regmap; @@ -2342,6 +2452,10 @@ static int cam_cc_sm8150_probe(struct platform_device *pdev) return PTR_ERR(vdd_mm.regulator[0]); } + ret = cam_cc_sm8150_fixup(pdev, regmap); + if (ret) + return ret; + clk_trion_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config); clk_trion_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config); clk_regera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index b3c680049b99986b59266ba1e516cc99e1b9bf97..f23f88a3a116f2899feaf63ffcf4317b17e6e215 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -724,23 +724,15 @@ int clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, { int ret = 0; + if (pll->inited) + return ret; + if (trion_pll_is_enabled(pll, regmap)) { pr_warn("PLL is already enabled. Skipping configuration.\n"); pll->inited = true; return ret; } - /* - * Disable the PLL if it's already been initialized. Not doing so might - * lead to the PLL running with the old frequency configuration. 
- */ - if (pll->inited) { - ret = regmap_update_bits(regmap, pll->offset + PLL_MODE, - PLL_RESET_N, 0); - if (ret) - return ret; - } - if (config->l) regmap_write(regmap, pll->offset + PLL_L_VAL, config->l); @@ -1032,7 +1024,10 @@ int clk_regera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config) { u32 mode_regval; - int ret; + int ret = 0; + + if (pll->inited) + return ret; ret = regmap_read(regmap, pll->offset + PLL_MODE, &mode_regval); if (ret) @@ -1497,7 +1492,7 @@ static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); unsigned long freq_hz; - const struct pll_vco *curr_vco, *vco; + const struct pll_vco *curr_vco = NULL, *vco; u32 l, ctl; u64 a; int i = 0; @@ -1563,7 +1558,7 @@ static int clk_alpha_pll_calibrate(struct clk_hw *hw) unsigned long calibration_freq, freq_hz; struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); struct clk_hw *parent; - const struct pll_vco *vco; + const struct pll_vco *vco = NULL; u64 a; u32 l, ctl; int rc, i = 0; diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c index ed8acfb0367c54ebaedecaff378a18b28d0f475c..ad5836569be37fd7c9f1d00035e794d87691d3b6 100644 --- a/drivers/clk/qcom/clk-branch.c +++ b/drivers/clk/qcom/clk-branch.c @@ -83,8 +83,11 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling, const struct clk_hw *hw = &br->clkr.hw; const char *name = clk_hw_get_name(hw); - /* Skip checking halt bit if the clock is in hardware gated mode */ - if (clk_branch_in_hwcg_mode(br)) + /* + * Skip checking halt bit if we're explicitly ignoring the bit or the + * clock is in hardware gated mode + */ + if (br->halt_check == BRANCH_HALT_SKIP || clk_branch_in_hwcg_mode(br)) return 0; if (br->halt_check == BRANCH_HALT_DELAY || (!enabling && voted)) { diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h index 
b4f05da12a99219a2c5e6a8b5257e75b20b226e9..e2ad7e74371f43c569314ba077a34e8cb70240da 100644 --- a/drivers/clk/qcom/clk-branch.h +++ b/drivers/clk/qcom/clk-branch.h @@ -46,6 +46,7 @@ struct clk_branch { #define BRANCH_HALT_ENABLE 1 /* pol: 0 = halt */ #define BRANCH_HALT_ENABLE_VOTED (BRANCH_HALT_ENABLE | BRANCH_VOTED) #define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */ +#define BRANCH_HALT_SKIP 3 /* Don't check halt bit */ struct clk_regmap clkr; }; diff --git a/drivers/clk/qcom/clk-cpu-qcs405.c b/drivers/clk/qcom/clk-cpu-qcs405.c new file mode 100644 index 0000000000000000000000000000000000000000..ec920f62d68f4a58f3bf0bc4c6b32dad80c0a147 --- /dev/null +++ b/drivers/clk/qcom/clk-cpu-qcs405.c @@ -0,0 +1,763 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-alpha-pll.h" +#include "clk-pll.h" +#include "clk-debug.h" +#include "clk-rcg.h" +#include "clk-regmap-mux-div.h" +#include "common.h" +#include "vdd-level-405.h" + +#define to_clk_regmap_mux_div(_hw) \ + container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr) + +#define REG_OFFSET 0x4 +#define APCS_PLL 0x0b016000 +#define APCS_CMD 0x0b011050 +#define XO_RATE 19200000 + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGS_INIT(vdd_cpu, 1); +static unsigned int cpucc_clk_init_rate; + +enum apcs_mux_clk_parent { + P_BI_TCXO_AO, + P_GPLL0_AO_OUT_MAIN, + P_APCS_CPU_PLL, +}; + +static const struct parent_map apcs_mux_clk_parent_map[] = { + { P_BI_TCXO_AO, 0 }, + { P_GPLL0_AO_OUT_MAIN, 4 }, + { P_APCS_CPU_PLL, 5 }, +}; + +static const char *const apcs_mux_clk_parent_name[] = { + "cxo_a", + "gpll0_ao_out_main", + "apcs_cpu_pll", +}; + +static int cpucc_clk_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long prate, u8 index) +{ + struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw); + + return __mux_div_set_src_div(cpuclk, cpuclk->parent_map[index].cfg, + cpuclk->div); +} + +static int cpucc_clk_set_parent(struct clk_hw *hw, u8 index) +{ + /* + * Since cpucc_clk_set_rate_and_parent() is defined and set_parent() + * will never gets called from clk_change_rate() so return 0. + */ + return 0; +} + +static int cpucc_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw); + + /* + * Parent is same as the last rate. + * Here just configure new div. 
+ */ + return __mux_div_set_src_div(cpuclk, cpuclk->src, cpuclk->div); +} + +static int cpucc_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_hw *xo, *apc0_auxclk_hw, *apcs_cpu_pll_hw; + struct clk_rate_request parent_req = { }; + struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw); + unsigned long apc0_auxclk_rate, rate = req->rate; + unsigned long mask = BIT(cpuclk->hid_width) - 1; + int ret; + u32 div = 1; + + xo = clk_hw_get_parent_by_index(hw, P_BI_TCXO_AO); + if (rate == clk_hw_get_rate(xo)) { + req->best_parent_hw = xo; + req->best_parent_rate = rate; + cpuclk->div = div; + cpuclk->src = cpuclk->parent_map[P_BI_TCXO_AO].cfg; + return 0; + } + + apc0_auxclk_hw = clk_hw_get_parent_by_index(hw, P_GPLL0_AO_OUT_MAIN); + apcs_cpu_pll_hw = clk_hw_get_parent_by_index(hw, P_APCS_CPU_PLL); + + apc0_auxclk_rate = clk_hw_get_rate(apc0_auxclk_hw); + if (rate <= apc0_auxclk_rate) { + req->best_parent_hw = apc0_auxclk_hw; + req->best_parent_rate = apc0_auxclk_rate; + + div = DIV_ROUND_UP((2 * req->best_parent_rate), rate) - 1; + div = min_t(unsigned long, div, mask); + + req->rate = clk_rcg2_calc_rate(req->best_parent_rate, 0, + 0, 0, div); + cpuclk->src = cpuclk->parent_map[P_GPLL0_AO_OUT_MAIN].cfg; + } else { + parent_req.rate = rate; + parent_req.best_parent_hw = apcs_cpu_pll_hw; + + req->best_parent_hw = apcs_cpu_pll_hw; + ret = __clk_determine_rate(req->best_parent_hw, &parent_req); + if (ret) + return ret; + + req->best_parent_rate = parent_req.rate; + cpuclk->src = cpuclk->parent_map[P_APCS_CPU_PLL].cfg; + } + cpuclk->div = div; + + return 0; +} + +static void cpucc_clk_list_registers(struct seq_file *f, struct clk_hw *hw) +{ + struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw); + int i = 0, size = 0, val; + + static struct clk_register_data data[] = { + {"CMD_RCGR", 0x0}, + {"CFG_RCGR", 0x4}, + }; + + size = ARRAY_SIZE(data); + for (i = 0; i < size; i++) { + regmap_read(cpuclk->clkr.regmap, + 
cpuclk->reg_offset + data[i].offset, &val); + seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val); + } +} + +static unsigned long cpucc_clk_recalc_rate(struct clk_hw *hw, + unsigned long prate) +{ + struct clk_regmap_mux_div *cpuclk = to_clk_regmap_mux_div(hw); + struct clk_hw *parent; + const char *name = clk_hw_get_name(hw); + unsigned long parent_rate; + u32 i, div, src = 0; + u32 num_parents = clk_hw_get_num_parents(hw); + int ret = 0; + + ret = mux_div_get_src_div(cpuclk, &src, &div); + if (ret) + return ret; + + cpuclk->src = src; + cpuclk->div = div; + + for (i = 0; i < num_parents; i++) { + if (src == cpuclk->parent_map[i].cfg) { + parent = clk_hw_get_parent_by_index(hw, i); + parent_rate = clk_hw_get_rate(parent); + return clk_rcg2_calc_rate(parent_rate, 0, 0, 0, div); + } + } + pr_err("%s: Can't find parent %d\n", name, src); + + return ret; +} + +static int cpucc_clk_enable(struct clk_hw *hw) +{ + return clk_regmap_mux_div_ops.enable(hw); +} + +static void cpucc_clk_disable(struct clk_hw *hw) +{ + clk_regmap_mux_div_ops.disable(hw); +} + +static u8 cpucc_clk_get_parent(struct clk_hw *hw) +{ + return clk_regmap_mux_div_ops.get_parent(hw); +} + +/* + * We use the notifier function for switching to a temporary safe configuration + * (mux and divider), while the APSS pll is reconfigured. 
+ */ +static int cpucc_notifier_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct clk_regmap_mux_div *cpuclk = container_of(nb, + struct clk_regmap_mux_div, clk_nb); + int ret = 0; + int safe_src = cpuclk->safe_src; + + if (event == PRE_RATE_CHANGE) + /* set the mux to safe source(sys_apc0_aux_clk) & div */ + ret = __mux_div_set_src_div(cpuclk, safe_src, 1); + + if (event == ABORT_RATE_CHANGE) + pr_err("Error in configuring PLL - stay at safe src only\n"); + + return notifier_from_errno(ret); +} + +static const struct clk_ops cpucc_clk_ops = { + .enable = cpucc_clk_enable, + .disable = cpucc_clk_disable, + .get_parent = cpucc_clk_get_parent, + .set_rate = cpucc_clk_set_rate, + .set_parent = cpucc_clk_set_parent, + .set_rate_and_parent = cpucc_clk_set_rate_and_parent, + .determine_rate = cpucc_clk_determine_rate, + .recalc_rate = cpucc_clk_recalc_rate, + .debug_init = clk_debug_measure_add, + .list_registers = cpucc_clk_list_registers, +}; + +/* Initial configuration for 960MHz */ +static const struct pll_config apcs_cpu_pll_config = { + .l = 0x32, + .m = 0, + .n = 1, + .vco_val = 0x0, + .vco_mask = 0x3 << 20, + .pre_div_val = 0x0, + .pre_div_mask = 0x7 << 12, + .post_div_val = 0x0, + .post_div_mask = 0x3 << 8, + .main_output_mask = BIT(3), + .aux_output_mask = BIT(0), +}; + +static struct clk_pll apcs_cpu_pll = { + .mode_reg = 0x0, + .l_reg = 0x4, + .m_reg = 0x8, + .n_reg = 0xc, + .config_reg = 0x10, + .status_reg = 0x1c, + .status_bit = 16, + .clkr.hw.init = &(struct clk_init_data){ + .name = "apcs_cpu_pll", + .parent_names = (const char *[]){ "cxo_a" }, + .num_parents = 1, + .ops = &clk_pll_hf_ops, + .vdd_class = &vdd_cx, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOW] = 1000000000, + [VDD_NOMINAL] = 2000000000, + }, + .num_rate_max = VDD_NUM, + }, +}; + +static struct clk_regmap_mux_div apcs_mux_clk = { + .reg_offset = 0x0, + .hid_width = 5, + .hid_shift = 0, + .src_width = 3, + .src_shift = 8, + .safe_src = 4, + .safe_div = 1, + 
.parent_map = apcs_mux_clk_parent_map, + .clk_nb.notifier_call = cpucc_notifier_cb, + .clkr.hw.init = &(struct clk_init_data) { + .name = "apcs_mux_clk", + .parent_names = apcs_mux_clk_parent_name, + .num_parents = 3, + .vdd_class = &vdd_cpu, + .flags = CLK_SET_RATE_PARENT, + .ops = &cpucc_clk_ops, + }, +}; + +static const struct of_device_id match_table[] = { + { .compatible = "qcom,cpu-qcs405" }, + {} +}; + +static const struct regmap_config cpu_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x34, + .fast_io = true, +}; + +static struct clk_hw *cpu_clks_hws[] = { + [APCS_CPU_PLL] = &apcs_cpu_pll.clkr.hw, + [APCS_MUX_CLK] = &apcs_mux_clk.clkr.hw, +}; + +static void cpucc_clk_get_speed_bin(struct platform_device *pdev, int *bin, + int *version) +{ + struct resource *res; + u32 pte_efuse, valid; + void __iomem *base; + + *bin = 0; + *version = 0; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse"); + if (!res) { + dev_info(&pdev->dev, + "No speed/PVS binning available. Defaulting to 0!\n"); + return; + } + + base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!base) { + dev_info(&pdev->dev, + "Unable to read efuse data. Defaulting to 0!\n"); + return; + } + + pte_efuse = readl_relaxed(base); + devm_iounmap(&pdev->dev, base); + + *bin = pte_efuse & 0x7; + valid = ((pte_efuse >> 3) & 0x1) ? 
((pte_efuse >> 3) & 0x1) : 0; + *version = (pte_efuse >> 4) & 0x3; + + dev_info(&pdev->dev, "PVS version: %d bin: %d\n", *version, *bin); +} + +static int cpucc_clk_get_fmax_vdd_class(struct platform_device *pdev, + struct clk_init_data *clk_intd, char *prop_name) +{ + struct device_node *of = pdev->dev.of_node; + struct clk_vdd_class *vdd = clk_intd->vdd_class; + int prop_len, i, j, ret; + int num = vdd->num_regulators + 1; + u32 *array; + + if (!of_find_property(of, prop_name, &prop_len)) { + dev_err(&pdev->dev, "missing %s\n", prop_name); + return -EINVAL; + } + + prop_len /= sizeof(u32); + if (prop_len % num) { + dev_err(&pdev->dev, "bad length %d\n", prop_len); + return -EINVAL; + } + + prop_len /= num; + vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int), + GFP_KERNEL); + if (!vdd->level_votes) + return -ENOMEM; + + vdd->vdd_uv = devm_kzalloc(&pdev->dev, + prop_len * sizeof(int) * (num - 1), GFP_KERNEL); + if (!vdd->vdd_uv) + return -ENOMEM; + + clk_intd->rate_max = devm_kzalloc(&pdev->dev, + prop_len * sizeof(unsigned long), GFP_KERNEL); + if (!clk_intd->rate_max) + return -ENOMEM; + + array = devm_kzalloc(&pdev->dev, + prop_len * sizeof(u32) * num, GFP_KERNEL); + if (!array) + return -ENOMEM; + + ret = of_property_read_u32_array(of, prop_name, array, prop_len * num); + if (ret) + return -ENOMEM; + + for (i = 0; i < prop_len; i++) { + clk_intd->rate_max[i] = array[num * i]; + for (j = 1; j < num; j++) { + vdd->vdd_uv[(num - 1) * i + (j - 1)] = + array[num * i + j]; + } + } + + devm_kfree(&pdev->dev, array); + vdd->num_levels = prop_len; + vdd->cur_level = prop_len; + clk_intd->num_rate_max = prop_len; + + return 0; +} + +/* + * Find the voltage level required for a given clock rate. 
+ */ +static int find_vdd_level(struct clk_init_data *clk_intd, unsigned long rate) +{ + int level; + + for (level = 0; level < clk_intd->num_rate_max; level++) + if (rate <= clk_intd->rate_max[level]) + break; + + if (level == clk_intd->num_rate_max) { + pr_err("Rate %lu for %s is greater than highest Fmax\n", rate, + clk_intd->name); + return -EINVAL; + } + + return level; +} + +static int +cpucc_clk_add_opp(struct clk_hw *hw, struct device *dev, unsigned long max_rate) +{ + struct clk_init_data *clk_intd = (struct clk_init_data *)hw->init; + struct clk_vdd_class *vdd = clk_intd->vdd_class; + unsigned long rate = 0; + long ret; + int level, uv, j = 1; + + if (IS_ERR_OR_NULL(dev)) { + pr_err("%s: Invalid parameters\n", __func__); + return -EINVAL; + } + + while (1) { + rate = clk_intd->rate_max[j++]; + level = find_vdd_level(clk_intd, rate); + if (level <= 0) { + pr_warn("clock-cpu: no corner for %lu.\n", rate); + return -EINVAL; + } + + uv = vdd->vdd_uv[level]; + if (uv < 0) { + pr_warn("clock-cpu: no uv for %lu.\n", rate); + return -EINVAL; + } + + ret = dev_pm_opp_add(dev, rate, uv); + if (ret) { + pr_warn("clock-cpu: failed to add OPP for %lu\n", rate); + return rate; + } + + if (rate >= max_rate) + break; + } + + return 0; +} + +static void cpucc_clk_print_opp_table(int cpu) +{ + struct dev_pm_opp *oppfmax, *oppfmin; + unsigned long apc_fmax, apc_fmin; + u32 max_cpuss_index = apcs_mux_clk.clkr.hw.init->num_rate_max; + + apc_fmax = apcs_mux_clk.clkr.hw.init->rate_max[max_cpuss_index - 1]; + apc_fmin = apcs_mux_clk.clkr.hw.init->rate_max[1]; + + rcu_read_lock(); + + oppfmax = dev_pm_opp_find_freq_exact(get_cpu_device(cpu), + apc_fmax, true); + oppfmin = dev_pm_opp_find_freq_exact(get_cpu_device(cpu), + apc_fmin, true); + pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", cpu, apc_fmin, + dev_pm_opp_get_voltage(oppfmin)); + pr_info("Clock_cpu:(cpu %d) OPP voltage for %lu: %ld\n", cpu, apc_fmax, + dev_pm_opp_get_voltage(oppfmax)); + + rcu_read_unlock(); +} 
+ +static void cpucc_clk_populate_opp_table(struct platform_device *pdev) +{ + unsigned long apc_fmax; + int cpu, qcs405_cpu = 0; + u32 max_cpuss_index = apcs_mux_clk.clkr.hw.init->num_rate_max; + + apc_fmax = apcs_mux_clk.clkr.hw.init->rate_max[max_cpuss_index - 1]; + + for_each_possible_cpu(cpu) { + qcs405_cpu = cpu; + WARN(cpucc_clk_add_opp(&apcs_mux_clk.clkr.hw, + get_cpu_device(cpu), apc_fmax), + "Failed to add OPP levels for apcs_mux_clk\n"); + } + cpucc_clk_print_opp_table(qcs405_cpu); +} + +static int cpucc_driver_probe(struct platform_device *pdev) +{ + struct resource *res; + struct clk_hw_onecell_data *data; + struct device *dev = &pdev->dev; + struct device_node *of = pdev->dev.of_node; + struct clk *clk; + u32 rate = 0; + int i, ret, speed_bin, version, cpu; + char prop_name[] = "qcom,speedX-bin-vX"; + void __iomem *base; + + /* Require the RPM-XO clock to be registered before */ + clk = devm_clk_get(dev, "xo_ao"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "Unable to get xo clock\n"); + return PTR_ERR(clk); + } + + clk = devm_clk_get(dev, "gpll0_ao"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "Unable to get GPLL0 clock\n"); + return PTR_ERR(clk); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_pll"); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed to map apcs_cpu_pll register base\n"); + return PTR_ERR(base); + } + + apcs_cpu_pll.clkr.regmap = devm_regmap_init_mmio(dev, base, + &cpu_regmap_config); + if (IS_ERR(apcs_cpu_pll.clkr.regmap)) { + dev_err(&pdev->dev, "Couldn't get regmap for apcs_cpu_pll\n"); + return PTR_ERR(apcs_cpu_pll.clkr.regmap); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_cmd"); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "Failed to map apcs_cmd register base\n"); + return PTR_ERR(base); + } + + apcs_mux_clk.clkr.regmap = 
devm_regmap_init_mmio(dev, base, + &cpu_regmap_config); + if (IS_ERR(apcs_mux_clk.clkr.regmap)) { + dev_err(&pdev->dev, "Couldn't get regmap for apcs_cmd\n"); + return PTR_ERR(apcs_mux_clk.clkr.regmap); + } + + /* Rail Regulator for apcs_pll */ + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_ao"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_dig_ao regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + /* Rail Regulator for APSS cpuss mux */ + vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd"); + if (IS_ERR(vdd_cpu.regulator[0])) { + if (!(PTR_ERR(vdd_cpu.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get cpu-vdd regulator\n"); + return PTR_ERR(vdd_cpu.regulator[0]); + } + + /* Get speed bin information */ + cpucc_clk_get_speed_bin(pdev, &speed_bin, &version); + + snprintf(prop_name, ARRAY_SIZE(prop_name), + "qcom,speed%d-bin-v%d", speed_bin, version); + + ret = cpucc_clk_get_fmax_vdd_class(pdev, + (struct clk_init_data *)apcs_mux_clk.clkr.hw.init, prop_name); + if (ret) { + dev_err(&pdev->dev, + "Can't get speed bin for apcs_mux_clk. 
Falling back to zero\n"); + ret = cpucc_clk_get_fmax_vdd_class(pdev, + (struct clk_init_data *) + apcs_mux_clk.clkr.hw.init, + "qcom,speed0-bin-v0"); + if (ret) { + dev_err(&pdev->dev, + "Unable to get speed bin for apcs_mux_clk freq-corner mapping info\n"); + return ret; + } + } + + ret = of_property_read_u32(of, "qcom,cpucc-init-rate", &rate); + if (ret || !rate) + dev_err(&pdev->dev, "Init rate for clock not defined\n"); + + cpucc_clk_init_rate = max(cpucc_clk_init_rate, rate); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->num = ARRAY_SIZE(cpu_clks_hws); + + /* Register clocks with clock framework */ + for (i = 0; i < ARRAY_SIZE(cpu_clks_hws); i++) { + ret = devm_clk_hw_register(dev, cpu_clks_hws[i]); + if (ret) { + dev_err(&pdev->dev, "Failed to register clock\n"); + return ret; + } + data->hws[i] = cpu_clks_hws[i]; + } + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, data); + if (ret) { + dev_err(&pdev->dev, "CPU clock driver registeration failed\n"); + return ret; + } + + ret = clk_notifier_register(apcs_mux_clk.clkr.hw.clk, + &apcs_mux_clk.clk_nb); + if (ret) { + dev_err(dev, "failed to register clock notifier: %d\n", ret); + return ret; + } + + /* Set to boot frequency */ + ret = clk_set_rate(apcs_mux_clk.clkr.hw.clk, cpucc_clk_init_rate); + if (ret) + dev_err(&pdev->dev, "Unable to set init rate on apcs_mux_clk\n"); + + /* + * We don't want the CPU clocks to be turned off at late init + * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the + * refcount of these clocks. Any cpufreq/hotplug manager can assume + * that the clocks have already been prepared and enabled by the time + * they take over. 
+ */ + get_online_cpus(); + for_each_online_cpu(cpu) + WARN(clk_prepare_enable(apcs_mux_clk.clkr.hw.clk), + "Unable to turn on CPU clock\n"); + put_online_cpus(); + + cpucc_clk_populate_opp_table(pdev); + dev_info(dev, "CPU clock Driver probed successfully\n"); + + return ret; +} + +static struct platform_driver cpu_clk_driver = { + .probe = cpucc_driver_probe, + .driver = { + .name = "qcom-cpu-qcs405", + .of_match_table = match_table, + }, +}; + +static int __init cpu_clk_init(void) +{ + return platform_driver_register(&cpu_clk_driver); +} +subsys_initcall(cpu_clk_init); + +static void __exit cpu_clk_exit(void) +{ + platform_driver_unregister(&cpu_clk_driver); +} +module_exit(cpu_clk_exit); + +static void enable_hf_pll(void __iomem *base) +{ + writel_relaxed(0x2, base + apcs_cpu_pll.mode_reg); + udelay(2); + writel_relaxed(0x6, base + apcs_cpu_pll.mode_reg); + udelay(50); + writel_relaxed(0x7, base + apcs_cpu_pll.mode_reg); + /* Ensure that the writes go through before enabling PLL */ + mb(); +} + +static void __init configure_hf_pll(void __iomem *base) +{ + /* Disable Mode */ + writel_relaxed(0x0, base + apcs_cpu_pll.mode_reg); + + /* Configure L/M/N values */ + writel_relaxed(apcs_cpu_pll_config.l, base + apcs_cpu_pll.l_reg); + writel_relaxed(apcs_cpu_pll_config.m, base + apcs_cpu_pll.m_reg); + writel_relaxed(apcs_cpu_pll_config.n, base + apcs_cpu_pll.n_reg); + + /* Configure USER_CTL value */ + writel_relaxed(0x0100000f, base + apcs_cpu_pll.config_reg); +} + +static int __init cpu_clock_init(void) +{ + struct device_node *dev; + void __iomem *base; + int count, regval = 0, l_val; + unsigned long enable_mask = 0x7; + + dev = of_find_compatible_node(NULL, NULL, "qcom,cpu-qcs405"); + if (!dev) { + pr_debug("device node not initialized\n"); + return -ENOMEM; + } + + base = ioremap_nocache(APCS_PLL, SZ_64); + if (!base) + return -ENOMEM; + + l_val = readl_relaxed(base + apcs_cpu_pll.l_reg); + if (!l_val) + configure_hf_pll(base); + + cpucc_clk_init_rate = l_val * 
XO_RATE; + + regval = readl_relaxed(base); + if (!((regval & enable_mask) == enable_mask)) + enable_hf_pll(base); + + iounmap(base); + + base = ioremap_nocache(APCS_CMD, SZ_8); + if (!base) + return -ENOMEM; + + writel_relaxed(0x501, base + REG_OFFSET); + + /* Update bit */ + regval = readl_relaxed(base); + regval |= BIT(0); + writel_relaxed(regval, base); + + /* Wait for update to take effect */ + for (count = 500; count > 0; count--) { + if (!(readl_relaxed(base) & BIT(0))) + break; + udelay(1); + } + + return 0; +} +early_initcall(cpu_clock_init); + +MODULE_ALIAS("platform:cpu"); +MODULE_DESCRIPTION("QCS405 CPU clock Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c index cb6cb8710daf11c0936e525fd1698caa440b884d..e6220a0b8d2e9581c3a214a1bf52281bd141cf3b 100644 --- a/drivers/clk/qcom/clk-pll.c +++ b/drivers/clk/qcom/clk-pll.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -28,6 +28,7 @@ #define PLL_OUTCTRL BIT(0) #define PLL_BYPASSNL BIT(1) #define PLL_RESET_N BIT(2) +#define XO_RATE 19200000 static int clk_pll_enable(struct clk_hw *hw) { @@ -138,9 +139,11 @@ clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) f = find_freq(pll->freq_tbl, req->rate); if (!f) - req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate); + req->rate = DIV_ROUND_UP_ULL(req->rate, req->best_parent_rate) + * req->best_parent_rate; + else - req->rate = f->freq; + req->rate = f->freq; return 0; } @@ -342,3 +345,63 @@ const struct clk_ops clk_pll_sr2_ops = { .determine_rate = clk_pll_determine_rate, }; EXPORT_SYMBOL_GPL(clk_pll_sr2_ops); + +static int +clk_pll_hf_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) +{ + struct clk_pll *pll = to_clk_pll(hw); + bool enabled; + u32 mode, l_val; + u32 enable_mask = PLL_OUTCTRL | PLL_BYPASSNL | PLL_RESET_N; + + l_val = rate / XO_RATE; + + regmap_read(pll->clkr.regmap, pll->mode_reg, &mode); + enabled = (mode & enable_mask) == enable_mask; + + if (enabled) + clk_pll_disable(hw); + + regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, l_val); + regmap_update_bits(pll->clkr.regmap, pll->m_reg, 0x7ffff, 0); + regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, 1); + + if (enabled) + clk_pll_sr2_enable(hw); + + return 0; +} + +static void clk_pll_hf_list_registers(struct seq_file *f, struct clk_hw *hw) +{ + struct clk_pll *pll = to_clk_pll(hw); + int size, i, val; + + static struct clk_register_data data[] = { + {"PLL_MODE", 0x0}, + {"PLL_L_VAL", 0x4}, + {"PLL_M_VAL", 0x8}, + {"PLL_N_VAL", 0xC}, + {"PLL_USER_CTL", 0x10}, + {"PLL_CONFIG_CTL", 0x14}, + {"PLL_STATUS_CTL", 0x1C}, + }; + + size = ARRAY_SIZE(data); + + for (i = 0; i < size; i++) { + regmap_read(pll->clkr.regmap, pll->mode_reg + data[i].offset, + &val); + 
seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val); + } +} + +const struct clk_ops clk_pll_hf_ops = { + .enable = clk_pll_sr2_enable, + .disable = clk_pll_disable, + .set_rate = clk_pll_hf_set_rate, + .recalc_rate = clk_pll_recalc_rate, + .determine_rate = clk_pll_determine_rate, + .list_registers = clk_pll_hf_list_registers, +}; +EXPORT_SYMBOL(clk_pll_hf_ops); diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h index ffd0c63bddbc41d78e65509bd4091826a3ab5408..bedd84352a431d5b00251a261bd4751ac70f9134 100644 --- a/drivers/clk/qcom/clk-pll.h +++ b/drivers/clk/qcom/clk-pll.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -63,6 +63,7 @@ struct clk_pll { extern const struct clk_ops clk_pll_ops; extern const struct clk_ops clk_pll_vote_ops; extern const struct clk_ops clk_pll_sr2_ops; +extern const struct clk_ops clk_pll_hf_ops; #define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr) diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index fa313737e65bdcc254fbad86b97763868b10c57e..79a6975ac5eec727ac0c8bd74927f4e6d9bf319e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -605,14 +605,14 @@ DEFINE_CLK_SMD_RPM_QDSS(qcs405, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); DEFINE_CLK_SMD_RPM(qcs405, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0); DEFINE_CLK_SMD_RPM(qcs405, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); - +DEFINE_CLK_SMD_RPM(qcs405, bimc_gpu_clk, bimc_gpu_a_clk, + QCOM_SMD_RPM_MEM_CLK, 0); /* SMD_XO_BUFFER */ -DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, bb_clk1, bb_clk1_a, 1); -DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, bb_clk2, bb_clk2_a, 2); -DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, rf_clk2, rf_clk2_a, 5); 
-DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, div_clk2, div_clk2_a, 0xc); -DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs405, bb_clk1_pin, bb_clk1_a_pin, 1); -DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs405, bb_clk2_pin, bb_clk2_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, ln_bb_clk, ln_bb_clk_a, 8); +DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, div_clk1, div_clk1_a, 0xb); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs405, ln_bb_clk_pin, ln_bb_clk_a_pin, 8); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs405, rf_clk1_pin, rf_clk1_a_pin, 4); /* Voter clocks */ static DEFINE_CLK_VOTER(pnoc_msmbus_clk, pnoc_clk, LONG_MAX); @@ -648,6 +648,7 @@ static DEFINE_CLK_BRANCH_VOTER(cxo_pil_pronto_clk, cxo); static DEFINE_CLK_BRANCH_VOTER(cxo_pil_mss_clk, cxo); static DEFINE_CLK_BRANCH_VOTER(cxo_wlan_clk, cxo); static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, cxo); +static DEFINE_CLK_BRANCH_VOTER(cxo_pil_cdsp_clk, cxo); static struct clk_hw *qcs405_clks[] = { [RPM_SMD_XO_CLK_SRC] = &qcs405_cxo.hw, @@ -658,24 +659,24 @@ static struct clk_hw *qcs405_clks[] = { [RPM_SMD_BIMC_A_CLK] = &qcs405_bimc_a_clk.hw, [RPM_SMD_QDSS_CLK] = &qcs405_qdss_clk.hw, [RPM_SMD_QDSS_A_CLK] = &qcs405_qdss_a_clk.hw, - [RPM_SMD_BB_CLK1] = &qcs405_bb_clk1.hw, - [RPM_SMD_BB_CLK1_A] = &qcs405_bb_clk1_a.hw, - [RPM_SMD_BB_CLK2] = &qcs405_bb_clk2.hw, - [RPM_SMD_BB_CLK2_A] = &qcs405_bb_clk2_a.hw, - [RPM_SMD_RF_CLK2] = &qcs405_rf_clk2.hw, - [RPM_SMD_RF_CLK2_A] = &qcs405_rf_clk2_a.hw, - [RPM_SMD_BB_CLK1_PIN] = &qcs405_bb_clk1_pin.hw, - [RPM_SMD_BB_CLK1_A_PIN] = &qcs405_bb_clk1_a_pin.hw, - [RPM_SMD_BB_CLK2_PIN] = &qcs405_bb_clk2_pin.hw, - [RPM_SMD_BB_CLK2_A_PIN] = &qcs405_bb_clk2_a_pin.hw, - [RPM_SMD_DIV_CLK2] = &qcs405_div_clk2.hw, - [RPM_SMD_DIV_A_CLK2] = &qcs405_div_clk2_a.hw, + [RPM_SMD_RF_CLK1] = &qcs405_rf_clk1.hw, + [RPM_SMD_RF_CLK1_A] = &qcs405_rf_clk1_a.hw, + [RPM_SMD_RF_CLK1_PIN] = &qcs405_rf_clk1_pin.hw, + [RPM_SMD_RF_CLK1_A_PIN] = &qcs405_rf_clk1_a_pin.hw, + 
[RPM_SMD_LN_BB_CLK] = &qcs405_ln_bb_clk.hw, + [RPM_SMD_LN_BB_CLK_A] = &qcs405_ln_bb_clk_a.hw, + [RPM_SMD_LN_BB_CLK_PIN] = &qcs405_ln_bb_clk_pin.hw, + [RPM_SMD_LN_BB_CLK_A_PIN] = &qcs405_ln_bb_clk_a_pin.hw, + [RPM_SMD_DIV_CLK1] = &qcs405_div_clk1.hw, + [RPM_SMD_DIV_A_CLK1] = &qcs405_div_clk1_a.hw, [RPM_SMD_PNOC_CLK] = &qcs405_pnoc_clk.hw, [RPM_SMD_PNOC_A_CLK] = &qcs405_pnoc_a_clk.hw, [RPM_SMD_CE1_CLK] = &qcs405_ce1_clk.hw, [RPM_SMD_CE1_A_CLK] = &qcs405_ce1_a_clk.hw, [RPM_SMD_QPIC_CLK] = &qcs405_qpic_clk.hw, [RPM_SMD_QPIC_A_CLK] = &qcs405_qpic_a_clk.hw, + [RPM_SMD_BIMC_GPU_CLK] = &qcs405_bimc_gpu_clk.hw, + [RPM_SMD_BIMC_GPU_A_CLK] = &qcs405_bimc_gpu_a_clk.hw, [PNOC_MSMBUS_CLK] = &pnoc_msmbus_clk.hw, [PNOC_MSMBUS_A_CLK] = &pnoc_msmbus_a_clk.hw, [PNOC_KEEPALIVE_A_CLK] = &pnoc_keepalive_a_clk.hw, @@ -702,11 +703,12 @@ static struct clk_hw *qcs405_clks[] = { [CXO_SMD_PIL_MSS_CLK] = &cxo_pil_mss_clk.hw, [CXO_SMD_WLAN_CLK] = &cxo_wlan_clk.hw, [CXO_SMD_PIL_LPASS_CLK] = &cxo_pil_lpass_clk.hw, + [CXO_SMD_PIL_CDSP_CLK] = &cxo_pil_cdsp_clk.hw, }; static const struct rpm_smd_clk_desc rpm_clk_qcs405 = { .clks = qcs405_clks, - .num_rpm_clks = RPM_SMD_CE1_A_CLK, + .num_rpm_clks = RPM_SMD_BIMC_GPU_A_CLK, .num_clks = ARRAY_SIZE(qcs405_clks), }; diff --git a/drivers/clk/qcom/debugcc-qcs405.c b/drivers/clk/qcom/debugcc-qcs405.c index b231337d2e4f87d768da52366cf564ff5ccc2594..b7b07f5e934e27064f4731beef969ea5ababacfe 100644 --- a/drivers/clk/qcom/debugcc-qcs405.c +++ b/drivers/clk/qcom/debugcc-qcs405.c @@ -110,6 +110,7 @@ static const char *const debug_mux_parent_names[] = { "gcc_usb_hs_inactivity_timers_clk", "gcc_usb_hs_phy_cfg_ahb_clk", "gcc_usb_hs_system_clk", + "apcs_mux_clk", }; static struct clk_debug_mux gcc_debug_mux = { @@ -277,6 +278,7 @@ static struct clk_debug_mux gcc_debug_mux = { 0x64, 0x1FF, 0, 0xF000, 12, 4, 0x74000, 0x74000, 0x74000 }, { "gcc_usb_hs_system_clk", 0x60, 4, GCC, 0x60, 0x1FF, 0, 0xF000, 12, 4, 0x74000, 0x74000, 0x74000 }, + { "apcs_mux_clk", 0x16A, CPU_CC, 
0x000, 0x3, 8, 0x0FF }, ), .hw.init = &(struct clk_init_data){ .name = "gcc_debug_mux", @@ -331,6 +333,10 @@ static int clk_debug_qcs405_probe(struct platform_device *pdev) if (ret) return ret; + ret = map_debug_bases(pdev, "qcom,cpucc", CPU_CC); + if (ret) + return ret; + clk = devm_clk_register(&pdev->dev, &gcc_debug_mux.hw); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Unable to register GCC debug mux\n"); diff --git a/drivers/clk/qcom/dispcc-sm8150.c b/drivers/clk/qcom/dispcc-sm8150.c index 2527a0587f48176cc1a4e6cef11703639dd1bf5f..46475d7e1761df8d9d0bac4877869ba474fd122b 100644 --- a/drivers/clk/qcom/dispcc-sm8150.c +++ b/drivers/clk/qcom/dispcc-sm8150.c @@ -171,6 +171,20 @@ static struct pll_vco trion_vco[] = { }; static const struct alpha_pll_config disp_cc_pll0_config = { + .l = 0x47, + .alpha = 0xE000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000002, + .test_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static const struct alpha_pll_config disp_cc_pll0_config_sm8150_v2 = { .l = 0x47, .alpha = 0xE000, .config_ctl_val = 0x20485699, @@ -204,6 +218,20 @@ static struct clk_alpha_pll disp_cc_pll0 = { }; static const struct alpha_pll_config disp_cc_pll1_config = { + .l = 0x1F, + .alpha = 0x4000, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000002, + .test_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static const struct alpha_pll_config disp_cc_pll1_config_sm8150_v2 = { .l = 0x1F, .alpha = 0x4000, .config_ctl_val = 0x20485699, @@ -374,8 +402,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto1_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_MIN] 
= 12800, - [VDD_LOWER] = 108000, - [VDD_LOW] = 180000, + [VDD_LOWER] = 180000, [VDD_LOW_L1] = 360000, [VDD_NOMINAL] = 540000}, }, @@ -397,8 +424,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_MIN] = 12800, - [VDD_LOWER] = 108000, - [VDD_LOW] = 180000, + [VDD_LOWER] = 180000, [VDD_LOW_L1] = 360000, [VDD_NOMINAL] = 540000}, }, @@ -428,8 +454,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_link1_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_MIN] = 19200, - [VDD_LOWER] = 162000, - [VDD_LOW] = 270000, + [VDD_LOWER] = 270000, [VDD_LOW_L1] = 540000, [VDD_NOMINAL] = 810000}, }, @@ -451,8 +476,7 @@ static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_MIN] = 19200, - [VDD_LOWER] = 162000, - [VDD_LOW] = 270000, + [VDD_LOWER] = 270000, [VDD_LOW_L1] = 540000, [VDD_NOMINAL] = 810000}, }, @@ -472,9 +496,9 @@ static struct clk_rcg2 disp_cc_mdss_dp_pixel1_clk_src = { .vdd_class = &vdd_mm, .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { - [VDD_MIN] = 19200000, - [VDD_LOWER] = 337500000, - [VDD_LOW_L1] = 675000000}, + [VDD_MIN] = 19200, + [VDD_LOWER] = 337500, + [VDD_LOW_L1] = 675000}, }, }; @@ -1486,10 +1510,53 @@ static const struct qcom_cc_desc disp_cc_sm8150_desc = { static const struct of_device_id disp_cc_sm8150_match_table[] = { { .compatible = "qcom,dispcc-sm8150" }, + { .compatible = "qcom,dispcc-sm8150-v2" }, { } }; MODULE_DEVICE_TABLE(of, disp_cc_sm8150_match_table); +static void disp_cc_sm8150_fixup_sm8150v2(struct regmap *regmap) +{ + clk_trion_pll_configure(&disp_cc_pll0, regmap, + &disp_cc_pll0_config_sm8150_v2); + clk_trion_pll_configure(&disp_cc_pll1, regmap, + &disp_cc_pll1_config_sm8150_v2); + disp_cc_mdss_dp_pixel1_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = + 337500; + disp_cc_mdss_dp_pixel1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = + 675000; + 
disp_cc_mdss_dp_pixel2_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = + 337500; + disp_cc_mdss_dp_pixel2_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = + 675000; + disp_cc_mdss_dp_pixel_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = + 337500; + disp_cc_mdss_dp_pixel_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = + 675000; + disp_cc_mdss_edp_link_clk_src.clkr.hw.init->rate_max[VDD_LOW] = + 594000000; + disp_cc_mdss_edp_pixel_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = + 337500000; + disp_cc_mdss_edp_pixel_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = + 675000000; +} + +static int disp_cc_sm8150_fixup(struct platform_device *pdev, + struct regmap *regmap) +{ + const char *compat = NULL; + int compatlen = 0; + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if (!strcmp(compat, "qcom,dispcc-sm8150-v2")) + disp_cc_sm8150_fixup_sm8150v2(regmap); + + return 0; +} + static int disp_cc_sm8150_probe(struct platform_device *pdev) { struct regmap *regmap; @@ -1518,11 +1585,15 @@ static int disp_cc_sm8150_probe(struct platform_device *pdev) return PTR_ERR(vdd_mm.regulator[0]); } + ret = disp_cc_sm8150_fixup(pdev, regmap); + if (ret) + return ret; + clk_trion_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config); clk_trion_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config); /* Enable clock gating for DSI and MDP clocks */ - regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x7f0, 0x7f0); + regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x670, 0x670); ret = qcom_cc_really_probe(pdev, &disp_cc_sm8150_desc, regmap); if (ret) { diff --git a/drivers/clk/qcom/gcc-qcs405.c b/drivers/clk/qcom/gcc-qcs405.c index eb11ebeae5c8a2274ff691c917ce6a02148b07cb..b8d9501ca58a359ae75c1fc2ed9c5861ee084efe 100644 --- a/drivers/clk/qcom/gcc-qcs405.c +++ b/drivers/clk/qcom/gcc-qcs405.c @@ -521,12 +521,23 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { }, }; +static const struct freq_tbl 
ftbl_blsp1_qup1_spi_apps_clk_src[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 0), + F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5), + F(19200000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2), + F(30000000, P_GPLL0_OUT_MAIN, 1, 3, 80), + { } +}; + static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { .cmd_rcgr = 0x2024, .mnd_width = 8, .hid_width = 5, .parent_map = gcc_parent_map_0, - .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup1_spi_apps_clk_src", .parent_names = gcc_parent_names_0, @@ -536,7 +547,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_LOW] = 25000000, - [VDD_NOMINAL] = 50000000}, + [VDD_NOMINAL] = 30000000}, }, }; @@ -563,7 +574,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { .mnd_width = 8, .hid_width = 5, .parent_map = gcc_parent_map_0, - .freq_tbl = ftbl_blsp1_qup0_spi_apps_clk_src, + .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "blsp1_qup2_spi_apps_clk_src", .parent_names = gcc_parent_names_0, @@ -573,7 +584,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { .num_rate_max = VDD_NUM, .rate_max = (unsigned long[VDD_NUM]) { [VDD_LOW] = 25000000, - [VDD_NOMINAL] = 50000000}, + [VDD_NOMINAL] = 30000000}, }, }; @@ -906,6 +917,7 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = { F_SLEW(523200000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1046400000), F_SLEW(550000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1100000000), F_SLEW(598000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1196000000), + F_SLEW(650000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1300000000), { } }; @@ -1391,6 +1403,19 @@ static struct clk_branch gcc_apss_ahb_clk = { }, }; +static struct clk_branch gcc_apss_tcu_clk = { + .halt_reg = 0x5b004, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = 
BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_apss_tcu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_bimc_gfx_clk = { .halt_reg = 0x59034, .halt_check = BRANCH_HALT, @@ -1400,6 +1425,23 @@ static struct clk_branch gcc_bimc_gfx_clk = { .hw.init = &(struct clk_init_data){ .name = "gcc_bimc_gfx_clk", .ops = &clk_branch2_ops, + .parent_names = (const char *[]){ + "gcc_apss_tcu_clk", + }, + + }, + }, +}; + +static struct clk_branch gcc_bimc_gpu_clk = { + .halt_reg = 0x59030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x59030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_bimc_gpu_clk", + .ops = &clk_branch2_ops, }, }, }; @@ -1762,6 +1804,45 @@ static struct clk_branch gcc_boot_rom_ahb_clk = { }, }; +static struct clk_branch gcc_crypto_ahb_clk = { + .halt_reg = 0x16024, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_axi_clk = { + .halt_reg = 0x16020, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_crypto_clk = { + .halt_reg = 0x1601c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_crypto_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_eth_axi_clk = { .halt_reg = 0x4e010, .halt_check = BRANCH_HALT, @@ -1837,6 +1918,32 @@ static struct clk_branch gcc_geni_ir_s_clk = { }, }; +static struct clk_branch gcc_gfx_tcu_clk = { + .halt_reg = 0x12020, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500C, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = 
"gcc_gfx_tcu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gfx_tbu_clk = { + .halt_reg = 0x12010, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500C, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gfx_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_gp1_clk = { .halt_reg = 0x8000, .halt_check = BRANCH_HALT, @@ -1891,6 +1998,32 @@ static struct clk_branch gcc_gp3_clk = { }, }; +static struct clk_branch gcc_gtcu_ahb_clk = { + .halt_reg = 0x12044, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gtcu_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mdp_tbu_clk = { + .halt_reg = 0x1201c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x4500c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mdp_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_mdss_ahb_clk = { .halt_reg = 0x4d07c, .halt_check = BRANCH_HALT, @@ -2271,6 +2404,19 @@ static struct clk_branch gcc_pwm2_xo512_clk = { }, }; +static struct clk_branch gcc_qdss_dap_clk = { + .halt_reg = 0x29084, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x45004, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qdss_dap_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_sdcc1_ahb_clk = { .halt_reg = 0x4201c, .halt_check = BRANCH_HALT, @@ -2351,6 +2497,19 @@ static struct clk_branch gcc_sdcc2_apps_clk = { }, }; +static struct clk_branch gcc_smmu_cfg_clk = { + .halt_reg = 0x12038, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x3600C, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_smmu_cfg_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_sys_noc_usb3_clk = { .halt_reg = 0x26014, 
.halt_check = BRANCH_HALT, @@ -2650,6 +2809,17 @@ static struct clk_regmap *gcc_qcs405_clocks[] = { [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, [GCC_USB_HS_INACTIVITY_TIMERS_CLK] = &gcc_usb_hs_inactivity_timers_clk.clkr, + [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr, + [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr, + [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr, + [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr, + [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr, + [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr, + [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr, + [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr, + [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr, + [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr, + [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, }; static const struct qcom_reset_map gcc_qcs405_resets[] = { @@ -2732,6 +2902,7 @@ static int gcc_qcs405_probe(struct platform_device *pdev) clk_set_rate(apss_ahb_clk_src.clkr.hw.clk, 19200000); clk_prepare_enable(apss_ahb_clk_src.clkr.hw.clk); + clk_prepare_enable(gpll0_ao_out_main.clkr.hw.clk); dev_info(&pdev->dev, "Registered GCC clocks\n"); diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index cc5b19c1a4a891c4b0c23d5d114fffb02c51fa40..d7f5f65a782e057b84d9a4b1946d399367ce3139 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -4200,10 +4200,36 @@ static const struct qcom_cc_desc gcc_sm8150_desc = { static const struct of_device_id gcc_sm8150_match_table[] = { { .compatible = "qcom,gcc-sm8150" }, + { .compatible = "qcom,gcc-sm8150-v2" }, { } }; MODULE_DEVICE_TABLE(of, gcc_sm8150_match_table); +static void gcc_sm8150_fixup_sm8150v2(struct regmap *regmap) +{ + gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 9600000; + gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 19200000; + gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 9600000; + gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 19200000; + gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] 
= 50000000; + gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 100000000; +} + +static int gcc_sm8150_fixup(struct platform_device *pdev, struct regmap *regmap) +{ + const char *compat = NULL; + int compatlen = 0; + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if (!strcmp(compat, "qcom,gcc-sm8150-v2")) + gcc_sm8150_fixup_sm8150v2(regmap); + + return 0; +} + static int gcc_sm8150_probe(struct platform_device *pdev) { struct clk *clk; @@ -4248,6 +4274,10 @@ static int gcc_sm8150_probe(struct platform_device *pdev) return PTR_ERR(vdd_cx_ao.regulator[0]); } + ret = gcc_sm8150_fixup(pdev, regmap); + if (ret) + return ret; + ret = qcom_cc_really_probe(pdev, &gcc_sm8150_desc, regmap); if (ret) { dev_err(&pdev->dev, "Failed to register GCC clocks\n"); diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c index 0e7d35acee51a8aabdab33d5031992f817e5b0fb..d013069acd4e76907d946f378cd465479b9ca866 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c @@ -896,7 +896,10 @@ static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc) MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_3, 0x04); data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1); - MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5))); + + /* Turn on clk_en_sel bit prior to resync toggle fifo */ + MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5) | + BIT(4))); } static void dsi_pll_phy_dig_reset(struct mdss_pll_resources *rsc) @@ -1182,9 +1185,8 @@ static unsigned long vco_7nm_recalc_rate(struct clk_hw *hw, pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n", dec, frac, outdiv, vco_rate); - (void)mdss_pll_resource_enable(pll, false); - end: + (void)mdss_pll_resource_enable(pll, false); return (unsigned long)vco_rate; } diff --git a/drivers/clk/qcom/npucc-sm8150.c b/drivers/clk/qcom/npucc-sm8150.c index 
7690904899bad0595882010931874d6e464040a2..ed342ecfb27c9220db97a977ab24e6b0e47705a1 100644 --- a/drivers/clk/qcom/npucc-sm8150.c +++ b/drivers/clk/qcom/npucc-sm8150.c @@ -97,8 +97,8 @@ static struct pll_vco trion_vco[] = { }; static const struct alpha_pll_config npu_cc_pll0_config = { - .l = 0x14, - .alpha = 0xD555, + .l = 0x1F, + .alpha = 0x4000, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, @@ -110,6 +110,17 @@ static const struct alpha_pll_config npu_cc_pll0_config = { .user_ctl_hi1_val = 0x000000D0, }; +static const struct alpha_pll_config npu_cc_pll0_config_sm8150_v2 = { + .l = 0xD, + .alpha = 0x555, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + static struct clk_alpha_pll npu_cc_pll0 = { .offset = 0x0, .vco_table = trion_vco, @@ -169,6 +180,17 @@ static const struct alpha_pll_config npu_cc_pll1_config = { .user_ctl_hi1_val = 0x000000D0, }; +static const struct alpha_pll_config npu_cc_pll1_config_sm8150_v2 = { + .l = 0x29, + .alpha = 0xAAAA, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + static struct clk_alpha_pll npu_cc_pll1 = { .offset = 0x400, .vco_table = trion_vco, @@ -228,6 +250,16 @@ static const struct freq_tbl ftbl_npu_cc_cal_dp_clk_src[] = { { } }; +static const struct freq_tbl ftbl_npu_cc_cal_dp_clk_src_sm8150_v2[] = { + F(125000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(400000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(487000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(773000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + F(908000000, P_NPU_CC_CRC_DIV, 1, 0, 0), + { } +}; + static struct clk_rcg2 npu_cc_cal_dp_clk_src = { .cmd_rcgr = 0x1004, .mnd_width = 0, @@ -263,6 +295,16 
@@ static const struct freq_tbl ftbl_npu_cc_npu_core_clk_src[] = { { } }; +static const struct freq_tbl ftbl_npu_cc_npu_core_clk_src_sm8150_v2[] = { + F(60000000, P_GPLL0_OUT_MAIN_DIV, 5, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(200000000, P_NPU_CC_PLL1_OUT_EVEN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + F(400000000, P_NPU_CC_PLL1_OUT_EVEN, 2, 0, 0), + { } +}; + static struct clk_rcg2 npu_cc_npu_core_clk_src = { .cmd_rcgr = 0x1030, .mnd_width = 0, @@ -593,19 +635,54 @@ static const struct qcom_cc_desc npu_cc_sm8150_desc = { static const struct of_device_id npu_cc_sm8150_match_table[] = { { .compatible = "qcom,npucc-sm8150" }, + { .compatible = "qcom,npucc-sm8150-v2" }, { } }; MODULE_DEVICE_TABLE(of, npu_cc_sm8150_match_table); +static void npu_cc_sm8150_fixup_sm8150v2(struct regmap *regmap) +{ + clk_trion_pll_configure(&npu_cc_pll0, regmap, + &npu_cc_pll0_config_sm8150_v2); + clk_trion_pll_configure(&npu_cc_pll1, regmap, + &npu_cc_pll1_config_sm8150_v2); + npu_cc_cal_dp_clk_src.freq_tbl = ftbl_npu_cc_cal_dp_clk_src_sm8150_v2; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 0; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 400000000; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_LOW_L1] = 487000000; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 773000000; + npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 908000000; + npu_cc_npu_core_clk_src.freq_tbl = + ftbl_npu_cc_npu_core_clk_src_sm8150_v2; + npu_cc_npu_core_clk_src.clkr.hw.init->rate_max[VDD_MIN] = 0; + npu_cc_npu_core_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 400000000; +} + +static int npu_cc_sm8150_fixup(struct platform_device *pdev, + struct regmap *regmap) +{ + const char *compat = NULL; + int compatlen = 0; + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if (!strcmp(compat, "qcom,npucc-sm8150-v2")) + 
npu_cc_sm8150_fixup_sm8150v2(regmap); + + return 0; +} + struct regulator *vdd_gdsc; static int enable_npu_crc(struct regmap *regmap) { int ret = 0; - /* Set npu_cc_cal_cp_clk to a safe frequency */ + /* Set npu_cc_cal_cp_clk to the lowest supported frequency */ clk_set_rate(npu_cc_cal_dp_clk.clkr.hw.clk, - npu_cc_cal_dp_clk_src.clkr.hw.init->rate_max[VDD_MIN]); + clk_round_rate(npu_cc_cal_dp_clk_src.clkr.hw.clk, 1)); /* Turn on the NPU GDSC */ ret = regulator_enable(vdd_gdsc); if (ret) { @@ -658,6 +735,10 @@ static int npu_cc_sm8150_probe(struct platform_device *pdev) return PTR_ERR(vdd_gdsc); } + ret = npu_cc_sm8150_fixup(pdev, regmap); + if (ret) + return ret; + clk_trion_pll_configure(&npu_cc_pll0, regmap, &npu_cc_pll0_config); clk_trion_pll_configure(&npu_cc_pll1, regmap, &npu_cc_pll1_config); diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c index c18ee37dc8b9da85eb7bbc1924730c159cb157da..4c43d8579868b6bec44dbc690e491513b55e0866 100644 --- a/drivers/clk/qcom/videocc-sm8150.c +++ b/drivers/clk/qcom/videocc-sm8150.c @@ -70,6 +70,20 @@ static struct pll_vco trion_vco[] = { }; static const struct alpha_pll_config video_pll0_config = { + .l = 0x14, + .alpha = 0xD555, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000002, + .test_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000D0, +}; + +static const struct alpha_pll_config video_pll0_config_sm8150_v2 = { .l = 0x14, .alpha = 0xD555, .config_ctl_val = 0x20485699, @@ -112,6 +126,16 @@ static const struct freq_tbl ftbl_video_cc_iris_clk_src[] = { { } }; +static const struct freq_tbl ftbl_video_cc_iris_clk_src_sm8150_v2[] = { + F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + F(240000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + F(338000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + F(365000000, P_VIDEO_PLL0_OUT_MAIN, 2, 
0, 0), + F(444000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + F(533000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + static struct clk_rcg2 video_cc_iris_clk_src = { .cmd_rcgr = 0x7f0, .mnd_width = 0, @@ -249,10 +273,38 @@ static const struct qcom_cc_desc video_cc_sm8150_desc = { static const struct of_device_id video_cc_sm8150_match_table[] = { { .compatible = "qcom,videocc-sm8150" }, + { .compatible = "qcom,videocc-sm8150-v2" }, { } }; MODULE_DEVICE_TABLE(of, video_cc_sm8150_match_table); +static void video_cc_sm8150_fixup_sm8150v2(struct regmap *regmap) +{ + clk_trion_pll_configure(&video_pll0, regmap, + &video_pll0_config_sm8150_v2); + video_cc_iris_clk_src.freq_tbl = ftbl_video_cc_iris_clk_src_sm8150_v2; + video_cc_iris_clk_src.clkr.hw.init->rate_max[VDD_LOWER] = 240000000; + video_cc_iris_clk_src.clkr.hw.init->rate_max[VDD_LOW] = 338000000; + video_cc_iris_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 444000000; + video_cc_iris_clk_src.clkr.hw.init->rate_max[VDD_HIGH] = 533000000; +} + +static int video_cc_sm8150_fixup(struct platform_device *pdev, + struct regmap *regmap) +{ + const char *compat = NULL; + int compatlen = 0; + + compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen); + if (!compat || (compatlen <= 0)) + return -EINVAL; + + if (!strcmp(compat, "qcom,videocc-sm8150-v2")) + video_cc_sm8150_fixup_sm8150v2(regmap); + + return 0; +} + static int video_cc_sm8150_probe(struct platform_device *pdev) { struct regmap *regmap; @@ -280,6 +332,10 @@ static int video_cc_sm8150_probe(struct platform_device *pdev) return PTR_ERR(vdd_mm.regulator[0]); } + ret = video_cc_sm8150_fixup(pdev, regmap); + if (ret) + return ret; + clk_trion_pll_configure(&video_pll0, regmap, &video_pll0_config); ret = qcom_cc_really_probe(pdev, &video_cc_sm8150_desc, regmap); diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 077fcdc7908bb9f3791fe20bc60a0266327ca050..fe7d9ed1d4364eba777e6104bda92504e9559aff 100644 --- 
a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -58,6 +58,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) u16 degrees; u32 delay_num = 0; + /* See the comment for rockchip_mmc_set_phase below */ + if (!rate) { + pr_err("%s: invalid clk rate\n", __func__); + return -EINVAL; + } + raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90; @@ -84,6 +90,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees) u32 raw_value; u32 delay; + /* + * The below calculation is based on the output clock from + * MMC host to the card, which expects the phase clock inherits + * the clock rate from its parent, namely the output clock + * provider of MMC host. However, things may go wrong if + * (1) It is orphan. + * (2) It is assigned to the wrong parent. + * + * This check help debug the case (1), which seems to be the + * most likely problem we often face and which makes it difficult + * for people to debug unstable mmc tuning results. 
+ */ + if (!rate) { + pr_err("%s: invalid clk rate\n", __func__); + return -EINVAL; + } + nineties = degrees / 90; remainder = (degrees % 90); diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index 11e7f2d1c0548166f8b762582414b3337f363a6f..7af48184b0224b1428ba1e9788f941511a47c9bb 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS, RK2928_CLKGATE_CON(2), 15, GFLAGS), - COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, + COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0, RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS, RK2928_CLKGATE_CON(2), 11, GFLAGS), diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index 1b81e283f60589c900c5bfc6370a125d8c8c845d..ed36728424a21f31d3ff3317c96a5ba16270484d 100644 --- a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -698,7 +698,7 @@ static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst = PLL_36XX_RATE(144000000, 96, 2, 3, 0), PLL_36XX_RATE( 96000000, 128, 2, 4, 0), PLL_36XX_RATE( 84000000, 112, 2, 4, 0), - PLL_36XX_RATE( 80000004, 106, 2, 4, 43691), + PLL_36XX_RATE( 80000003, 106, 2, 4, 43691), PLL_36XX_RATE( 73728000, 98, 2, 4, 19923), PLL_36XX_RATE( 67737598, 270, 3, 5, 62285), PLL_36XX_RATE( 65535999, 174, 2, 5, 49982), @@ -734,7 +734,7 @@ static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst = PLL_36XX_RATE(148352005, 98, 2, 3, 59070), PLL_36XX_RATE(108000000, 144, 2, 4, 0), PLL_36XX_RATE( 74250000, 99, 2, 4, 0), - PLL_36XX_RATE( 74176002, 98, 3, 4, 59070), + PLL_36XX_RATE( 74176002, 98, 2, 4, 59070), PLL_36XX_RATE( 54054000, 216, 3, 5, 14156), PLL_36XX_RATE( 54000000, 144, 2, 5, 0), { /* sentinel */ } diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c 
index 27a227d6620c7b32bedf04bf16f2f2a7f71ce49b..6a0cb8a515e892475aec6188032e3edf09d96e19 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -711,13 +711,13 @@ static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_36XX_RATE(rate, m, p, s, k) */ PLL_36XX_RATE(192000000, 64, 2, 2, 0), - PLL_36XX_RATE(180633600, 90, 3, 2, 20762), + PLL_36XX_RATE(180633605, 90, 3, 2, 20762), PLL_36XX_RATE(180000000, 90, 3, 2, 0), PLL_36XX_RATE(73728000, 98, 2, 4, 19923), - PLL_36XX_RATE(67737600, 90, 2, 4, 20762), + PLL_36XX_RATE(67737602, 90, 2, 4, 20762), PLL_36XX_RATE(49152000, 98, 3, 4, 19923), - PLL_36XX_RATE(45158400, 90, 3, 4, 20762), - PLL_36XX_RATE(32768000, 131, 3, 5, 4719), + PLL_36XX_RATE(45158401, 90, 3, 4, 20762), + PLL_36XX_RATE(32768001, 131, 3, 5, 4719), { }, }; diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c index fd1d9bfc151b9c4f41f2f2adae512710dc52801f..8eae1752d700a43f083ddf3b0ca332703589fc78 100644 --- a/drivers/clk/samsung/clk-exynos5260.c +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -65,7 +65,7 @@ static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = { PLL_36XX_RATE(480000000, 160, 2, 2, 0), PLL_36XX_RATE(432000000, 144, 2, 2, 0), PLL_36XX_RATE(400000000, 200, 3, 2, 0), - PLL_36XX_RATE(394073130, 459, 7, 2, 49282), + PLL_36XX_RATE(394073128, 459, 7, 2, 49282), PLL_36XX_RATE(333000000, 111, 2, 2, 0), PLL_36XX_RATE(300000000, 100, 2, 2, 0), PLL_36XX_RATE(266000000, 266, 3, 3, 0), diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 11343a5970933d14c490006727c9fc39a86e1a5e..1d2265f9ee97436cda8a023a57bb3b6aa898c521 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -725,7 +725,7 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = PLL_35XX_RATE(800000000U, 400, 6, 1), 
PLL_35XX_RATE(733000000U, 733, 12, 1), PLL_35XX_RATE(700000000U, 175, 3, 1), - PLL_35XX_RATE(667000000U, 222, 4, 1), + PLL_35XX_RATE(666000000U, 222, 4, 1), PLL_35XX_RATE(633000000U, 211, 4, 1), PLL_35XX_RATE(600000000U, 500, 5, 2), PLL_35XX_RATE(552000000U, 460, 5, 2), @@ -753,12 +753,12 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst = /* AUD_PLL */ static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = { PLL_36XX_RATE(400000000U, 200, 3, 2, 0), - PLL_36XX_RATE(393216000U, 197, 3, 2, -25690), + PLL_36XX_RATE(393216003U, 197, 3, 2, -25690), PLL_36XX_RATE(384000000U, 128, 2, 2, 0), - PLL_36XX_RATE(368640000U, 246, 4, 2, -15729), - PLL_36XX_RATE(361507200U, 181, 3, 2, -16148), - PLL_36XX_RATE(338688000U, 113, 2, 2, -6816), - PLL_36XX_RATE(294912000U, 98, 1, 3, 19923), + PLL_36XX_RATE(368639991U, 246, 4, 2, -15729), + PLL_36XX_RATE(361507202U, 181, 3, 2, -16148), + PLL_36XX_RATE(338687988U, 113, 2, 2, -6816), + PLL_36XX_RATE(294912002U, 98, 1, 3, 19923), PLL_36XX_RATE(288000000U, 96, 1, 3, 0), PLL_36XX_RATE(252000000U, 84, 1, 3, 0), { /* sentinel */ } diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index 5931a4140c3d3f264d75510c277e06401dc31944..bbfa57b4e01765d1ede2283010c59e942855a607 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = { }; static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = { - PLL_36XX_RATE(491520000, 20, 1, 0, 31457), + PLL_36XX_RATE(491519897, 20, 1, 0, 31457), {}, }; diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c index e0650c33863bbc221f981374a26f2503c2c6dc1f..d8e58a65946759d512acf0eb2b221bf32f527112 100644 --- a/drivers/clk/samsung/clk-s3c2410.c +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] 
__initdata = { PLL_35XX_RATE(226000000, 105, 1, 1), PLL_35XX_RATE(210000000, 132, 2, 1), /* 2410 common */ - PLL_35XX_RATE(203000000, 161, 3, 1), + PLL_35XX_RATE(202800000, 161, 3, 1), PLL_35XX_RATE(192000000, 88, 1, 1), PLL_35XX_RATE(186000000, 85, 1, 1), PLL_35XX_RATE(180000000, 82, 1, 1), @@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = { PLL_35XX_RATE(147000000, 90, 2, 1), PLL_35XX_RATE(135000000, 82, 2, 1), PLL_35XX_RATE(124000000, 116, 1, 2), - PLL_35XX_RATE(118000000, 150, 2, 2), + PLL_35XX_RATE(118500000, 150, 2, 2), PLL_35XX_RATE(113000000, 105, 1, 2), - PLL_35XX_RATE(101000000, 127, 2, 2), + PLL_35XX_RATE(101250000, 127, 2, 2), PLL_35XX_RATE(90000000, 112, 2, 2), - PLL_35XX_RATE(85000000, 105, 2, 2), + PLL_35XX_RATE(84750000, 105, 2, 2), PLL_35XX_RATE(79000000, 71, 1, 2), - PLL_35XX_RATE(68000000, 82, 2, 2), - PLL_35XX_RATE(56000000, 142, 2, 3), + PLL_35XX_RATE(67500000, 82, 2, 2), + PLL_35XX_RATE(56250000, 142, 2, 3), PLL_35XX_RATE(48000000, 120, 2, 3), - PLL_35XX_RATE(51000000, 161, 3, 3), + PLL_35XX_RATE(50700000, 161, 3, 3), PLL_35XX_RATE(45000000, 82, 1, 3), - PLL_35XX_RATE(34000000, 82, 2, 3), + PLL_35XX_RATE(33750000, 82, 2, 3), { /* sentinel */ }, }; diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 7c369e21c91cb8d156f08dc6b1756ecd01a7e2dd..830d1c87fa7cb6d089ff95992a3e2592691801ea 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = { .enable = clk_pllu_enable, .disable = clk_pll_disable, .recalc_rate = clk_pll_recalc_rate, + .round_rate = clk_pll_round_rate, + .set_rate = clk_pll_set_rate, }; static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params, diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 3ee7e6fea6212668d8a9a4d4c1e5c52aedab58fb..846d18daf893b03d926276ba454a5babca7cc75d 100644 --- a/drivers/clocksource/fsl_ftm_timer.c 
+++ b/drivers/clocksource/fsl_ftm_timer.c @@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, static unsigned long __init ftm_clk_init(struct device_node *np) { - unsigned long freq; + long freq; freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); if (freq <= 0) diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index ae3167c28b129b6c3d3cc0fbc5c4a0da1a9abacd..a07f51231e335029a2ff2726092f8570c3b81a9d 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -164,7 +164,7 @@ static int __init __gic_clocksource_init(void) /* Set clocksource mask. */ count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; - count_width >>= __fls(GIC_CONFIG_COUNTBITS); + count_width >>= __ffs(GIC_CONFIG_COUNTBITS); count_width *= 4; count_width += 32; gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index dcb1cb9a4572a6220956bc59b3b57be9b752d588..8b432d6e846d9677e03c21604b6e495f453a14fa 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -167,9 +167,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) NSEC_PER_USEC; policy->shared_type = cpu->shared_type; - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { + int i; + cpumask_copy(policy->cpus, cpu->shared_cpu_map); - else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { + + for_each_cpu(i, policy->cpus) { + if (unlikely(i == policy->cpu)) + continue; + + memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps, + sizeof(cpu->perf_caps)); + } + } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) { /* Support only SW_ANY for now. 
*/ pr_debug("Unsupported CPU co-ord type\n"); return -EFAULT; @@ -233,8 +243,13 @@ static int __init cppc_cpufreq_init(void) return ret; out: - for_each_possible_cpu(i) - kfree(all_cpu_data[i]); + for_each_possible_cpu(i) { + cpu = all_cpu_data[i]; + if (!cpu) + break; + free_cpumask_var(cpu->shared_cpu_map); + kfree(cpu); + } kfree(all_cpu_data); return -ENODEV; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 55fa8cb1a2f727f30746bea03653018d801d074d..becc3cd0923eee3ad63adc0fb12ea01b34f2a6c7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1324,14 +1324,14 @@ static int cpufreq_online(unsigned int cpu) return 0; out_exit_policy: + for_each_cpu(j, policy->real_cpus) + remove_cpu_dev_symlink(policy, get_cpu_device(j)); + up_write(&policy->rwsem); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); - for_each_cpu(j, policy->real_cpus) - remove_cpu_dev_symlink(policy, get_cpu_device(j)); - out_free_policy: cpufreq_policy_free(policy); return ret; diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c index d07f31503ed827cfd12560866ae2f3b392258665..149e7ab3e17ac5d8b9aba64dfa772cf5f122eaec 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -614,6 +614,7 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask) break; } } + of_node_put(cpu_node); cpu_node = of_parse_phandle(node, "qcom,cpu", idx++); } @@ -652,13 +653,16 @@ static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu) cpu->nlevels++; ret = parse_cpu_mode(n, l); - if (ret) + if (ret) { + of_node_put(n); return ret; + } ret = parse_power_params(n, &l->pwr); - if (ret) + if (ret) { + of_node_put(n); return ret; - + } key = "qcom,use-broadcast-timer"; l->use_bc_timer = of_property_read_bool(n, key); @@ -671,6 +675,7 @@ static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu) l->reset_level = LPM_RESET_LVL_NONE; else if (ret) return ret; + of_node_put(n); } 
for (i = 0; i < cpu->nlevels; i++) { @@ -820,8 +825,11 @@ struct lpm_cluster *parse_cluster(struct device_node *node, key = "qcom,pm-cluster-level"; if (!of_node_cmp(n->name, key)) { - if (parse_cluster_level(n, c)) + if (parse_cluster_level(n, c)) { + of_node_put(n); goto failed_parse_cluster; + } + of_node_put(n); continue; } @@ -830,22 +838,28 @@ struct lpm_cluster *parse_cluster(struct device_node *node, struct lpm_cluster *child; child = parse_cluster(n, c); - if (!child) + if (!child) { + of_node_put(n); goto failed_parse_cluster; + } list_add(&child->list, &c->child); cpumask_or(&c->child_cpus, &c->child_cpus, &child->child_cpus); c->aff_level = child->aff_level + 1; + of_node_put(n); continue; } key = "qcom,pm-cpu"; if (!of_node_cmp(n->name, key)) { - if (parse_cpu_levels(n, c)) + if (parse_cpu_levels(n, c)) { + of_node_put(n); goto failed_parse_cluster; + } c->aff_level = 1; + of_node_put(n); } } @@ -879,6 +893,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node, struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev) { struct device_node *top = NULL; + struct lpm_cluster *c; top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster"); if (!top) { @@ -887,7 +902,9 @@ struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev) } lpm_pdev = pdev; - return parse_cluster(top, NULL); + c = parse_cluster(top, NULL); + of_node_put(top); + return c; } void cluster_dt_walkthrough(struct lpm_cluster *cluster) diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 29e20c37f3a6719861631207b2363af4c01862ba..11129b796ddaf0f41df8afcbd7c047cd19d2bb7e 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -2145,7 +2145,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, badkey: crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); - memzero_explicit(&key, sizeof(keys)); + memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } diff --git 
a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c index 59d4ca4e72d8c2820804c2f284cd6ac442a47d61..1a734bd2070a249ef00eb49150c3519eec31e4ec 100644 --- a/drivers/crypto/ccp/ccp-debugfs.c +++ b/drivers/crypto/ccp/ccp-debugfs.c @@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = { }; static struct dentry *ccp_debugfs_dir; -static DEFINE_RWLOCK(ccp_debugfs_lock); +static DEFINE_MUTEX(ccp_debugfs_lock); #define MAX_NAME_LEN 20 @@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) struct dentry *debugfs_stats; struct dentry *debugfs_q_instance; struct dentry *debugfs_q_stats; - unsigned long flags; int i; if (!debugfs_initialized()) return; - write_lock_irqsave(&ccp_debugfs_lock, flags); + mutex_lock(&ccp_debugfs_lock); if (!ccp_debugfs_dir) ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - write_unlock_irqrestore(&ccp_debugfs_lock, flags); + mutex_unlock(&ccp_debugfs_lock); if (!ccp_debugfs_dir) return; diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index d4c81cb73bee6ccfdbea9a67af7237cea86649d2..3ee68ecde9ec1bf14cefd6f492ce483d38955832 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -462,6 +462,15 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) if (backlog) backlog->complete(backlog, -EINPROGRESS); + /* In case the send() helper did not issue any command to push + * to the engine because the input data was cached, continue to + * dequeue other requests as this is valid and not an error. 
+ */ + if (!commands && !results) { + kfree(request); + continue; + } + spin_lock_bh(&priv->ring[ring].egress_lock); list_add_tail(&request->list, &priv->ring[ring].list); spin_unlock_bh(&priv->ring[ring].egress_lock); diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index fcc0a606d74839bb46f2af6d0ac66bdb12b3d511..29cf7e00b574378e6cbbc7aa0357a4b9180f3a20 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -446,7 +446,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) if (!priv->ring[ring].need_dequeue) safexcel_dequeue(priv, ring); - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); if (result.error) { dev_warn(priv->dev, diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index d626aa485a7639ab8addb01b073657bcb8e231a5..69f29776591a40b97d26b4ea8ab224aa88ecd684 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -185,7 +185,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; queued = len = req->len - req->processed; - if (queued < crypto_ahash_blocksize(ahash)) + if (queued <= crypto_ahash_blocksize(ahash)) cache_len = queued; else cache_len = queued - areq->nbytes; @@ -199,7 +199,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, /* If this is not the last request and the queued data * is a multiple of a block, cache the last one for now. 
*/ - extra = queued - crypto_ahash_blocksize(ahash); + extra = crypto_ahash_blocksize(ahash); if (extra) { sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), @@ -494,7 +494,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) if (!priv->ring[ring].need_dequeue) safexcel_dequeue(priv, ring); - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); if (result.error) { dev_warn(priv->dev, "hash: completion error (%d)\n", @@ -819,7 +819,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq, init_completion(&result.completion); ret = crypto_ahash_digest(areq); - if (ret == -EINPROGRESS) { + if (ret == -EINPROGRESS || ret == -EBUSY) { wait_for_completion_interruptible(&result.completion); ret = result.error; } diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 1547cbe13dc2d04222d630577a61a15c1652bed8..a81d89b3b7d8d173666bf819337433f24f6bc0af 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = { module_platform_driver(sun4i_ss_driver); +MODULE_ALIAS("platform:sun4i-ss"); MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corentin LABBE "); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index c4480f5429d14e154939fc7ffb37a04bad15940d..3a92f3eba813d171c991e5d1db34c6b16d217597 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -657,7 +657,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach); struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { - struct sg_table *sg_table = ERR_PTR(-EINVAL); + struct sg_table *sg_table; might_sleep(); diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index f652a0e0f5a2a46d78bece1d41cd0895dfc5d593..3548caa9e9339f17208a62066ad055c842491e3b 100644 --- 
a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -163,6 +163,7 @@ struct mv_xor_v2_device { void __iomem *dma_base; void __iomem *glob_base; struct clk *clk; + struct clk *reg_clk; struct tasklet_struct irq_tasklet; struct list_head free_sw_desc; struct dma_device dmadev; @@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev) if (ret) return ret; + xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg"); + if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) { + if (!IS_ERR(xor_dev->reg_clk)) { + ret = clk_prepare_enable(xor_dev->reg_clk); + if (ret) + return ret; + } else { + return PTR_ERR(xor_dev->reg_clk); + } + } + xor_dev->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto disable_reg_clk; + } if (!IS_ERR(xor_dev->clk)) { ret = clk_prepare_enable(xor_dev->clk); if (ret) - return ret; + goto disable_reg_clk; } ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, @@ -866,8 +880,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev) free_msi_irqs: platform_msi_domain_free_irqs(&pdev->dev); disable_clk: - if (!IS_ERR(xor_dev->clk)) - clk_disable_unprepare(xor_dev->clk); + clk_disable_unprepare(xor_dev->clk); +disable_reg_clk: + clk_disable_unprepare(xor_dev->reg_clk); return ret; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f122c2a7b9f0bc7383ac5114107fb4aac018066d..7432c8894e321c415bb3f6d18a3ff73e3b181e04 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data) /* Returns 1 if state was updated, 0 otherwise */ static int pl330_update(struct pl330_dmac *pl330) { - struct dma_pl330_desc *descdone, *tmp; unsigned long flags; void __iomem *regs; u32 val; @@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330) } /* Now that we are 
in no hurry, do the callbacks */ - list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { + while (!list_empty(&pl330->req_done)) { + descdone = list_first_entry(&pl330->req_done, + struct dma_pl330_desc, rqd); list_del(&descdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); dma_pl330_rqcb(descdone, PL330_ERR_NONE); diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 6d89fb6a6a92a2f87c4bf9b521fead026742a54c..8fbf175fdcc75408834966bd22d3505d87088bfc 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -388,6 +388,7 @@ struct bam_device { struct device_dma_parameters dma_parms; struct bam_chan *channels; u32 num_channels; + u32 num_ees; /* execution environment ID, from DT */ u32 ee; @@ -1080,15 +1081,19 @@ static int bam_init(struct bam_device *bdev) u32 val; /* read revision and configuration information */ - val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; - val &= NUM_EES_MASK; + if (!bdev->num_ees) { + val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)); + bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK; + } /* check that configured EE is within range */ - if (bdev->ee >= val) + if (bdev->ee >= bdev->num_ees) return -EINVAL; - val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); - bdev->num_channels = val & BAM_NUM_PIPES_MASK; + if (!bdev->num_channels) { + val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); + bdev->num_channels = val & BAM_NUM_PIPES_MASK; + } if (bdev->controlled_remotely) return 0; @@ -1183,6 +1188,18 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, "qcom,controlled-remotely"); + if (bdev->controlled_remotely) { + ret = of_property_read_u32(pdev->dev.of_node, "num-channels", + &bdev->num_channels); + if (ret) + dev_err(bdev->dev, "num-channels unspecified in dt\n"); + + ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees", + &bdev->num_ees); + if (ret) + 
dev_err(bdev->dev, "num-ees unspecified in dt\n"); + } + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); if (IS_ERR(bdev->bamclk)) return PTR_ERR(bdev->bamclk); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2b2c7db3e48043fcdb1d0ed377613dde185f0a1b..9d6ce5051d8f580ed3fd7a14d47e80a1a2eb03de 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -880,7 +880,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, rcar_dmac_chan_configure_desc(chan, desc); - max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; + max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; /* * Allocate and fill the transfer chunk descriptors. We own the only @@ -1264,8 +1264,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, * If the cookie doesn't correspond to the currently running transfer * then the descriptor hasn't been processed yet, and the residue is * equal to the full descriptor size. + * Also, a client driver is possible to call this function before + * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running" + * will be the next descriptor, and the done list will appear. So, if + * the argument cookie matches the done list's cookie, we can assume + * the residue is zero. 
*/ if (cookie != desc->async_tx.cookie) { + list_for_each_entry(desc, &chan->desc.done, node) { + if (cookie == desc->async_tx.cookie) + return 0; + } list_for_each_entry(desc, &chan->desc.pending, node) { if (cookie == desc->async_tx.cookie) return desc->size; diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c index 3408cf975dcfad3938773afb834aad8059c38858..0fa9a0a090bca10098653dc998cef653549c1b25 100644 --- a/drivers/esoc/esoc-mdm-4x.c +++ b/drivers/esoc/esoc-mdm-4x.c @@ -101,7 +101,7 @@ static void mdm_enable_irqs(struct mdm_ctrl *mdm) } } -static void mdm_disable_irqs(struct mdm_ctrl *mdm) +void mdm_disable_irqs(struct mdm_ctrl *mdm) { if (!mdm) return; @@ -374,6 +374,9 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc) case ESOC_BOOT_FAIL: esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc); break; + case ESOC_PON_RETRY: + esoc_clink_evt_notify(ESOC_RETRY_PON_EVT, esoc); + break; case ESOC_UPGRADE_AVAILABLE: break; case ESOC_DEBUG_DONE: @@ -415,7 +418,7 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc) mdm_disable_irqs(mdm); mdm->debug = 0; mdm->ready = false; - mdm_cold_reset(mdm); + mdm_power_down(mdm); break; }; } diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c index 0f9f1207cb2feacaddcac8c1bccd0557fa447935..cba2128f5295af3c57826e893524d0562729ab94 100644 --- a/drivers/esoc/esoc-mdm-drv.c +++ b/drivers/esoc/esoc-mdm-drv.c @@ -16,8 +16,19 @@ #include #include #include "esoc.h" +#include "esoc-mdm.h" #include "mdm-dbg.h" +/* Maximum number of powerup trial requests per session */ +#define ESOC_MAX_PON_REQ 2 + +enum esoc_pon_state { + PON_INIT, + PON_SUCCESS, + PON_RETRY, + PON_FAIL +}; + enum { PWR_OFF = 0x1, PWR_ON, @@ -33,10 +44,10 @@ enum { struct mdm_drv { unsigned int mode; struct esoc_eng cmd_eng; - struct completion boot_done; + struct completion pon_done; struct completion req_eng_wait; struct esoc_clink *esoc_clink; - bool boot_fail; + enum esoc_pon_state 
pon_state; struct workqueue_struct *mdm_queue; struct work_struct ssr_work; struct notifier_block esoc_restart; @@ -66,18 +77,22 @@ static void mdm_handle_clink_evt(enum esoc_evt evt, switch (evt) { case ESOC_INVALID_STATE: - mdm_drv->boot_fail = true; - complete(&mdm_drv->boot_done); + mdm_drv->pon_state = PON_FAIL; + complete(&mdm_drv->pon_done); break; case ESOC_RUN_STATE: - mdm_drv->boot_fail = false; + mdm_drv->pon_state = PON_SUCCESS; mdm_drv->mode = RUN, - complete(&mdm_drv->boot_done); + complete(&mdm_drv->pon_done); + break; + case ESOC_RETRY_PON_EVT: + mdm_drv->pon_state = PON_RETRY; + complete(&mdm_drv->pon_done); break; case ESOC_UNEXPECTED_RESET: case ESOC_ERR_FATAL: /* - * Modem can crash while we are waiting for boot_done during + * Modem can crash while we are waiting for pon_done during * a subsystem_get(). Setting mode to CRASH will prevent a * subsequent subsystem_get() from entering poweron ops. Avoid * this by seting mode to CRASH only if device was up and @@ -205,6 +220,18 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys, return 0; } +static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink) +{ + struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink); + struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink); + + esoc_client_link_power_off(esoc_clink, false); + mdm_disable_irqs(mdm); + mdm_drv->pon_state = PON_INIT; + reinit_completion(&mdm_drv->pon_done); + reinit_completion(&mdm_drv->req_eng_wait); +} + static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys) { int ret; @@ -213,49 +240,65 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys) subsys); struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink); const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops; + struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink); int timeout = INT_MAX; - - if (!esoc_clink->auto_boot && !esoc_req_eng_enabled(esoc_clink)) { - dev_dbg(&esoc_clink->dev, "Wait for 
req eng registration\n"); - wait_for_completion(&mdm_drv->req_eng_wait); - } - if (mdm_drv->mode == PWR_OFF) { - if (mdm_dbg_stall_cmd(ESOC_PWR_ON)) - return -EBUSY; - ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink); - if (ret) { - dev_err(&esoc_clink->dev, "pwr on fail\n"); - return ret; + u8 pon_trial = 1; + + do { + if (!esoc_clink->auto_boot && + !esoc_req_eng_enabled(esoc_clink)) { + dev_dbg(&esoc_clink->dev, + "Wait for req eng registration\n"); + wait_for_completion(&mdm_drv->req_eng_wait); } - esoc_client_link_power_on(esoc_clink, false); - } else if (mdm_drv->mode == IN_DEBUG) { - ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink); - if (ret) { - dev_err(&esoc_clink->dev, "cannot exit debug mode\n"); - return ret; + if (mdm_drv->mode == PWR_OFF) { + if (mdm_dbg_stall_cmd(ESOC_PWR_ON)) + return -EBUSY; + ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink); + if (ret) { + dev_err(&esoc_clink->dev, "pwr on fail\n"); + return ret; + } + esoc_client_link_power_on(esoc_clink, false); + } else if (mdm_drv->mode == IN_DEBUG) { + ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink); + if (ret) { + dev_err(&esoc_clink->dev, + "cannot exit debug mode\n"); + return ret; + } + mdm_drv->mode = PWR_OFF; + ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink); + if (ret) { + dev_err(&esoc_clink->dev, "pwr on fail\n"); + return ret; + } + esoc_client_link_power_on(esoc_clink, true); } - mdm_drv->mode = PWR_OFF; - ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink); - if (ret) { - dev_err(&esoc_clink->dev, "pwr on fail\n"); - return ret; + + /* + * In autoboot case, it is possible that we can forever wait for + * boot completion, when esoc fails to boot. This is because + * there is no helper application which can alert esoc driver + * about boot failure. Prevent going to wait forever in such + * case. 
+ */ + if (esoc_clink->auto_boot) + timeout = 10 * HZ; + ret = wait_for_completion_timeout(&mdm_drv->pon_done, timeout); + if (mdm_drv->pon_state == PON_FAIL || ret <= 0) { + dev_err(&esoc_clink->dev, "booting failed\n"); + mdm_subsys_retry_powerup_cleanup(esoc_clink); + mdm_power_down(mdm); + return -EIO; + } else if (mdm_drv->pon_state == PON_RETRY) { + pon_trial++; + mdm_subsys_retry_powerup_cleanup(esoc_clink); + } else if (mdm_drv->pon_state == PON_SUCCESS) { + break; } - esoc_client_link_power_on(esoc_clink, true); - } + } while (pon_trial <= ESOC_MAX_PON_REQ); - /* - * In autoboot case, it is possible that we can forever wait for - * boot completion, when esoc fails to boot. This is because there - * is no helper application which can alert esoc driver about boot - * failure. Prevent going to wait forever in such case. - */ - if (esoc_clink->auto_boot) - timeout = 10 * HZ; - ret = wait_for_completion_timeout(&mdm_drv->boot_done, timeout); - if (mdm_drv->boot_fail || ret <= 0) { - dev_err(&esoc_clink->dev, "booting failed\n"); - return -EIO; - } return 0; } @@ -314,12 +357,12 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv) goto queue_err; } esoc_set_drv_data(esoc_clink, mdm_drv); - init_completion(&mdm_drv->boot_done); + init_completion(&mdm_drv->pon_done); init_completion(&mdm_drv->req_eng_wait); INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn); mdm_drv->esoc_clink = esoc_clink; mdm_drv->mode = PWR_OFF; - mdm_drv->boot_fail = false; + mdm_drv->pon_state = PON_INIT; mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler; ret = register_reboot_notifier(&mdm_drv->esoc_restart); if (ret) diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c index f2e6f7af446f93f7715f1db595fe08cecc914d2f..3f73364c2fedd739f86778da394fa7ad1e34daad 100644 --- a/drivers/esoc/esoc-mdm-pon.c +++ b/drivers/esoc/esoc-mdm-pon.c @@ -81,7 +81,7 @@ static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm) mdm_toggle_soft_reset(mdm, false); /* Add a 
delay to allow PON sequence to complete*/ - msleep(50); + msleep(150); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1); if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) { for (i = 0; i < MDM_PBLRDY_CNT; i++) { diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h index 4482d48dcb54f27707e711c7661f3282a31e5aca..fb33b8585e3bc76ba6086325983887d2d1b879ac 100644 --- a/drivers/esoc/esoc-mdm.h +++ b/drivers/esoc/esoc-mdm.h @@ -123,6 +123,8 @@ struct mdm_ops { struct platform_device *pdev); }; +void mdm_disable_irqs(struct mdm_ctrl *mdm); + static inline int mdm_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic) { return mdm->pon_ops->soft_reset(mdm, atomic); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index e8db9659a36b2de1662416d901aad9f3352d873a..fe0d30340e963b324c1ae4fd87c04d2e96a5237d 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -191,7 +191,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, char *s; int is_ff = 1, is_00 = 1, i; - if (dmi_ident[slot] || dm->length <= index + 16) + if (dmi_ident[slot] || dm->length < index + 16) return; d = (u8 *) dm + index; diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 1cc41c3d6315212a1add532e03c54ccfb508e38f..86a1ad17a32e2cd4acf3fb64a774b7cfabc271f1 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -54,6 +54,9 @@ static struct ptdump_info efi_ptdump_info = { static int __init ptdump_init(void) { + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); } device_initcall(ptdump_init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 8d689ab7e4291fad585448c6e679aa32556facff..1ef486b5d54b0ccef2dd6cc93795b1c628148cde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ 
-26,6 +26,7 @@ #define AMDGPU_AMDKFD_H_INCLUDED #include +#include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 659997bfff303b789f9f5fa6ae8ec17b0a02ae5c..cd84bd0b1eafdc571d51fa3e604416ac7088f375 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -322,14 +322,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) { unsigned i; int r, ret = 0; + long tmo_gfx, tmo_mm; + + tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT; + if (amdgpu_sriov_vf(adev)) { + /* for MM engines in hypervisor side they are not scheduled together + * with CP and SDMA engines, so even in exclusive mode MM engine could + * still running on other VF thus the IB TEST TIMEOUT for MM engines + * under SR-IOV should be set to a long time. 8 sec should be enough + * for the MM comes back to this VF. + */ + tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT; + } + + if (amdgpu_sriov_runtime(adev)) { + /* for CP & SDMA engines since they are scheduled together so + * need to make the timeout width enough to cover the time + * cost waiting for it coming back under RUNTIME only + */ + tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT; + } for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; + long tmo; if (!ring || !ring->ready) continue; - r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); + /* MM engine need more time */ + if (ring->funcs->type == AMDGPU_RING_TYPE_UVD || + ring->funcs->type == AMDGPU_RING_TYPE_VCE || + ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + tmo = tmo_mm; + else + tmo = tmo_gfx; + + r = amdgpu_ring_test_ib(ring, tmo); if (r) { ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69182eeca264e723d2ae8a7bce6567258600732d..1a30c54a0889f20e451389e495ba562d5549a1c0 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2889,7 +2889,13 @@ static int gfx_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if (amdgpu_sriov_vf(adev)) { - pr_debug("For SRIOV client, shouldn't do anything.\n"); + gfx_v9_0_cp_gfx_enable(adev, false); + /* must disable polling for SRIOV when hw finished, otherwise + * CPC engine may still keep fetching WB address which is already + * invalid after sw finished and trigger DMAR reading error in + * hypervisor side. + */ + WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); return 0; } gfx_v9_0_cp_enable(adev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6dc0f6e346e7fce357981df05dc26b4a39f2fe54..a1d71429fb720dd8a7b72be6996fbdcc2d417cbe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -456,7 +456,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); if (!adev->mc.vram_width) { /* hbm memory channel size */ - chansize = 128; + if (adev->flags & AMD_IS_APU) + chansize = 64; + else + chansize = 128; tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 1d312603de9fbb1624a98fbfac9a3d240490bf4b..308571b09c6b1c3bcd1d80d4c16a376abcebf988 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -166,8 +166,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer, packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; - /* TODO: scratch support */ - packet->sh_hidden_private_base_vmid = 0; + 
packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b33935fcf42838b190357336e799529db1e0da88..e6c6994e74badb4c043ac548ed83925c83fe868e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -176,10 +176,10 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_powerup_uvd(hwmgr); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -208,11 +208,11 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 261b828ad59086990f9f054906448a5526f4cbc4..2f3509be226f205a5f7234640e40bbb1ab93fbfb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -162,7 +162,7 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) AMD_CG_STATE_UNGATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); + AMD_PG_STATE_UNGATE); smu7_update_uvd_dpm(hwmgr, false); } diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h 
index 5f4c2e833a650dd6be2e6afb5e9835cf7e434e17..d665dd5af5dd80f2348dd1290c41ecb4756ab9d7 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = { {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ @@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index b1ab4ab09532b49901bdd7126f951c40a10424d5..60373d7eb22021127cb6f4a9bc7edf646e934be6 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector) struct sii902x *sii902x = connector_to_sii902x(connector); struct regmap *regmap = sii902x->regmap; u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; + struct device *dev = &sii902x->i2c->dev; unsigned long timeout; + unsigned int retries; unsigned int status; struct edid *edid; int num = 0; @@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector) time_before(jiffies, timeout)); if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { - dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n"); + dev_err(dev, "failed to acquire the i2c bus\n"); return -ETIMEDOUT; } @@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector) if (ret) return 
ret; - ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status); + /* + * Sometimes the I2C bus can stall after failure to use the + * EDID channel. Retry a few times to see if things clear + * up, else continue anyway. + */ + retries = 5; + do { + ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, + &status); + retries--; + } while (ret && retries); if (ret) - return ret; + dev_err(dev, "failed to read status (%d)\n", ret); ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_DDC_BUS_REQ | @@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector) if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | SII902X_SYS_CTRL_DDC_BUS_GRTD)) { - dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n"); + dev_err(dev, "failed to release the i2c bus\n"); return -ETIMEDOUT; } diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index abfd4530f98d45f8b33c308c986cd55afe1973f8..89594591032d601bc7a160abccad3b955a26765b 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -1136,6 +1136,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) static const u16 psr_setup_time_us[] = { PSR_SETUP_TIME(330), PSR_SETUP_TIME(275), + PSR_SETUP_TIME(220), PSR_SETUP_TIME(165), PSR_SETUP_TIME(110), PSR_SETUP_TIME(55), diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index be38ac7050d473b9be9be24ddb44c1ed79a152f9..a7b6734bc3c3248cbfaa52a5ebfbfb87882be987 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -749,7 +749,7 @@ static void remove_compat_control_link(struct drm_device *dev) if (!minor) return; - name = kasprintf(GFP_KERNEL, "controlD%d", minor->index); + name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64); if (!name) return; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 75b0eb9ae5316f2ee8591360754fe2161c8b5752..a85861438dc2bfa513a3d8fed6e4b0f648939433 100644 --- a/drivers/gpu/drm/drm_edid.c 
+++ b/drivers/gpu/drm/drm_edid.c @@ -2787,7 +2787,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, return closure.modes; } - +#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0 #define AUDIO_BLOCK 0x01 #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 @@ -3846,6 +3846,161 @@ add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid) return modes; } +/* + * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block + * @connector: connector corresponding to the HDMI sink + * @db: start of the CEA vendor specific block + * + * Parses the HDMI VCDB to extract sink info for @connector. + */ +static void +drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db) +{ + /* + * Check if the sink specifies underscan + * support for: + * BIT 5: preferred video format + * BIT 3: IT video format + * BIT 1: CE video format + */ + + connector->pt_scan_info = + (db[2] & (BIT(4) | BIT(5))) >> 4; + connector->it_scan_info = + (db[2] & (BIT(3) | BIT(2))) >> 2; + connector->ce_scan_info = + db[2] & (BIT(1) | BIT(0)); + + DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)", + (int) connector->pt_scan_info, + (int) connector->it_scan_info, + (int) connector->ce_scan_info); +} + +static bool drm_edid_is_luminance_value_present( +u32 block_length, enum luminance_value value) +{ + return block_length > NO_LUMINANCE_DATA && value <= block_length; +} + +/* + * drm_extract_hdr_db - Parse the HDMI HDR extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI HDR extended block + * + * Parses the HDMI HDR extended block to extract sink info for @connector. 
+ */ +static void +drm_extract_hdr_db(struct drm_connector *connector, const u8 *db) +{ + + u8 len = 0; + + if (!db) + return; + + len = db[0] & 0x1f; + /* Byte 3: Electro-Optical Transfer Functions */ + connector->hdr_eotf = db[2] & 0x3F; + + /* Byte 4: Static Metadata Descriptor Type 1 */ + connector->hdr_metadata_type_one = (db[3] & BIT(0)); + + /* Byte 5: Desired Content Maximum Luminance */ + if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE)) + connector->hdr_max_luminance = + db[MAXIMUM_LUMINANCE]; + + /* Byte 6: Desired Content Max Frame-average Luminance */ + if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE)) + connector->hdr_avg_luminance = + db[FRAME_AVERAGE_LUMINANCE]; + + /* Byte 7: Desired Content Min Luminance */ + if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE)) + connector->hdr_min_luminance = + db[MINIMUM_LUMINANCE]; + + connector->hdr_supported = true; + + DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf); + DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one); + DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance); + DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance); + DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance); +} + +/* + * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks + * @connector: connector corresponding to the HDMI sink + * @edid: handle to the EDID structure + * Parses the all extended tag blocks extract sink info for @connector. 
+ */ +static void +drm_hdmi_extract_extended_blk_info(struct drm_connector *connector, + struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == USE_EXTENDED_TAG) { + DRM_DEBUG_KMS("found extended tag block = %d\n", + db[1]); + switch (db[1]) { + case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK: + drm_extract_vcdb_info(connector, db); + break; + case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK: + drm_extract_hdr_db(connector, db); + break; + default: + break; + } + } + } + } +} + +static void +parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db) +{ + u8 len = cea_db_payload_len(db); + + if (len < 7) + return; + + if (db[4] != 1) + return; /* invalid version */ + + connector->max_tmds_char = db[5] * 5; + connector->scdc_present = db[6] & (1 << 7); + connector->rr_capable = db[6] & (1 << 6); + connector->flags_3d = db[6] & 0x7; + connector->supports_scramble = connector->scdc_present && + (db[6] & (1 << 3)); + + DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, " + "scdc %s, " + "rr %s, " + "3D flags 0x%x, " + "scramble %s\n", + connector->max_tmds_char, + connector->scdc_present ? "available" : "not available", + connector->rr_capable ? "capable" : "not capable", + connector->flags_3d, + connector->supports_scramble ? 
+ "supported" : "not supported"); +} + static void monitor_name(struct detailed_timing *t, void *data) { @@ -3972,6 +4127,9 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) /* HDMI Vendor-Specific Data Block */ if (cea_db_is_hdmi_vsdb(db)) drm_parse_hdmi_vsdb_audio(connector, db); + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_forum_vsdb(db)) + parse_hdmi_hf_vsdb(connector, db); break; default: break; @@ -4461,6 +4619,37 @@ static void drm_parse_cea_ext(struct drm_connector *connector, } } +static void +drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == VENDOR_BLOCK) { + /* HDMI Vendor-Specific Data Block */ + if (cea_db_is_hdmi_vsdb(db)) { + drm_parse_hdmi_vsdb_video( + connector, db); + drm_parse_hdmi_vsdb_audio( + connector, db); + } + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_forum_vsdb(db)) + parse_hdmi_hf_vsdb(connector, db); + } + } + } +} + static void drm_add_display_info(struct drm_connector *connector, struct edid *edid) { @@ -4498,6 +4687,11 @@ static void drm_add_display_info(struct drm_connector *connector, connector->name, info->bpc); } + /* Extract audio and video latency fields for the sink */ + drm_hdmi_extract_vsdbs_info(connector, edid); + /* Extract info from extended tag blocks */ + drm_hdmi_extract_extended_blk_info(connector, edid); + /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2b8bf2dd63874e36e0d29c4c383a74c2d365489d..9effe40f5fa5d6e6a61f7b1f21be7d24684ca466 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ 
b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -926,7 +926,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) struct drm_device *drm_dev = g2d->subdrv.drm_dev; struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; struct drm_exynos_pending_g2d_event *e; - struct timeval now; + struct timespec64 now; if (list_empty(&runqueue_node->event_list)) return; @@ -934,9 +934,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) e = list_first_entry(&runqueue_node->event_list, struct drm_exynos_pending_g2d_event, base.link); - do_gettimeofday(&now); + ktime_get_ts64(&now); e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; + e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; e->event.cmdlist_no = cmdlist_no; drm_send_event(drm_dev, &e->base); diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h index 30496134a3d0720bcc649711bd8d2ef5d35cf47a..d7cbe53c4c01f440cf9c0aecec153dc3d4f3f912 100644 --- a/drivers/gpu/drm/exynos/regs-fimc.h +++ b/drivers/gpu/drm/exynos/regs-fimc.h @@ -569,7 +569,7 @@ #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) -#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) +#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0)) /* Real input DMA size register */ #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3b2c0538e48d7b18c1210faef38612af6de6d337..90359c7954c8d66532808e82f8116121ac576ca5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3378,24 +3378,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) return 0; } -static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) -{ - return wait_for(intel_engine_is_idle(engine), timeout_ms); -} - static int wait_for_engines(struct 
drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - if (GEM_WARN_ON(wait_for_engine(engine, 50))) { - i915_gem_set_wedged(i915); - return -EIO; - } - - GEM_BUG_ON(intel_engine_get_seqno(engine) != - intel_engine_last_submit(engine)); + if (wait_for(intel_engines_are_idle(i915), 50)) { + DRM_ERROR("Failed to idle engines, declaring wedged!\n"); + i915_gem_set_wedged(i915); + return -EIO; } return 0; @@ -4575,7 +4563,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED); - if (ret) + if (ret && ret != -EIO) goto err_unlock; assert_kernel_context_is_current(dev_priv); @@ -4619,11 +4607,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * machine in an unusable condition. */ i915_gem_sanitize(dev_priv); - goto out_rpm_put; + + intel_runtime_pm_put(dev_priv); + return 0; err_unlock: mutex_unlock(&dev->struct_mutex); -out_rpm_put: intel_runtime_pm_put(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cc70e24702721375fe611b3fee1597314113fce4..61a2203b75df9e13211c8422bec27691c99934ed 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7044,6 +7044,9 @@ enum { #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) +#define GEN9_WM_CHICKEN3 _MMIO(0x5588) +#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9) + /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index b6a7e492c1a314d0292c02e9c5e9be8c433edbfd..c0e3e2ffb87d65f35a1f01063c66bba396066bb3 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -900,6 +900,10 @@ static int gen9_init_workarounds(struct 
intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES)); + /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ + if (IS_GEN9_LP(dev_priv)) + WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); + /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG); if (ret) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 240308f1b6ddf7cfd944a548f3bd8ad7f444de0b..dae4e22a2c3f8fa034db688052708eb3f7d0cd3b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -565,6 +565,36 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, return NOTIFY_OK; } +static int +intel_lvds_connector_register(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds = to_lvds_connector(connector); + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + lvds->lid_notifier.notifier_call = intel_lid_notify; + if (acpi_lid_notifier_register(&lvds->lid_notifier)) { + DRM_DEBUG_KMS("lid notifier registration failed\n"); + lvds->lid_notifier.notifier_call = NULL; + } + + return 0; +} + +static void +intel_lvds_connector_unregister(struct drm_connector *connector) +{ + struct intel_lvds_connector *lvds = to_lvds_connector(connector); + + if (lvds->lid_notifier.notifier_call) + acpi_lid_notifier_unregister(&lvds->lid_notifier); + + intel_connector_unregister(connector); +} + /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free @@ -577,9 +607,6 @@ static void intel_lvds_destroy(struct drm_connector *connector) struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); - if (lvds_connector->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds_connector->lid_notifier); - if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) kfree(lvds_connector->base.edid); @@ -600,8 +627,8 @@ static 
const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_connector_register, - .early_unregister = intel_connector_unregister, + .late_register = intel_lvds_connector_register, + .early_unregister = intel_lvds_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, @@ -818,6 +845,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Radiant P845", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "P845"), + }, + }, { } /* terminating entry */ }; @@ -1149,12 +1184,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; - lvds_connector->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { - DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds_connector->lid_notifier.notifier_call = NULL; - } - return; failed: diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 53e0b24beda6e0e2ee44c57550d5c752393b5863..d976391dfa31c01720a9e4c78fc024b4c13921f0 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { drm_crtc_vblank_on(crtc); +} +static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc)); @@ -293,6 +297,7 @@ static const 
struct drm_crtc_helper_funcs ipu_helper_funcs = { .mode_set_nofb = ipu_crtc_mode_set_nofb, .atomic_check = ipu_crtc_atomic_check, .atomic_begin = ipu_crtc_atomic_begin, + .atomic_flush = ipu_crtc_atomic_flush, .atomic_disable = ipu_crtc_atomic_disable, .atomic_enable = ipu_crtc_atomic_enable, }; diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 5155f0179b61744f41f18922a9d0c1b39ec28b10..05520202c96778c1401dac07a9b9ff768ba97b91 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c @@ -36,6 +36,7 @@ #include "meson_venc.h" #include "meson_vpp.h" #include "meson_viu.h" +#include "meson_canvas.h" #include "meson_registers.h" /* CRTC definition */ @@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv) } else meson_vpp_disable_interlace_vscaler_osd1(priv); + meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, + priv->viu.osd1_addr, priv->viu.osd1_stride, + priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, + MESON_CANVAS_BLKMODE_LINEAR); + /* Enable OSD1 */ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, priv->io_base + _REG(VPP_MISC)); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 7742c7d81ed8fbaac2e036a3c5d061ff553eed73..4ad8223c60eaeb74697f23e3e412c317b6d96662 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -180,40 +180,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu"); regs = devm_ioremap_resource(dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto free_drm; + } priv->io_base = regs; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!regs) - return -EADDRNOTAVAIL; + if (!regs) { + ret = -EADDRNOTAVAIL; + goto 
free_drm; + } priv->hhi = devm_regmap_init_mmio(dev, regs, &meson_regmap_config); if (IS_ERR(priv->hhi)) { dev_err(&pdev->dev, "Couldn't create the HHI regmap\n"); - return PTR_ERR(priv->hhi); + ret = PTR_ERR(priv->hhi); + goto free_drm; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); /* Simply ioremap since it may be a shared register zone */ regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!regs) - return -EADDRNOTAVAIL; + if (!regs) { + ret = -EADDRNOTAVAIL; + goto free_drm; + } priv->dmc = devm_regmap_init_mmio(dev, regs, &meson_regmap_config); if (IS_ERR(priv->dmc)) { dev_err(&pdev->dev, "Couldn't create the DMC regmap\n"); - return PTR_ERR(priv->dmc); + ret = PTR_ERR(priv->dmc); + goto free_drm; } priv->vsync_irq = platform_get_irq(pdev, 0); - drm_vblank_init(drm, 1); + ret = drm_vblank_init(drm, 1); + if (ret) + goto free_drm; + drm_mode_config_init(drm); drm->mode_config.max_width = 3840; drm->mode_config.max_height = 2160; diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 5e8b392b9d1ff0da66a429b386a308019467c62a..8450d6ac8c9bc1dcd049fd8c2205d1c5a8c7c924 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -43,6 +43,9 @@ struct meson_drm { bool osd1_commit; uint32_t osd1_ctrl_stat; uint32_t osd1_blk0_cfg[5]; + uint32_t osd1_addr; + uint32_t osd1_stride; + uint32_t osd1_height; } viu; struct { diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 17e96fa4786854e2001a9c8553c0500127db403b..0b6011b8d6321a2e32e7aaaace5d85828e725d8b 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane, /* Update Canvas with buffer address */ gem = drm_fb_cma_get_gem_obj(fb, 0); - meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, - gem->paddr, fb->pitches[0], - fb->height, MESON_CANVAS_WRAP_NONE, - 
MESON_CANVAS_BLKMODE_LINEAR); + priv->viu.osd1_addr = gem->paddr; + priv->viu.osd1_stride = fb->pitches[0]; + priv->viu.osd1_height = fb->height; spin_unlock_irqrestore(&priv->drm->event_lock, flags); } diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 840e44e8a5e15edb4a833f04da88816656b7d3bf..7731b96aac30ca454410be29ff16a3b5b2855ba8 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -371,8 +371,11 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, bool i2c_read = input_msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); - if (!i2c_mot || !i2c_read || (input_msg->size == 0)) + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) { + /* reset the offset for all other transaction types */ + aux->offset = 0; return; + } /* * Sending the segment value and EDID offset will be performed @@ -417,7 +420,6 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, dp_aux_cmd_fifo_tx(aux, &helper_msg); end: aux->offset += message_size; - if (aux->offset == 0x80 || aux->offset == 0x100) aux->segment = 0x0; /* reset segment at end of block */ } @@ -485,8 +487,25 @@ static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux, aux->aux_error_num = DP_AUX_ERR_NONE; + if (!aux->dpcd || !aux->edid) { + pr_err("invalid aux/dpcd structure\n"); + goto end; + } + + if ((msg->address + msg->size) > SZ_16K) { + pr_err("invalid dpcd access: addr=0x%x, size=0x%x\n", + msg->address + msg->size); + goto address_error; + } + + if ((msg->size + aux->offset) > SZ_256) { + pr_err("invalid edid access: offset=0x%x, size=0x%x\n", + aux->offset, msg->size); + goto address_error; + } + if (aux->native) { - if (aux->read && ((msg->address + msg->size) < SZ_1K)) { + if (aux->read) { aux->dp_aux.reg = msg->address; reinit_completion(&aux->comp); @@ -525,6 +544,10 @@ static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux, DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; } + return msg->size; + 
+address_error: + memset(msg->buffer, 0, msg->size); ret = msg->size; end: return ret; @@ -555,7 +578,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux, } ret = dp_aux_cmd_fifo_tx(aux, msg); - if ((ret < 0) && aux->native && !atomic_read(&aux->aborted)) { + if ((ret < 0) && !atomic_read(&aux->aborted)) { aux->retry_cnt++; if (!(aux->retry_cnt % retry_count)) aux->catalog->update_aux_cfg(aux->catalog, diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 19e4d5244f44f3d03b27a45935f0cee298393840..89cd4cc6091ae25487e8bdd744f5806da2e6d9d8 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -222,6 +222,11 @@ static void dp_display_update_hdcp_info(struct dp_display_private *dp) return; } + if (dp->debug->sim_mode) { + pr_debug("skip HDCP version checks for simulation mode\n"); + return; + } + fd = dp->hdcp.hdcp2; if (fd) ops = sde_dp_hdcp2p2_start(fd); @@ -372,8 +377,10 @@ static void dp_display_unbind(struct device *dev, struct device *master, return; } - (void)dp->power->power_client_deinit(dp->power); - (void)dp->aux->drm_aux_deregister(dp->aux); + if (dp->power) + (void)dp->power->power_client_deinit(dp->power); + if (dp->aux) + (void)dp->aux->drm_aux_deregister(dp->aux); dp_display_deinitialize_hdcp(dp); } @@ -542,8 +549,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) return 0; rc = dp->panel->read_sink_caps(dp->panel, - dp->dp_display.base_connector); - + dp->dp_display.base_connector, dp->usbpd->multi_func); if (rc) { /* * ETIMEDOUT --> cable may have been removed @@ -677,6 +683,8 @@ static int dp_display_usbpd_configure_cb(struct device *dev) goto end; } + atomic_set(&dp->aborted, 0); + dp_display_host_init(dp); /* check for hpd high and framework ready */ @@ -761,6 +769,7 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev) /* wait for idle state */ cancel_delayed_work(&dp->connect_work); + cancel_work(&dp->attention_work); 
flush_workqueue(dp->wq); dp_display_handle_disconnect(dp); @@ -768,7 +777,6 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev) if (!dp->debug->sim_mode) dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE); - atomic_set(&dp->aborted, 0); end: return rc; } @@ -911,6 +919,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev) /* wait for idle state */ cancel_delayed_work(&dp->connect_work); + cancel_work(&dp->attention_work); flush_workqueue(dp->wq); dp_display_handle_disconnect(dp); @@ -926,7 +935,7 @@ static void dp_display_connect_work(struct work_struct *work) struct dp_display_private *dp = container_of(dw, struct dp_display_private, connect_work); - if (dp->dp_display.is_connected) { + if (dp->dp_display.is_connected && dp_display_framework_ready(dp)) { pr_debug("HPD already on\n"); return; } @@ -1251,6 +1260,11 @@ static int dp_display_enable(struct dp_display *dp_display, void *panel) dp->aux->init(dp->aux, dp->parser->aux_cfg); + if (dp->debug->psm_enabled) { + dp->link->psm_config(dp->link, &dp->panel->link_info, false); + dp->debug->psm_enabled = false; + } + rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active); if (!rc) dp->power_on = true; @@ -1367,7 +1381,8 @@ static int dp_display_pre_disable(struct dp_display *dp_display, void *panel) dp->hdcp.ops->off(dp->hdcp.data); } - if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done) { + if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) && + dp->usbpd->alt_mode_cfg_done) { if (dp_panel->audio_supported) dp_panel->audio->off(dp_panel->audio); @@ -1425,10 +1440,9 @@ static int dp_display_disable(struct dp_display *dp_display, void *panel) * any notification from driver. Initialize post_open callback to notify * DP connection once framework restarts. 
*/ - if (dp->usbpd->hpd_high && dp->usbpd->alt_mode_cfg_done && - !dp->mst.mst_active) { + if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) && + dp->usbpd->alt_mode_cfg_done && !dp->mst.mst_active) { dp_display->post_open = dp_display_post_open; - dp->dp_display.is_connected = false; dp->dp_display.is_sst_connected = false; } diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c index 04cb948134f460a342804631c4a599945ecaee84..6d1f9a27c4cf2bb5b6a8cf3fa33865a195684c7e 100644 --- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c +++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c @@ -359,6 +359,12 @@ static int dp_hdcp2p2_aux_read_message(struct dp_hdcp2p2_ctrl *ctrl) goto exit; } + if (!buf) { + pr_err("invalid request buffer\n"); + rc = -EINVAL; + goto exit; + } + pr_debug("request: offset(0x%x), size(%d)\n", offset, size); do { @@ -471,9 +477,9 @@ static void dp_hdcp2p2_send_msg_work(struct kthread_work *work) exit: if (rc == -ETIMEDOUT) - cdata.cmd = HDCP_2X_CMD_MSG_RECV_TIMEOUT; + cdata.cmd = HDCP_2X_CMD_MSG_SEND_TIMEOUT; else if (rc) - cdata.cmd = HDCP_2X_CMD_MSG_RECV_FAILED; + cdata.cmd = HDCP_2X_CMD_MSG_SEND_FAILED; dp_hdcp2p2_wakeup_lib(ctrl, &cdata); } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 16aa6e74963f026080533e86cc76bb000b0a712e..28ef79db94b5bf770e7a9a74d58733b9bf272cbe 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -662,7 +662,7 @@ static void dp_panel_config_tr_unit(struct dp_panel *dp_panel) catalog->update_transfer_unit(catalog); } -static int dp_panel_read_dpcd(struct dp_panel *dp_panel) +static int dp_panel_read_dpcd(struct dp_panel *dp_panel, bool multi_func) { int rlen, rc = 0; struct dp_panel_private *panel; @@ -734,6 +734,10 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + if (multi_func) + link_info->num_lanes = min_t(unsigned int, + 
link_info->num_lanes, 2); + pr_debug("lane_count=%d\n", link_info->num_lanes); if (drm_dp_enhanced_frame_cap(dpcd)) @@ -857,7 +861,7 @@ static int dp_panel_read_edid(struct dp_panel *dp_panel, } static int dp_panel_read_sink_caps(struct dp_panel *dp_panel, - struct drm_connector *connector) + struct drm_connector *connector, bool multi_func) { int rc = 0, rlen, count, downstream_ports; const int count_len = 1; @@ -871,7 +875,7 @@ static int dp_panel_read_sink_caps(struct dp_panel *dp_panel, panel = container_of(dp_panel, struct dp_panel_private, dp_panel); - rc = dp_panel_read_dpcd(dp_panel); + rc = dp_panel_read_dpcd(dp_panel, multi_func); if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code( dp_panel->link_info.rate)) || !is_lane_count_valid( dp_panel->link_info.num_lanes) || diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index eda2d9e8d72a042d9fac6c0a16a01a573fabca1e..600af83c09ff10b7016041812620ecea941602df 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -98,7 +98,7 @@ struct dp_panel { int (*deinit)(struct dp_panel *dp_panel); int (*hw_cfg)(struct dp_panel *dp_panel); int (*read_sink_caps)(struct dp_panel *dp_panel, - struct drm_connector *connector); + struct drm_connector *connector, bool multi_func); u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel); u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp, u32 mode_pclk_khz); diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h index a8d4e409d09eeb78e383df05dd4d606b5acfa057..e33519a663786b9d1bb54df2eb09e56a21eab000 100644 --- a/drivers/gpu/drm/msm/dp/dp_reg.h +++ b/drivers/gpu/drm/msm/dp/dp_reg.h @@ -267,9 +267,9 @@ #define DP_PHY_AUX_CFG7 (0x0000003C) #define DP_PHY_AUX_CFG8 (0x00000040) #define DP_PHY_AUX_CFG9 (0x00000044) -#define DP_PHY_AUX_INTERRUPT_MASK (0x00000048) -#define DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C) -#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC) +#define 
DP_PHY_AUX_INTERRUPT_MASK (0x00000054) +#define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000058) +#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000D8) #define DP_PHY_SPARE0 (0x00AC) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c index 06b3e48f096261116fd22540ab299ecb2172cb90..1eb7da710877a34089009ed5114dece2fbcee2e5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c @@ -244,6 +244,7 @@ static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy) phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v4_0; phy->ops.phy_lane_reset = dsi_phy_hw_v4_0_lane_reset; phy->ops.toggle_resync_fifo = dsi_phy_hw_v4_0_toggle_resync_fifo; + phy->ops.reset_clk_en_sel = dsi_phy_hw_v4_0_reset_clk_en_sel; } /** diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h index 03cb251dbe3f159b628ac1524c92f1b779933704..29f75a83e69ce892cf9bf0df7a1e24641f4792f1 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h @@ -119,6 +119,7 @@ int dsi_phy_hw_timing_val_v4_0(struct dsi_phy_per_lane_cfgs *timing_cfg, u32 *timing_val, u32 size); int dsi_phy_hw_v4_0_lane_reset(struct dsi_phy_hw *phy); void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy); +void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy); /* DSI controller common ops */ u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 2587a6efd79fe9640d535c6a07f8c9e131c20282..3263b55424b1b073f3b34b1c836b87de4af16b8e 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -466,7 +466,7 @@ static int dsi_ctrl_init_regmap(struct platform_device *pdev, } ctrl->hw.base = ptr; - pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name, + pr_debug("[%s] map 
dsi_ctrl registers to %pK\n", ctrl->name, ctrl->hw.base); switch (ctrl->version) { @@ -2851,10 +2851,10 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags) } /** - * _dsi_ctrl_cache_misr - Cache frame MISR value + * dsi_ctrl_cache_misr - Cache frame MISR value * @dsi_ctrl: Pointer to associated dsi_ctrl structure */ -static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl) +void dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl) { u32 misr; @@ -2869,8 +2869,8 @@ static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl) pr_debug("DSI_%d misr_cache = %x\n", dsi_ctrl->cell_index, dsi_ctrl->misr_cache); - } + /** * dsi_ctrl_get_host_engine_init_state() - Return host init state * @dsi_ctrl: DSI controller handle. @@ -2968,9 +2968,6 @@ int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl, goto error; } } else if (state == DSI_CTRL_POWER_VREG_OFF) { - if (dsi_ctrl->misr_enable) - _dsi_ctrl_cache_misr(dsi_ctrl); - rc = dsi_ctrl_enable_supplies(dsi_ctrl, false); if (rc) { pr_err("[%d]failed to disable vreg supplies, rc=%d\n", diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index 6ea34689bc3c185f041f1f0db3835bb135af9307..edb1b31f323833b1eca169682d2085d69a71f8e5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -667,6 +667,12 @@ int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl, */ u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl); +/** + * dsi_ctrl_cache_misr - Cache frame MISR value + * @dsi_ctrl: DSI controller handle. 
+ */ +void dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl); + /** * dsi_ctrl_drv_register() - register platform driver for dsi controller */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h index d45f8493d29d446da3ffa9a47079e4de51ad881a..42d7cc0f27c524da328e1c7e549d242ac5c6a0cf 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -242,6 +242,8 @@ enum dsi_dfps_type { * @DSI_CMD_SET_ROI: Panel ROI update * @DSI_CMD_SET_TIMING_SWITCH: Timing switch * @DSI_CMD_SET_POST_TIMING_SWITCH: Post timing switch + * @DSI_CMD_SET_QSYNC_ON Enable qsync mode + * @DSI_CMD_SET_QSYNC_OFF Disable qsync mode * @DSI_CMD_SET_MAX */ enum dsi_cmd_set_type { @@ -266,6 +268,8 @@ enum dsi_cmd_set_type { DSI_CMD_SET_ROI, DSI_CMD_SET_TIMING_SWITCH, DSI_CMD_SET_POST_TIMING_SWITCH, + DSI_CMD_SET_QSYNC_ON, + DSI_CMD_SET_QSYNC_OFF, DSI_CMD_SET_MAX }; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 31f94b08c42dc34a11bb70daaab096a1f563e34d..d0ca32c99468ca76ec1810bbd3476cca1137d904 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -16,6 +16,7 @@ #include #include +#include #include #include "msm_drv.h" @@ -36,6 +37,7 @@ #define NO_OVERRIDE -1 #define MISR_BUFF_SIZE 256 +#define ESD_MODE_STRING_MAX_LEN 256 #define MAX_NAME_SIZE 64 @@ -123,8 +125,11 @@ int dsi_display_set_backlight(struct drm_connector *connector, panel = dsi_display->panel; - if (!dsi_panel_initialized(panel)) - return -EINVAL; + mutex_lock(&panel->panel_lock); + if (!dsi_panel_initialized(panel)) { + rc = 
-EINVAL; + goto error; + } panel->bl_config.bl_level = bl_lvl; @@ -159,6 +164,7 @@ int dsi_display_set_backlight(struct drm_connector *connector, } error: + mutex_unlock(&panel->panel_lock); return rc; } @@ -305,6 +311,102 @@ static void dsi_display_aspace_cb_locked(void *cb_data, bool is_detach) dsi_panel_release_panel_lock(display->panel); } +static irqreturn_t dsi_display_panel_te_irq_handler(int irq, void *data) +{ + struct dsi_display *display = (struct dsi_display *)data; + + /* + * This irq handler is used for sole purpose of identifying + * ESD attacks on panel and we can safely assume IRQ_HANDLED + * in case of display not being initialized yet + */ + if (!display) + return IRQ_HANDLED; + + complete_all(&display->esd_te_gate); + return IRQ_HANDLED; +} + +static void dsi_display_change_te_irq_status(struct dsi_display *display, + bool enable) +{ + if (!display) { + pr_err("Invalid params\n"); + return; + } + + /* Handle unbalanced irq enable/disbale calls */ + if (enable && !display->is_te_irq_enabled) { + enable_irq(gpio_to_irq(display->disp_te_gpio)); + display->is_te_irq_enabled = true; + } else if (!enable && display->is_te_irq_enabled) { + disable_irq(gpio_to_irq(display->disp_te_gpio)); + display->is_te_irq_enabled = false; + } +} + +static void dsi_display_register_te_irq(struct dsi_display *display) +{ + int rc = 0; + struct platform_device *pdev; + struct device *dev; + + pdev = display->pdev; + if (!pdev) { + pr_err("invalid platform device\n"); + return; + } + + dev = &pdev->dev; + if (!dev) { + pr_err("invalid device\n"); + return; + } + + if (!gpio_is_valid(display->disp_te_gpio)) { + rc = -EINVAL; + goto error; + } + + init_completion(&display->esd_te_gate); + + rc = devm_request_irq(dev, gpio_to_irq(display->disp_te_gpio), + dsi_display_panel_te_irq_handler, IRQF_TRIGGER_FALLING, + "TE_GPIO", display); + if (rc) { + pr_err("TE request_irq failed for ESD rc:%d\n", rc); + goto error; + } + + disable_irq(gpio_to_irq(display->disp_te_gpio)); + 
display->is_te_irq_enabled = false; + + return; + +error: + /* disable the TE based ESD check */ + pr_warn("Unable to register for TE IRQ\n"); + if (display->panel->esd_config.status_mode == ESD_MODE_PANEL_TE) + display->panel->esd_config.esd_enabled = false; +} + +static bool dsi_display_is_te_based_esd(struct dsi_display *display) +{ + u32 status_mode = 0; + + if (!display->panel) { + pr_err("Invalid panel data\n"); + return false; + } + + status_mode = display->panel->esd_config.status_mode; + + if (status_mode == ESD_MODE_PANEL_TE && + gpio_is_valid(display->disp_te_gpio)) + return true; + return false; +} + /* Allocate memory for cmd dma tx buffer */ static int dsi_host_alloc_cmd_tx_buffer(struct dsi_display *display) { @@ -415,6 +517,27 @@ static bool dsi_display_validate_reg_read(struct dsi_panel *panel) return false; } +static void dsi_display_parse_te_gpio(struct dsi_display *display) +{ + struct platform_device *pdev; + struct device *dev; + + pdev = display->pdev; + if (!pdev) { + pr_err("Inavlid platform device\n"); + return; + } + + dev = &pdev->dev; + if (!dev) { + pr_err("Inavlid platform device\n"); + return; + } + + display->disp_te_gpio = of_get_named_gpio(dev->of_node, + "qcom,platform-te-gpio", 0); +} + static int dsi_display_read_status(struct dsi_display_ctrl *ctrl, struct dsi_panel *panel) { @@ -538,10 +661,9 @@ static int dsi_display_status_reg_read(struct dsi_display *display) } } exit: - if (rc <= 0) { - dsi_display_ctrl_irq_update(display, false); + /* mask only error interrupts */ + if (rc <= 0) dsi_display_mask_ctrl_error_interrupts(display); - } dsi_display_cmd_engine_disable(display); done: @@ -560,10 +682,19 @@ static int dsi_display_status_bta_request(struct dsi_display *display) static int dsi_display_status_check_te(struct dsi_display *display) { - int rc = 0; + int rc = 1; + int const esd_te_timeout = msecs_to_jiffies(3*20); - pr_debug(" ++\n"); - /* TODO: wait for TE interrupt from panel */ + 
dsi_display_change_te_irq_status(display, true); + + reinit_completion(&display->esd_te_gate); + if (!wait_for_completion_timeout(&display->esd_te_gate, + esd_te_timeout)) { + pr_err("ESD check failed\n"); + rc = -EINVAL; + } + + dsi_display_change_te_irq_status(display, false); return rc; } @@ -914,6 +1045,71 @@ static ssize_t debugfs_misr_setup(struct file *file, return rc; } +static ssize_t debugfs_misr_read(struct file *file, + char __user *user_buf, + size_t user_len, + loff_t *ppos) +{ + struct dsi_display *display = file->private_data; + char *buf; + u32 len = 0; + int rc = 0; + struct dsi_ctrl *dsi_ctrl; + int i; + u32 misr; + size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE); + + if (!display) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(max_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&display->display_lock); + rc = dsi_display_clk_ctrl(display->dsi_clk_handle, + DSI_CORE_CLK, DSI_CLK_ON); + if (rc) { + pr_err("[%s] failed to enable DSI core clocks, rc=%d\n", + display->name, rc); + goto error; + } + + for (i = 0; i < display->ctrl_count; i++) { + dsi_ctrl = display->ctrl[i].ctrl; + misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl); + + len += snprintf((buf + len), max_len - len, + "DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr); + + if (len >= max_len) + break; + } + + rc = dsi_display_clk_ctrl(display->dsi_clk_handle, + DSI_CORE_CLK, DSI_CLK_OFF); + if (rc) { + pr_err("[%s] failed to disable DSI core clocks, rc=%d\n", + display->name, rc); + goto error; + } + + if (copy_to_user(user_buf, buf, len)) { + rc = -EFAULT; + goto error; + } + + *ppos += len; + +error: + mutex_unlock(&display->display_lock); + kfree(buf); + return len; +} + static ssize_t debugfs_esd_trigger_check(struct file *file, const char __user *user_buf, size_t user_len, @@ -970,19 +1166,82 @@ static ssize_t debugfs_esd_trigger_check(struct file *file, return rc; } -static ssize_t debugfs_misr_read(struct file *file, +static ssize_t 
debugfs_alter_esd_check_mode(struct file *file, + const char __user *user_buf, + size_t user_len, + loff_t *ppos) +{ + struct dsi_display *display = file->private_data; + struct drm_panel_esd_config *esd_config; + char *buf; + int rc = 0; + size_t len = min_t(size_t, user_len, ESD_MODE_STRING_MAX_LEN); + + if (!display) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, user_buf, user_len)) { + rc = -EINVAL; + goto error; + } + + buf[len] = '\0'; /* terminate the string */ + if (!display->panel) { + rc = -EINVAL; + goto error; + } + + esd_config = &display->panel->esd_config; + if (!esd_config) { + pr_err("Invalid panel esd config\n"); + rc = -EINVAL; + goto error; + } + + if (!esd_config->esd_enabled) + goto error; + + if (!strcmp(buf, "te_signal_check\n")) { + esd_config->status_mode = ESD_MODE_PANEL_TE; + dsi_display_change_te_irq_status(display, true); + } + + if (!strcmp(buf, "reg_read\n")) { + rc = dsi_panel_parse_esd_reg_read_configs(display->panel); + if (rc) { + pr_err("failed to alter esd check mode,rc=%d\n", + rc); + rc = user_len; + goto error; + } + esd_config->status_mode = ESD_MODE_REG_READ; + if (dsi_display_is_te_based_esd(display)) + dsi_display_change_te_irq_status(display, false); + } + + rc = len; +error: + kfree(buf); + return rc; +} + +static ssize_t debugfs_read_esd_check_mode(struct file *file, char __user *user_buf, size_t user_len, loff_t *ppos) { struct dsi_display *display = file->private_data; + struct drm_panel_esd_config *esd_config; char *buf; - u32 len = 0; int rc = 0; - struct dsi_ctrl *dsi_ctrl; - int i; - u32 misr; - size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE); + size_t len = min_t(size_t, user_len, ESD_MODE_STRING_MAX_LEN); if (!display) return -ENODEV; @@ -990,35 +1249,36 @@ static ssize_t debugfs_misr_read(struct file *file, if (*ppos) return 0; - buf = kzalloc(max_len, GFP_KERNEL); + if (!display->panel) { + pr_err("invalid 
panel data\n"); + return -EINVAL; + } + + buf = kzalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; - mutex_lock(&display->display_lock); - rc = dsi_display_clk_ctrl(display->dsi_clk_handle, - DSI_CORE_CLK, DSI_CLK_ON); - if (rc) { - pr_err("[%s] failed to enable DSI core clocks, rc=%d\n", - display->name, rc); + esd_config = &display->panel->esd_config; + if (!esd_config) { + pr_err("Invalid panel esd config\n"); + rc = -EINVAL; goto error; } - for (i = 0; i < display->ctrl_count; i++) { - dsi_ctrl = display->ctrl[i].ctrl; - misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl); + if (!esd_config->esd_enabled) { + rc = snprintf(buf, len, "ESD feature not enabled"); + goto output_mode; + } - len += snprintf((buf + len), max_len - len, - "DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr); + if (esd_config->status_mode == ESD_MODE_REG_READ) + rc = snprintf(buf, len, "reg_read"); - if (len >= max_len) - break; - } + if (esd_config->status_mode == ESD_MODE_PANEL_TE) + rc = snprintf(buf, len, "te_signal_check"); - rc = dsi_display_clk_ctrl(display->dsi_clk_handle, - DSI_CORE_CLK, DSI_CLK_OFF); - if (rc) { - pr_err("[%s] failed to disable DSI core clocks, rc=%d\n", - display->name, rc); +output_mode: + if (!rc) { + rc = -EINVAL; goto error; } @@ -1030,7 +1290,6 @@ static ssize_t debugfs_misr_read(struct file *file, *ppos += len; error: - mutex_unlock(&display->display_lock); kfree(buf); return len; } @@ -1051,6 +1310,12 @@ static const struct file_operations esd_trigger_fops = { .write = debugfs_esd_trigger_check, }; +static const struct file_operations esd_check_mode_fops = { + .open = simple_open, + .write = debugfs_alter_esd_check_mode, + .read = debugfs_read_esd_check_mode, +}; + static int dsi_display_debugfs_init(struct dsi_display *display) { int rc = 0; @@ -1090,6 +1355,18 @@ static int dsi_display_debugfs_init(struct dsi_display *display) goto error_remove_dir; } + dump_file = debugfs_create_file("esd_check_mode", + 0644, + dir, + display, + &esd_check_mode_fops); 
+ if (IS_ERR_OR_NULL(dump_file)) { + rc = PTR_ERR(dump_file); + pr_err("[%s] debugfs for esd check mode failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + misr_data = debugfs_create_file("misr_data", 0600, dir, @@ -1904,6 +2181,16 @@ static void dsi_display_toggle_resync_fifo(struct dsi_display *display) ctrl = &display->ctrl[i]; dsi_phy_toggle_resync_fifo(ctrl->phy); } + + /* + * After retime buffer synchronization we need to turn of clk_en_sel + * bit on each phy. + */ + for (i = 0; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + dsi_phy_reset_clk_en_sel(ctrl->phy); + } + } static int dsi_display_ctrl_update(struct dsi_display *display) @@ -2608,8 +2895,9 @@ int dsi_pre_clkoff_cb(void *priv, enum dsi_clk_type clk, enum dsi_clk_state new_state) { - int rc = 0; + int rc = 0, i; struct dsi_display *display = priv; + struct dsi_display_ctrl *ctrl; if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF)) { /* @@ -2656,6 +2944,15 @@ int dsi_pre_clkoff_cb(void *priv, } /* dsi will not be able to serve irqs from here on */ dsi_display_ctrl_irq_update(display, false); + + /* cache the MISR values */ + for (i = 0; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + if (!ctrl->ctrl) + continue; + dsi_ctrl_cache_misr(ctrl->ctrl); + } + } return rc; @@ -3021,6 +3318,9 @@ static int dsi_display_parse_dt(struct dsi_display *display) goto error; } + /* Parse TE gpio */ + dsi_display_parse_te_gpio(display); + pr_debug("success\n"); error: return rc; @@ -3593,16 +3893,24 @@ static int _dsi_display_dev_deinit(struct dsi_display *display) } /** - * dsi_display_splash_res_init() - Initialize resources for continuous splash - * @display: Pointer to dsi display + * dsi_display_cont_splash_config() - Initialize resources for continuous splash + * @dsi_display: Pointer to dsi display * Returns: Zero on success */ -static int dsi_display_splash_res_init(struct dsi_display *display) +int dsi_display_cont_splash_config(void *dsi_display) { + struct 
dsi_display *display = dsi_display; int rc = 0; /* Vote for gdsc required to read register address space */ + if (!display) { + pr_err("invalid input display param\n"); + return -EINVAL; + } + + mutex_lock(&display->display_lock); + /* Vote for gdsc required to read register address space */ display->cont_splash_client = sde_power_client_create(display->phandle, "cont_splash_client"); rc = sde_power_resource_enable(display->phandle, @@ -3610,6 +3918,7 @@ static int dsi_display_splash_res_init(struct dsi_display *display) if (rc) { pr_err("failed to vote gdsc for continuous splash, rc=%d\n", rc); + mutex_unlock(&display->display_lock); return -EINVAL; } @@ -3625,6 +3934,9 @@ static int dsi_display_splash_res_init(struct dsi_display *display) dsi_display_clk_mngr_update_splash_status(display->clk_mngr, display->is_cont_splash_enabled); + /* Set up ctrl isr before enabling core clk */ + dsi_display_ctrl_isr_configure(display, true); + /* Vote for Core clk and link clk. Votes on ctrl and phy * regulator are inplicit from pre clk on callback */ @@ -3645,6 +3957,7 @@ static int dsi_display_splash_res_init(struct dsi_display *display) } dsi_config_host_engine_state_for_cont_splash(display); + mutex_unlock(&display->display_lock); return rc; @@ -3653,6 +3966,7 @@ static int dsi_display_splash_res_init(struct dsi_display *display) DSI_ALL_CLKS, DSI_CLK_OFF); clk_manager_update: + dsi_display_ctrl_isr_configure(display, false); /* Update splash status for clock manager */ dsi_display_clk_mngr_update_splash_status(display->clk_mngr, false); @@ -3661,6 +3975,7 @@ static int dsi_display_splash_res_init(struct dsi_display *display) (void)sde_power_resource_enable(display->phandle, display->cont_splash_client, false); display->is_cont_splash_enabled = false; + mutex_unlock(&display->display_lock); return rc; } @@ -4106,10 +4421,8 @@ static int dsi_display_bind(struct device *dev, } } - /* Initialize resources for continuous splash */ - rc = dsi_display_splash_res_init(display); - 
if (rc) - pr_err("Continuous splash resource init failed, rc=%d\n", rc); + /* register te irq handler */ + dsi_display_register_te_irq(display); goto error; @@ -4523,6 +4836,8 @@ int dsi_display_get_info(struct drm_connector *connector, info->height_mm = phy_props.panel_height_mm; info->max_width = 1920; info->max_height = 1080; + info->qsync_min_fps = + display->panel->qsync_min_fps; switch (display->panel->panel_mode) { case DSI_OP_VIDEO_MODE: @@ -5461,6 +5776,43 @@ static int dsi_display_calc_ctrl_roi(const struct dsi_display *display, return rc; } +static int dsi_display_qsync(struct dsi_display *display, bool enable) +{ + int i; + int rc = 0; + + if (!display->panel->qsync_min_fps) { + pr_err("%s:ERROR: qsync set, but no fps\n", __func__); + return 0; + } + + mutex_lock(&display->display_lock); + + for (i = 0; i < display->ctrl_count; i++) { + + if (enable) { + /* send the commands to enable qsync */ + rc = dsi_panel_send_qsync_on_dcs(display->panel, i); + if (rc) { + pr_err("fail qsync ON cmds rc:%d\n", rc); + goto exit; + } + } else { + /* send the commands to enable qsync */ + rc = dsi_panel_send_qsync_off_dcs(display->panel, i); + if (rc) { + pr_err("fail qsync OFF cmds rc:%d\n", rc); + goto exit; + } + } + } + +exit: + SDE_EVT32(enable, display->panel->qsync_min_fps, rc); + mutex_unlock(&display->display_lock); + return rc; +} + static int dsi_display_set_roi(struct dsi_display *display, struct msm_roi_list *rois) { @@ -5524,11 +5876,21 @@ int dsi_display_pre_kickoff(struct drm_connector *connector, { int rc = 0; int i; + bool enable; /* check and setup MISR */ if (display->misr_enable) _dsi_display_setup_misr(display); + if (params->qsync_update) { + enable = (params->qsync_mode > 0) ? 
true : false; + rc = dsi_display_qsync(display, enable); + if (rc) + pr_err("%s failed to send qsync commands", + __func__); + SDE_EVT32(params->qsync_mode, rc); + } + rc = dsi_display_set_roi(display, params->rois); /* dynamic DSI clock setting */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index faeb58bb96f59f9216d3d503277bec72e02cdbee..c58b41770f10e12eaf1412ed6cf8c08cf2a82503 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -135,6 +135,9 @@ struct dsi_display_clk_info { * @is_cont_splash_enabled: Is continuous splash enabled * @sw_te_using_wd: Is software te enabled * @display_lock: Mutex for dsi_display interface. + * @disp_te_gpio: GPIO for panel TE interrupt. + * @is_te_irq_enabled:bool to specify whether TE interrupt is enabled. + * @esd_te_gate: completion gate to signal TE interrupt. * @ctrl_count: Number of DSI interfaces required by panel. * @ctrl: Controller information for DSI display. * @panel: Handle to DSI panel. 
@@ -178,6 +181,9 @@ struct dsi_display { bool is_cont_splash_enabled; bool sw_te_using_wd; struct mutex display_lock; + int disp_te_gpio; + bool is_te_irq_enabled; + struct completion esd_te_gate; u32 ctrl_count; struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY]; @@ -622,4 +628,11 @@ enum dsi_pixel_format dsi_display_get_dst_format( struct drm_connector *connector, void *display); +/** + * dsi_display_cont_splash_config() - initialize splash resources + * @display: Handle to display + * + * Return: Zero on Success + */ +int dsi_display_cont_splash_config(void *display); #endif /* _DSI_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index b4be3c20ffda00eb7dabd1f020bf4563eec34bda..18933ce402e793b843c4c16fc766c6781c8ce2ca 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -434,11 +434,15 @@ int dsi_conn_set_info_blob(struct drm_connector *connector, switch (panel->panel_mode) { case DSI_OP_VIDEO_MODE: sde_kms_info_add_keystr(info, "panel mode", "video"); + sde_kms_info_add_keystr(info, "qsync support", + panel->qsync_min_fps ? "true" : "false"); break; case DSI_OP_CMD_MODE: sde_kms_info_add_keystr(info, "panel mode", "command"); sde_kms_info_add_keyint(info, "mdp_transfer_time_us", panel->cmd_config.mdp_transfer_time_us); + sde_kms_info_add_keystr(info, "qsync support", + panel->qsync_min_fps ? 
"true" : "false"); break; default: pr_debug("invalid panel type:%d\n", panel->panel_mode); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c index 58e8782ce966a12e7f41712399e6d6299dce0ee4..b4c26514ec223d9de3a154849a25a5c2e62e92c9 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c @@ -610,13 +610,10 @@ static int dsi_panel_update_backlight(struct dsi_panel *panel, dsi = &panel->mipi_device; - mutex_lock(&panel->panel_lock); - rc = mipi_dsi_dcs_set_display_brightness(dsi, bl_lvl); if (rc < 0) pr_err("failed to update dcs backlight:%d\n", bl_lvl); - mutex_unlock(&panel->panel_lock); return rc; } @@ -631,7 +628,7 @@ int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl) rc = backlight_device_set_brightness(bl->raw_bd, bl_lvl); break; case DSI_BACKLIGHT_DCS: - dsi_panel_update_backlight(panel, bl_lvl); + rc = dsi_panel_update_backlight(panel, bl_lvl); break; default: pr_err("Backlight type(%d) not supported\n", bl->type); @@ -1054,6 +1051,24 @@ static int dsi_panel_parse_host_config(struct dsi_panel *panel) return rc; } +static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel, + struct device_node *of_node) +{ + int rc = 0; + u32 val = 0; + + rc = of_property_read_u32(of_node, + "qcom,mdss-dsi-qsync-min-refresh-rate", + &val); + if (rc) + pr_err("[%s] qsync min fps not defined rc:%d\n", + panel->name, rc); + + panel->qsync_min_fps = val; + + return rc; +} + static int dsi_panel_parse_dfps_caps(struct dsi_panel *panel) { int rc = 0; @@ -1386,6 +1401,8 @@ const char *cmd_set_prop_map[DSI_CMD_SET_MAX] = { "ROI not parsed from DTSI, generated dynamically", "qcom,mdss-dsi-timing-switch-command", "qcom,mdss-dsi-post-mode-switch-on-command", + "qcom,mdss-dsi-qsync-on-commands", + "qcom,mdss-dsi-qsync-off-commands", }; const char *cmd_set_state_map[DSI_CMD_SET_MAX] = { @@ -1410,6 +1427,8 @@ const char *cmd_set_state_map[DSI_CMD_SET_MAX] = { "ROI not 
parsed from DTSI, generated dynamically", "qcom,mdss-dsi-timing-switch-command-state", "qcom,mdss-dsi-post-mode-switch-on-command-state", + "qcom,mdss-dsi-qsync-on-commands-state", + "qcom,mdss-dsi-qsync-off-commands-state", }; static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt) @@ -2564,54 +2583,23 @@ static void dsi_panel_esd_config_deinit(struct drm_panel_esd_config *esd_config) kfree(esd_config->status_cmd.cmds); } -static int dsi_panel_parse_esd_config(struct dsi_panel *panel) +int dsi_panel_parse_esd_reg_read_configs(struct dsi_panel *panel) { + struct drm_panel_esd_config *esd_config; int rc = 0; u32 tmp; u32 i, status_len, *lenp; struct property *data; - const char *string; - struct drm_panel_esd_config *esd_config; struct dsi_parser_utils *utils = &panel->utils; - u8 *esd_mode = NULL; - esd_config = &panel->esd_config; - esd_config->status_mode = ESD_MODE_MAX; - esd_config->esd_enabled = utils->read_bool(utils->data, - "qcom,esd-check-enabled"); - - if (!esd_config->esd_enabled) - return 0; - - rc = utils->read_string(utils->data, - "qcom,mdss-dsi-panel-status-check-mode", &string); - if (!rc) { - if (!strcmp(string, "bta_check")) { - esd_config->status_mode = ESD_MODE_SW_BTA; - } else if (!strcmp(string, "reg_read")) { - esd_config->status_mode = ESD_MODE_REG_READ; - } else if (!strcmp(string, "te_signal_check")) { - if (panel->panel_mode == DSI_OP_CMD_MODE) { - esd_config->status_mode = ESD_MODE_PANEL_TE; - } else { - pr_err("TE-ESD not valid for video mode\n"); - rc = -EINVAL; - goto error; - } - } else { - pr_err("No valid panel-status-check-mode string\n"); - rc = -EINVAL; - goto error; - } - } else { - pr_debug("status check method not defined!\n"); - rc = -EINVAL; - goto error; + if (!panel) { + pr_err("Invalid Params\n"); + return -EINVAL; } - if ((esd_config->status_mode == ESD_MODE_SW_BTA) || - (esd_config->status_mode == ESD_MODE_PANEL_TE)) - return 0; + esd_config = &panel->esd_config; + if (!esd_config) + return 
-EINVAL; dsi_panel_parse_cmd_sets_sub(&esd_config->status_cmd, DSI_CMD_SET_PANEL_STATUS, utils); @@ -2686,8 +2674,10 @@ static int dsi_panel_parse_esd_config(struct dsi_panel *panel) } esd_config->status_buf = kzalloc(SZ_4K, GFP_KERNEL); - if (!esd_config->status_buf) + if (!esd_config->status_buf) { + rc = -ENOMEM; goto error4; + } rc = utils->read_u32_array(utils->data, "qcom,mdss-dsi-panel-status-value", @@ -2698,15 +2688,6 @@ static int dsi_panel_parse_esd_config(struct dsi_panel *panel) esd_config->groups * status_len); } - if (panel->esd_config.status_mode == ESD_MODE_REG_READ) - esd_mode = "register_read"; - else if (panel->esd_config.status_mode == ESD_MODE_SW_BTA) - esd_mode = "bta_trigger"; - else if (panel->esd_config.status_mode == ESD_MODE_PANEL_TE) - esd_mode = "te_check"; - - pr_info("ESD enabled with mode: %s\n", esd_mode); - return 0; error4: @@ -2718,6 +2699,70 @@ static int dsi_panel_parse_esd_config(struct dsi_panel *panel) kfree(esd_config->status_cmds_rlen); error1: kfree(esd_config->status_cmd.cmds); +error: + return rc; +} + +static int dsi_panel_parse_esd_config(struct dsi_panel *panel) +{ + int rc = 0; + const char *string; + struct drm_panel_esd_config *esd_config; + struct dsi_parser_utils *utils = &panel->utils; + u8 *esd_mode = NULL; + + esd_config = &panel->esd_config; + esd_config->status_mode = ESD_MODE_MAX; + esd_config->esd_enabled = utils->read_bool(utils->data, + "qcom,esd-check-enabled"); + + if (!esd_config->esd_enabled) + return 0; + + rc = utils->read_string(utils->data, + "qcom,mdss-dsi-panel-status-check-mode", &string); + if (!rc) { + if (!strcmp(string, "bta_check")) { + esd_config->status_mode = ESD_MODE_SW_BTA; + } else if (!strcmp(string, "reg_read")) { + esd_config->status_mode = ESD_MODE_REG_READ; + } else if (!strcmp(string, "te_signal_check")) { + if (panel->panel_mode == DSI_OP_CMD_MODE) { + esd_config->status_mode = ESD_MODE_PANEL_TE; + } else { + pr_err("TE-ESD not valid for video mode\n"); + rc = -EINVAL; + 
goto error; + } + } else { + pr_err("No valid panel-status-check-mode string\n"); + rc = -EINVAL; + goto error; + } + } else { + pr_debug("status check method not defined!\n"); + rc = -EINVAL; + goto error; + } + + if (panel->esd_config.status_mode == ESD_MODE_REG_READ) { + rc = dsi_panel_parse_esd_reg_read_configs(panel); + if (rc) { + pr_err("failed to parse esd reg read mode params, rc=%d\n", + rc); + goto error; + } + esd_mode = "register_read"; + } else if (panel->esd_config.status_mode == ESD_MODE_SW_BTA) { + esd_mode = "bta_trigger"; + } else if (panel->esd_config.status_mode == ESD_MODE_PANEL_TE) { + esd_mode = "te_check"; + } + + pr_info("ESD enabled with mode: %s\n", esd_mode); + + return 0; + error: panel->esd_config.esd_enabled = false; return rc; @@ -2785,6 +2830,13 @@ struct dsi_panel *dsi_panel_get(struct device *parent, if (rc) pr_err("failed to parse dfps configuration, rc=%d\n", rc); + if (!(panel->dfps_caps.dfps_support)) { + /* qsync and dfps are mutually exclusive features */ + rc = dsi_panel_parse_qsync_caps(panel, of_node); + if (rc) + pr_err("failed to parse qsync features, rc=%d\n", rc); + } + rc = dsi_panel_parse_phy_props(panel); if (rc) { pr_err("failed to parse panel physical dimension, rc=%d\n", rc); @@ -3404,6 +3456,50 @@ static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel_cmd_set *set, return rc; } +int dsi_panel_send_qsync_on_dcs(struct dsi_panel *panel, + int ctrl_idx) +{ + int rc = 0; + + if (!panel) { + pr_err("invalid params\n"); + return -EINVAL; + } + + mutex_lock(&panel->panel_lock); + + pr_debug("ctrl:%d qsync on\n", ctrl_idx); + rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_QSYNC_ON); + if (rc) + pr_err("[%s] failed to send DSI_CMD_SET_QSYNC_ON cmds rc=%d\n", + panel->name, rc); + + mutex_unlock(&panel->panel_lock); + return rc; +} + +int dsi_panel_send_qsync_off_dcs(struct dsi_panel *panel, + int ctrl_idx) +{ + int rc = 0; + + if (!panel) { + pr_err("invalid params\n"); + return -EINVAL; + } + + 
mutex_lock(&panel->panel_lock); + + pr_debug("ctrl:%d qsync off\n", ctrl_idx); + rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_QSYNC_OFF); + if (rc) + pr_err("[%s] failed to send DSI_CMD_SET_QSYNC_OFF cmds rc=%d\n", + panel->name, rc); + + mutex_unlock(&panel->panel_lock); + return rc; +} + int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx, struct dsi_rect *roi) { diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h index 4d3d23e7fddc5ab9fc5e372cc6651609cc08a66e..07a141caaacd561338a0c5482886d8772adcbf2a 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h @@ -179,6 +179,7 @@ struct dsi_panel { bool panel_initialized; bool te_using_watchdog_timer; + u32 qsync_min_fps; char dsc_pps_cmd[DSI_CMD_PPS_SIZE]; enum dsi_dms_mode dms_mode; @@ -267,6 +268,11 @@ int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl); int dsi_panel_update_pps(struct dsi_panel *panel); +int dsi_panel_send_qsync_on_dcs(struct dsi_panel *panel, + int ctrl_idx); +int dsi_panel_send_qsync_off_dcs(struct dsi_panel *panel, + int ctrl_idx); + int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx, struct dsi_rect *roi); @@ -276,4 +282,12 @@ int dsi_panel_post_switch(struct dsi_panel *panel); void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width); +struct dsi_panel *dsi_panel_ext_bridge_get(struct device *parent, + struct device_node *of_node, + int topology_override); + +int dsi_panel_parse_esd_reg_read_configs(struct dsi_panel *panel); + +void dsi_panel_ext_bridge_put(struct dsi_panel *panel); + #endif /* _DSI_PANEL_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c index 3c07ed2a07513536598ac3a3b56aecc74dddf196..df69e70038453c2792866d46d8b7cad98a1ce048 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c @@ -117,7 +117,8 @@ static int 
dsi_phy_regmap_init(struct platform_device *pdev, phy->hw.base = ptr; - pr_debug("[%s] map dsi_phy registers to %p\n", phy->name, phy->hw.base); + pr_debug("[%s] map dsi_phy registers to %pK\n", + phy->name, phy->hw.base); return rc; } @@ -775,6 +776,18 @@ void dsi_phy_toggle_resync_fifo(struct msm_dsi_phy *phy) phy->hw.ops.toggle_resync_fifo(&phy->hw); } + +void dsi_phy_reset_clk_en_sel(struct msm_dsi_phy *phy) +{ + if (!phy) + return; + + if (!phy->hw.ops.reset_clk_en_sel) + return; + + phy->hw.ops.reset_clk_en_sel(&phy->hw); +} + int dsi_phy_set_ulps(struct msm_dsi_phy *phy, struct dsi_host_config *config, bool enable, bool clamp_enabled) { diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h index 56d5ee3bd5d43614183748fbcabc5c6bbd993e18..4062d3298417bc42dba4343d2b3df92b3089d446 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h @@ -259,6 +259,15 @@ int dsi_phy_lane_reset(struct msm_dsi_phy *phy); */ void dsi_phy_toggle_resync_fifo(struct msm_dsi_phy *phy); +/** + * dsi_phy_reset_clk_en_sel() - reset clk_en_select on cmn_clk_cfg1 register + * @phy: DSI PHY handle + * + * After toggling resync fifo register, clk_en_sel bit on cmn_clk_cfg1 + * register has to be reset + */ +void dsi_phy_reset_clk_en_sel(struct msm_dsi_phy *phy); + /** * dsi_phy_drv_register() - register platform driver for dsi phy */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h index c7cecb28cb4118d42b67c9a0daaef52c8d3285c3..5d0a8e4fb6b3ca9a79fc1074a0b7ba455d28b64d 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h @@ -251,6 +251,12 @@ struct dsi_phy_hw_ops { */ void (*toggle_resync_fifo)(struct dsi_phy_hw *phy); + /** + * reset_clk_en_sel() - reset clk_en_sel on phy cmn_clk_cfg1 register + * @phy: Pointer to DSI PHY hardware object.
+ */ + void (*reset_clk_en_sel)(struct dsi_phy_hw *phy); + void *timing_ops; struct phy_ulps_config_ops ulps_ops; }; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c index 504cbc3847845013b04c0cae55aef1895e2dde54..4ed484a49e5cacfdb803a5b4d82fb6bfc728dd79 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c @@ -276,6 +276,18 @@ void dsi_phy_hw_v4_0_toggle_resync_fifo(struct dsi_phy_hw *phy) wmb(); } +void dsi_phy_hw_v4_0_reset_clk_en_sel(struct dsi_phy_hw *phy) +{ + u32 data = 0; + + /*Turning off CLK_EN_SEL after retime buffer sync */ + data = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1); + data &= ~BIT(4); + DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, data); + /* ensure that clk_en_sel bit is turned off */ + wmb(); +} + int dsi_phy_hw_v4_0_wait_for_lane_idle( struct dsi_phy_hw *phy, u32 lanes) { diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c index 0940e84b2821b6df619b901f4c25f2fbf9297828..59d8f1eb279da7dfaa6c606ea72b07758fc2fc65 100644 --- a/drivers/gpu/drm/msm/edp/edp.c +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -54,7 +54,7 @@ static struct msm_edp *edp_init(struct platform_device *pdev) ret = -ENOMEM; goto fail; } - DBG("eDP probed=%p", edp); + DBG("eDP probed=%pK", edp); edp->pdev = pdev; platform_set_drvdata(pdev, edp); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9aef53dc65ea50415bff9f8b4bd3d58cff0e7b0c..95d9955e89db27a9bb00aa8f1b1c5f3d4b46fa9a 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -187,7 +187,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, } if (reglog) - printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size); + dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n", + dbgname, ptr, size); return ptr; } @@ -218,7 +219,7 @@ void msm_iounmap(struct platform_device *pdev, void __iomem *addr) void msm_writel(u32 data, void __iomem *addr) { if (reglog) - printk(KERN_DEBUG "IO:W %p %08x\n", addr, data); + pr_debug("IO:W %pK %08x\n", addr, data); writel(data, addr); } @@ -227,7 +228,7 @@ u32 msm_readl(const void __iomem *addr) u32 val = readl(addr); if (reglog) - pr_err("IO:R %p %08x\n", addr, val); + pr_err("IO:R %pK %08x\n", addr, val); return val; } @@ -992,6 +993,14 @@ static void msm_lastclose(struct drm_device *dev) struct drm_modeset_acquire_ctx ctx; int i, rc; + /* check for splash status before triggering cleanup + * if we end up here with splash status ON i.e before first + * commit then ignore the last close call + */ + if (kms && kms->funcs && kms->funcs->check_for_splash + && kms->funcs->check_for_splash(kms)) + return; + /* * clean up vblank disable immediately as this is the last close. 
*/ @@ -1079,7 +1088,7 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe) if (!kms) return -ENXIO; - DBG("dev=%p, crtc=%u", dev, pipe); + DBG("dev=%pK, crtc=%u", dev, pipe); return vblank_ctrl_queue_work(priv, pipe, true); } @@ -1090,7 +1099,7 @@ static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe) if (!kms) return; - DBG("dev=%p, crtc=%u", dev, pipe); + DBG("dev=%pK, crtc=%u", dev, pipe); vblank_ctrl_queue_work(priv, pipe, false); } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 0df1b3eee15b93545a79986f25224ca9c89c9954..b19c4532a8dd8979f200194bb39e7d1876111220 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -195,6 +195,7 @@ enum msm_mdp_conn_property { CONNECTOR_PROP_AUTOREFRESH, CONNECTOR_PROP_LP, CONNECTOR_PROP_FB_TRANSLATION_MODE, + CONNECTOR_PROP_QSYNC_MODE, /* total # of properties */ CONNECTOR_PROP_COUNT @@ -456,6 +457,7 @@ struct msm_mode_info { * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is * used instead of panel TE in cmd mode panels * @roi_caps: Region of interest capability info + * @qsync_min_fps Minimum fps supported by Qsync feature */ struct msm_display_info { int intf_type; @@ -476,6 +478,8 @@ struct msm_display_info { bool is_primary; bool is_te_using_watchdog_timer; struct msm_roi_caps roi_caps; + + uint32_t qsync_min_fps; }; #define MSM_MAX_ROI 4 @@ -493,10 +497,14 @@ struct msm_roi_list { /** * struct - msm_display_kickoff_params - info for display features at kickoff * @rois: Regions of interest structure for mapping CRTC to Connector output + * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode + * @qsync_update: Qsync settings were changed/updated */ struct msm_display_kickoff_params { struct msm_roi_list *rois; struct drm_msm_ext_hdr_metadata *hdr_meta; + uint32_t qsync_mode; + bool qsync_update; }; /** diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 
f3fcca8d225b24129b9bec694577e5d0c727cf1f..52f6f177cc1e24fb8f8660d21e4fddaa96692a92 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -66,7 +66,7 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) msm_fb = to_msm_framebuffer(fb); n = fb->format->num_planes; - DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); + DBG("destroy: FB ID: %d (%pK)", fb->base.id, fb); drm_framebuffer_cleanup(fb); @@ -366,7 +366,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, unsigned int hsub, vsub; bool is_modified = false; - DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)", + DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)", dev, mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); @@ -450,7 +450,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, goto fail; } - DBG("create: FB ID: %d (%p)", fb->base.id, fb); + DBG("create: FB ID: %d (%pK)", fb->base.id, fb); return fb; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index c178563fcd4dc56a2d1fbdaf4197c847b5c11904..f8f95deb137dee1012cfea2b467f964c4e495268 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -118,7 +118,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, goto fail_unlock; } - DBG("fbi=%p, dev=%p", fbi, dev); + DBG("fbi=%pK, dev=%pK", fbi, dev); fbdev->fb = fb; helper->fb = fb; @@ -142,7 +142,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fbi->fix.smem_start = paddr; fbi->fix.smem_len = bo->size; - DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres); + DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres); DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index ba2b637989731ee84cb5d1e9dd6b9699ac967191..766221196c32a3932682b367bcc640f3ba29d3e8 100644 --- 
a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -263,7 +263,7 @@ int msm_gem_fault(struct vm_fault *vmf) pfn = page_to_pfn(pages[pgoff]); - VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, + VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); @@ -880,7 +880,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) break; } - seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t", + seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t", msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', obj->name, kref_read(&obj->refcount), off, msm_obj->vaddr); diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 4a22c3be5a616a2826b3c537ef38c0784d9f6ee3..48bcffcdbbea71c65ab7683a4994d62bac322094 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -115,6 +115,8 @@ struct msm_kms_funcs { #endif /* handle continuous splash */ int (*cont_splash_config)(struct msm_kms *kms); + /* check for continuous splash status */ + bool (*check_for_splash)(struct msm_kms *kms); }; struct msm_kms { diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 82e70699c1a54baf720bdecd30f8b48ff621002a..54f38cf509ec260a614585740cf4f8b9ac527642 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -60,6 +60,10 @@ static const struct drm_prop_enum_list e_power_mode[] = { {SDE_MODE_DPMS_LP2, "LP2"}, {SDE_MODE_DPMS_OFF, "OFF"}, }; +static const struct drm_prop_enum_list e_qsync_mode[] = { + {SDE_RM_QSYNC_DISABLED, "none"}, + {SDE_RM_QSYNC_CONTINUOUS_MODE, "continuous"}, +}; static int sde_backlight_device_update_status(struct backlight_device *bd) { @@ -68,6 +72,7 @@ static int sde_backlight_device_update_status(struct backlight_device *bd) struct sde_connector *c_conn; int bl_lvl; struct drm_event event; + int rc = 0; brightness 
= bd->props.brightness; @@ -93,11 +98,11 @@ static int sde_backlight_device_update_status(struct backlight_device *bd) event.length = sizeof(u32); msm_mode_object_event_notify(&c_conn->base.base, c_conn->base.dev, &event, (u8 *)&brightness); - c_conn->ops.set_backlight(&c_conn->base, + rc = c_conn->ops.set_backlight(&c_conn->base, c_conn->display, bl_lvl); } - return 0; + return rc; } static int sde_backlight_device_get_brightness(struct backlight_device *bd) @@ -415,16 +420,32 @@ void sde_connector_schedule_status_work(struct drm_connector *connector, if (!c_conn) return; + /* Return if there is no change in ESD status check condition */ + if (en == c_conn->esd_status_check) + return; + sde_connector_get_info(connector, &info); if (c_conn->ops.check_status && (info.capabilities & MSM_DISPLAY_ESD_ENABLED)) { - if (en) + if (en) { + u32 interval; + + /* + * If debugfs property is not set then take + * default value + */ + interval = c_conn->esd_status_interval ? + c_conn->esd_status_interval : + STATUS_CHECK_INTERVAL_MS; /* Schedule ESD status check */ schedule_delayed_work(&c_conn->status_work, - msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS)); - else + msecs_to_jiffies(interval)); + c_conn->esd_status_check = true; + } else { /* Cancel any pending ESD status check */ cancel_delayed_work_sync(&c_conn->status_work); + c_conn->esd_status_check = false; + } } } @@ -473,8 +494,12 @@ static int _sde_connector_update_power_locked(struct sde_connector *c_conn) } c_conn->last_panel_power_mode = mode; + mutex_unlock(&c_conn->lock); if (mode != SDE_MODE_DPMS_ON) sde_connector_schedule_status_work(connector, false); + else + sde_connector_schedule_status_work(connector, true); + mutex_lock(&c_conn->lock); return rc; } @@ -533,6 +558,7 @@ static int _sde_connector_update_dirty_properties( c_conn = to_sde_connector(connector); c_state = to_sde_connector_state(connector->state); + c_conn->qsync_updated = false; while ((idx = msm_property_pop_dirty(&c_conn->property_info, 
&c_state->property_state)) >= 0) { @@ -548,6 +574,11 @@ static int _sde_connector_update_dirty_properties( case CONNECTOR_PROP_AD_BL_SCALE: _sde_connector_update_bl_scale(c_conn); break; + case CONNECTOR_PROP_QSYNC_MODE: + c_conn->qsync_updated = true; + c_conn->qsync_mode = sde_connector_get_property( + connector->state, CONNECTOR_PROP_QSYNC_MODE); + break; default: /* nothing to do for most properties */ break; @@ -593,6 +624,13 @@ int sde_connector_pre_kickoff(struct drm_connector *connector) params.rois = &c_state->rois; params.hdr_meta = &c_state->hdr_meta; + params.qsync_update = false; + + if (c_conn->qsync_updated) { + params.qsync_mode = c_conn->qsync_mode; + params.qsync_update = true; + SDE_EVT32(connector->base.id, params.qsync_mode); + } SDE_EVT32_VERBOSE(connector->base.id); @@ -1499,10 +1537,14 @@ static int sde_connector_init_debugfs(struct drm_connector *connector) sde_connector_get_info(connector, &info); if (sde_connector->ops.check_status && - (info.capabilities & MSM_DISPLAY_ESD_ENABLED)) + (info.capabilities & MSM_DISPLAY_ESD_ENABLED)) { debugfs_create_u32("force_panel_dead", 0600, connector->debugfs_entry, &sde_connector->force_panel_dead); + debugfs_create_u32("esd_status_interval", 0600, + connector->debugfs_entry, + &sde_connector->esd_status_interval); + } if (!debugfs_create_bool("fb_kmap", 0600, connector->debugfs_entry, &sde_connector->fb_kmap)) { @@ -1715,10 +1757,16 @@ static void sde_connector_check_status_work(struct work_struct *work) } if (rc > 0) { + u32 interval; + SDE_DEBUG("esd check status success conn_id: %d enc_id: %d\n", conn->base.base.id, conn->encoder->base.id); + + /* If debugfs property is not set then take default value */ + interval = conn->esd_status_interval ? 
+ conn->esd_status_interval : STATUS_CHECK_INTERVAL_MS; schedule_delayed_work(&conn->status_work, - msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS)); + msecs_to_jiffies(interval)); return; } @@ -2098,6 +2146,16 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, 0x0, 0, AUTOREFRESH_MAX_FRAME_CNT, 0, CONNECTOR_PROP_AUTOREFRESH); + if (connector_type == DRM_MODE_CONNECTOR_DSI) { + if (sde_kms->catalog->has_qsync && display_info.qsync_min_fps) { + + msm_property_install_enum(&c_conn->property_info, + "qsync_mode", 0, 0, e_qsync_mode, + ARRAY_SIZE(e_qsync_mode), + CONNECTOR_PROP_QSYNC_MODE); + } + } + msm_property_install_range(&c_conn->property_info, "bl_scale", 0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL, CONNECTOR_PROP_BL_SCALE); @@ -2162,6 +2220,23 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, return ERR_PTR(rc); } +static int _sde_conn_hw_recovery_handler( + struct drm_connector *connector, bool val) +{ + struct sde_connector *c_conn; + + if (!connector) { + SDE_ERROR("invalid connector\n"); + return -EINVAL; + } + c_conn = to_sde_connector(connector); + + if (c_conn->encoder) + sde_encoder_recovery_events_handler(c_conn->encoder, val); + + return 0; +} + int sde_connector_register_custom_event(struct sde_kms *kms, struct drm_connector *conn_drm, u32 event, bool val) { @@ -2174,8 +2249,46 @@ int sde_connector_register_custom_event(struct sde_kms *kms, case DRM_EVENT_PANEL_DEAD: ret = 0; break; + case DRM_EVENT_SDE_HW_RECOVERY: + ret = _sde_conn_hw_recovery_handler(conn_drm, val); + break; default: break; } return ret; } + +int sde_connector_event_notify(struct drm_connector *connector, uint32_t type, + uint32_t len, uint32_t val) +{ + struct drm_event event; + int ret; + + if (!connector) { + SDE_ERROR("invalid connector\n"); + return -EINVAL; + } + + switch (type) { + case DRM_EVENT_SYS_BACKLIGHT: + case DRM_EVENT_PANEL_DEAD: + case DRM_EVENT_SDE_HW_RECOVERY: + ret = 0; + break; + default: + SDE_ERROR("connector %d, Unsupported 
event %d\n", + connector->base.id, type); + return -EINVAL; + } + + event.type = type; + event.length = len; + msm_mode_object_event_notify(&connector->base, connector->dev, &event, + (u8 *)&val); + + SDE_EVT32(connector->base.id, type, len, val); + SDE_DEBUG("connector:%d hw recovery event(%d) value (%d) notified\n", + connector->base.id, type, val); + + return ret; +} diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index ba6d1740c56de6abd312dfa907ba287b7e4778b0..220b0cab53add79b4611ae4e50bc8318d120cc35 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -292,6 +292,13 @@ struct sde_connector_ops { * Returns: Zero on success, negative error code for failures */ void (*pre_destroy)(struct drm_connector *connector, void *display); + + /** + * cont_splash_config - initialize splash resources + * @display: Pointer to private display handle + * Returns: zero for success, negative for failure + */ + int (*cont_splash_config)(void *display); }; /** @@ -350,9 +357,12 @@ struct sde_connector_evt { * @bl_device: backlight device node * @status_work: work object to perform status checks * @force_panel_dead: variable to trigger forced ESD recovery + * @esd_status_interval: variable to change ESD check interval in millisec * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed * @bl_scale: BL scale value for ABA feature * @bl_scale_ad: BL scale value for AD feature + * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode + * @qsync_updated: Qsync settings were updated * last_cmd_tx_sts: status of the last command transfer */ struct sde_connector { @@ -392,11 +402,17 @@ struct sde_connector { struct backlight_device *bl_device; struct delayed_work status_work; u32 force_panel_dead; + u32 esd_status_interval; + + bool esd_status_check; bool bl_scale_dirty; u32 bl_scale; u32 bl_scale_ad; + u32 qsync_mode; + bool qsync_updated; + bool last_cmd_tx_sts; }; @@ -431,6
+447,14 @@ struct sde_connector { #define sde_connector_get_encoder(C) \ ((C) ? to_sde_connector((C))->encoder : NULL) +/** + * sde_connector_qsync_updated - indicates if connector updated qsync + * @C: Pointer to drm connector structure + * Returns: True if qsync is updated; false otherwise + */ +#define sde_connector_qsync_updated(C) \ + ((C) ? to_sde_connector((C))->qsync_updated : 0) + /** * sde_connector_get_propinfo - get sde connector's property info pointer * @C: Pointer to drm connector structure @@ -793,4 +817,14 @@ void sde_connector_helper_bridge_disable(struct drm_connector *connector); */ void sde_connector_destroy(struct drm_connector *connector); +/** + * sde_connector_event_notify - signal hw recovery event to client + * @connector: pointer to connector + * @type: event type + * @len: length of the value of the event + * @val: value + */ +int sde_connector_event_notify(struct drm_connector *connector, uint32_t type, + uint32_t len, uint32_t val); + #endif /* _SDE_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index a5e293b5d7dd6dd45cebd456f331d732d5cb91d1..310d6498df61eea1412ac7aead38f9304dc23592 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "sde_kms.h" #include "sde_hw_lm.h" @@ -2300,7 +2301,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) struct sde_crtc *sde_crtc; struct sde_kms *sde_kms; unsigned long flags; - bool frame_done = false; if (!work) { SDE_ERROR("invalid work handle\n"); @@ -2353,10 +2353,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE3); } - - if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE - | SDE_ENCODER_FRAME_EVENT_ERROR)) - frame_done = true; } if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) { @@ -2373,9 +2369,6 @@ static void 
sde_crtc_frame_event_work(struct kthread_work *work) SDE_ERROR("crtc%d ts:%lld received panel dead event\n", crtc->base.id, ktime_to_ns(fevent->ts)); - if (frame_done) - complete_all(&sde_crtc->frame_done_comp); - spin_lock_irqsave(&sde_crtc->spin_lock, flags); list_add_tail(&fevent->list, &sde_crtc->frame_event_list); spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); @@ -3293,10 +3286,10 @@ static void sde_crtc_destroy_state(struct drm_crtc *crtc, &cstate->property_state); } -static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc) +static int _sde_crtc_flush_event_thread(struct drm_crtc *crtc) { struct sde_crtc *sde_crtc; - int ret, rc = 0, i; + int i; if (!crtc) { SDE_ERROR("invalid argument\n"); @@ -3320,17 +3313,9 @@ static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc) kthread_flush_work(&sde_crtc->frame_events[i].work); } - ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp, - msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT)); - if (!ret) { - SDE_ERROR("frame done completion wait timed out, ret:%d\n", - ret); - SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL); - rc = -ETIMEDOUT; - } SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT); - return rc; + return 0; } static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc, @@ -3450,11 +3435,12 @@ static void _sde_crtc_remove_pipe_flush(struct drm_crtc *crtc) * _sde_crtc_reset_hw - attempt hardware reset on errors * @crtc: Pointer to DRM crtc instance * @old_state: Pointer to crtc state for previous commit - * @dump_status: Whether or not to dump debug status before reset + * @recovery_events: Whether or not recovery events are enabled * Returns: Zero if current commit should still be attempted */ static int _sde_crtc_reset_hw(struct drm_crtc *crtc, - struct drm_crtc_state *old_state, bool dump_status) + struct drm_crtc_state *old_state, + bool recovery_events) { struct drm_plane *plane_halt[MAX_PLANES]; struct drm_plane *plane; @@ -3474,10 +3460,7 @@ static int _sde_crtc_reset_hw(struct 
drm_crtc *crtc, old_rot_op_mode = to_sde_crtc_state(old_state)->sbuf_cfg.rot_op_mode; SDE_EVT32(DRMID(crtc), old_rot_op_mode, - dump_status, SDE_EVTLOG_FUNC_ENTRY); - - if (dump_status) - SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus"); + recovery_events, SDE_EVTLOG_FUNC_ENTRY); /* optionally generate a panic instead of performing a h/w reset */ SDE_DBG_CTRL("stop_ftrace", "reset_hw_panic"); @@ -3509,7 +3492,7 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc, */ if (i == sde_crtc->num_ctls && old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE) - return false; + return 0; SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc)); @@ -3586,7 +3569,8 @@ static int _sde_crtc_reset_hw(struct drm_crtc *crtc, sde_encoder_kickoff(encoder, false); } - return -EAGAIN; + /* panic the device if VBIF is not in good state */ + return !recovery_events ? 0 : -EAGAIN; } /** @@ -3653,8 +3637,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, struct msm_drm_private *priv; struct sde_kms *sde_kms; struct sde_crtc_state *cstate; - bool is_error, reset_req; - int ret; + bool is_error, reset_req, recovery_events; if (!crtc) { SDE_ERROR("invalid argument\n"); @@ -3700,6 +3683,9 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, crtc->state); if (sde_encoder_prepare_for_kickoff(encoder, ¶ms)) reset_req = true; + + recovery_events = + sde_encoder_recovery_events_enabled(encoder); } /* @@ -3707,26 +3693,13 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, * preparing for the kickoff */ if (reset_req) { - if (_sde_crtc_reset_hw(crtc, old_state, - !sde_crtc->reset_request)) + if (_sde_crtc_reset_hw(crtc, old_state, recovery_events)) is_error = true; } - sde_crtc->reset_request = reset_req; - /* wait for frame_event_done completion */ - SDE_ATRACE_BEGIN("wait_for_frame_done_event"); - ret = _sde_crtc_wait_for_frame_done(crtc); - SDE_ATRACE_END("wait_for_frame_done_event"); - if (ret) { - SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", - crtc->base.id, - 
atomic_read(&sde_crtc->frame_pending)); - - is_error = true; - - /* force offline rotation mode since the commit has no pipes */ - cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE; - } + SDE_ATRACE_BEGIN("flush_event_thread"); + _sde_crtc_flush_event_thread(crtc); + SDE_ATRACE_END("flush_event_thread"); if (atomic_inc_return(&sde_crtc->frame_pending) == 1) { /* acquire bandwidth and other resources */ @@ -3765,7 +3738,6 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, sde_encoder_kickoff(encoder, false); } - reinit_completion(&sde_crtc->frame_done_comp); SDE_ATRACE_END("crtc_commit"); } @@ -4160,15 +4132,17 @@ static void sde_crtc_disable(struct drm_crtc *crtc) msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)&power_on); + /* disable mdp LUT memory retention */ + ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk", + CLKFLAG_NORETAIN_MEM); + if (ret) + SDE_ERROR("failed to disable LUT memory retention %d\n", ret); + /* destination scaler if enabled should be reconfigured on resume */ if (cstate->num_ds_enabled) sde_crtc->ds_reconfig = true; - /* wait for frame_event_done completion */ - if (_sde_crtc_wait_for_frame_done(crtc)) - SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", - crtc->base.id, - atomic_read(&sde_crtc->frame_pending)); + _sde_crtc_flush_event_thread(crtc); SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend, sde_crtc->vblank_requested, @@ -4183,6 +4157,8 @@ static void sde_crtc_disable(struct drm_crtc *crtc) sde_crtc->enabled = false; if (atomic_read(&sde_crtc->frame_pending)) { + SDE_ERROR("crtc%d frame_pending%d\n", crtc->base.id, + atomic_read(&sde_crtc->frame_pending)); SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending), SDE_EVTLOG_FUNC_CASE2); sde_core_perf_crtc_release_bw(crtc); @@ -4308,6 +4284,12 @@ static void sde_crtc_enable(struct drm_crtc *crtc, msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)&power_on); + /* enable mdp LUT memory retention */ + 
ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk", + CLKFLAG_RETAIN_MEM); + if (ret) + SDE_ERROR("failed to enable LUT memory retention %d\n", ret); + mutex_unlock(&sde_crtc->crtc_lock); spin_lock_irqsave(&sde_crtc->spin_lock, flags); @@ -4929,7 +4911,7 @@ void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) { struct sde_crtc *sde_crtc = to_sde_crtc(crtc); - SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file); + SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file); _sde_crtc_complete_flip(crtc, file); } @@ -5412,7 +5394,6 @@ int sde_crtc_helper_reset_custom_properties(struct drm_crtc *crtc, void sde_crtc_misr_setup(struct drm_crtc *crtc, bool enable, u32 frame_count) { - struct sde_kms *sde_kms; struct sde_crtc *sde_crtc; struct sde_crtc_mixer *m; int i; @@ -5423,20 +5404,6 @@ void sde_crtc_misr_setup(struct drm_crtc *crtc, bool enable, u32 frame_count) } sde_crtc = to_sde_crtc(crtc); - sde_kms = _sde_crtc_get_kms(crtc); - if (!sde_kms) { - SDE_ERROR("invalid sde_kms\n"); - return; - } - - mutex_lock(&sde_crtc->crtc_lock); - if (sde_kms_is_secure_session_inprogress(sde_kms)) { - SDE_DEBUG("crtc:%d misr enable/disable not allowed\n", - DRMID(crtc)); - mutex_unlock(&sde_crtc->crtc_lock); - return; - } - sde_crtc->misr_enable = enable; sde_crtc->misr_frame_count = frame_count; for (i = 0; i < sde_crtc->num_mixers; ++i) { @@ -5447,7 +5414,6 @@ void sde_crtc_misr_setup(struct drm_crtc *crtc, bool enable, u32 frame_count) m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count); } - mutex_unlock(&sde_crtc->crtc_lock); } #ifdef CONFIG_DEBUG_FS @@ -5604,6 +5570,7 @@ static ssize_t _sde_crtc_misr_setup(struct file *file, char buf[MISR_BUFF_SIZE + 1]; u32 frame_count, enable; size_t buff_copy; + struct sde_kms *sde_kms; if (!file || !file->private_data) return -EINVAL; @@ -5611,6 +5578,12 @@ static ssize_t _sde_crtc_misr_setup(struct file *file, sde_crtc = file->private_data; crtc = &sde_crtc->base; + sde_kms = _sde_crtc_get_kms(crtc); + if 
(!sde_kms) { + SDE_ERROR("invalid sde_kms\n"); + return -EINVAL; + } + buff_copy = min_t(size_t, count, MISR_BUFF_SIZE); if (copy_from_user(buf, user_buf, buff_copy)) { SDE_ERROR("buffer copy failed\n"); @@ -5626,7 +5599,16 @@ static ssize_t _sde_crtc_misr_setup(struct file *file, if (rc) return rc; + mutex_lock(&sde_crtc->crtc_lock); + if (sde_kms_is_secure_session_inprogress(sde_kms)) { + SDE_DEBUG("crtc:%d misr enable/disable not allowed\n", + DRMID(crtc)); + goto end; + } sde_crtc_misr_setup(crtc, enable, frame_count); + +end: + mutex_unlock(&sde_crtc->crtc_lock); _sde_crtc_power_enable(sde_crtc, false); return count; @@ -6003,7 +5985,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) mutex_init(&sde_crtc->rp_lock); INIT_LIST_HEAD(&sde_crtc->rp_head); - init_completion(&sde_crtc->frame_done_comp); sde_crtc->enabled = false; INIT_LIST_HEAD(&sde_crtc->frame_event_list); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index b9984e1d4de3c1b1b448f27f68b750e05810eb7b..37e197e1e37a296210097544bb741e7241bbbf88 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -143,8 +143,6 @@ struct sde_crtc_event { * @enabled : whether the SDE CRTC is currently enabled. updated in the * commit-thread, not state-swap time which is earlier, so * safe to make decisions on during VBLANK on/off work - * @reset_request : whether or not a h/w request was requested for the previous - * frame * @ds_reconfig : force reconfiguration of the destination scaler block * @feature_list : list of color processing features supported on a crtc * @active_list : list of color processing features are active @@ -158,7 +156,6 @@ struct sde_crtc_event { * @spin_lock : spin lock for frame event, transaction status, etc... 
* @retire_events : static allocation of retire fence connector * @retire_event_list : available retire fence connector list - * @frame_done_comp : for frame_event_done synchronization * @event_thread : Pointer to event handler thread * @event_worker : Event worker queue * @event_cache : Local cache of event worker structures @@ -209,7 +206,6 @@ struct sde_crtc { bool vblank_requested; bool suspend; bool enabled; - bool reset_request; bool ds_reconfig; struct list_head feature_list; @@ -228,7 +224,6 @@ struct sde_crtc { spinlock_t spin_lock; struct sde_crtc_retire_event retire_events[SDE_CRTC_FRAME_EVENT_SIZE]; struct list_head retire_event_list; - struct completion frame_done_comp; /* for handling internal event thread */ struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT]; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 239afa447f70bf0687beda5fc0b6b5f6f44ef1cc..f1499487adeee2a42f64c101e17fad1bff895e2f 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -79,6 +79,11 @@ /* Maximum number of VSYNC wait attempts for RSC state transition */ #define MAX_RSC_WAIT 5 +#define TOPOLOGY_DUALPIPE_MERGE_MODE(x) \ + (((x) == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE) || \ + ((x) == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE) || \ + ((x) == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC)) + /** * enum sde_enc_rc_events - events for resource control state machine * @SDE_ENC_RC_EVENT_KICKOFF: @@ -202,6 +207,7 @@ enum sde_enc_rc_states { * clks and resources after IDLE_TIMEOUT time. 
* @vsync_event_work: worker to handle vsync event for autorefresh * @input_event_work: worker to handle input device touch events + * @esd_trigger_work: worker to handle esd trigger events * @input_handler: handler for input device events * @topology: topology of the display * @vblank_enabled: boolean to track userspace vblank vote @@ -210,6 +216,7 @@ enum sde_enc_rc_states { * @cur_conn_roi: current connector roi * @prv_conn_roi: previous connector roi to optimize if unchanged * @crtc pointer to drm_crtc + * @recovery_events_enabled: status of hw recovery feature enable by client */ struct sde_encoder_virt { struct drm_encoder base; @@ -249,6 +256,7 @@ struct sde_encoder_virt { struct kthread_delayed_work delayed_off_work; struct kthread_work vsync_event_work; struct kthread_work input_event_work; + struct kthread_work esd_trigger_work; struct input_handler *input_handler; struct msm_display_topology topology; bool vblank_enabled; @@ -258,6 +266,8 @@ struct sde_encoder_virt { struct sde_rect cur_conn_roi; struct sde_rect prv_conn_roi; struct drm_crtc *crtc; + + bool recovery_events_enabled; }; #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base) @@ -1722,7 +1732,9 @@ static int _sde_encoder_update_rsc_client( struct drm_crtc *primary_crtc; int pipe = -1; int rc = 0; - int wait_refcount; + int wait_refcount, i; + struct sde_encoder_phys *phys; + u32 qsync_mode = 0; if (!drm_enc || !drm_enc->dev) { SDE_ERROR("invalid encoder arguments\n"); @@ -1751,14 +1763,28 @@ static int _sde_encoder_update_rsc_client( } /** - * only primary command mode panel can request CMD state. + * only primary command mode panel without Qsync can request CMD state. * all other panels/displays can request for VID state including * secondary command mode panel. 
*/ + for (i = 0; i < sde_enc->num_phys_encs; i++) { + phys = sde_enc->phys_encs[i]; + + if (phys) { + qsync_mode = sde_connector_get_property( + phys->connector->state, + CONNECTOR_PROP_QSYNC_MODE); + break; + } + } + rsc_state = enable ? (((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) && - disp_info->is_primary) ? SDE_RSC_CMD_STATE : - SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE; + disp_info->is_primary && !qsync_mode) ? + SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : + SDE_RSC_IDLE_STATE; + + SDE_EVT32(rsc_state, qsync_mode); prefill_lines = config ? mode_info.prefill_lines + config->inline_rotate_prefill : mode_info.prefill_lines; @@ -2467,8 +2493,20 @@ static int sde_encoder_resource_control(struct drm_encoder *drm_enc, IDLE_POWERCOLLAPSE_DURATION)); } else if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) { /* enable all the clks and resources */ + ret = _sde_encoder_resource_control_helper(drm_enc, + true); + if (ret) { + SDE_ERROR_ENC(sde_enc, + "sw_event:%d, rc in state %d\n", + sw_event, sde_enc->rc_state); + SDE_EVT32(DRMID(drm_enc), sw_event, + sde_enc->rc_state, + SDE_EVTLOG_ERROR); + mutex_unlock(&sde_enc->rc_lock); + return ret; + } + _sde_encoder_resource_control_rsc_update(drm_enc, true); - _sde_encoder_resource_control_helper(drm_enc, true); kthread_mod_delayed_work(&disp_thread->worker, &sde_enc->delayed_off_work, @@ -3093,6 +3131,27 @@ static void sde_encoder_frame_done_callback( } } +static void sde_encoder_get_qsync_fps_callback( + struct drm_encoder *drm_enc, + u32 *qsync_fps) +{ + struct msm_display_info *disp_info; + struct sde_encoder_virt *sde_enc; + + if (!qsync_fps) + return; + + *qsync_fps = 0; + if (!drm_enc) { + SDE_ERROR("invalid drm encoder\n"); + return; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + disp_info = &sde_enc->disp_info; + *qsync_fps = disp_info->qsync_min_fps; +} + int sde_encoder_idle_request(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc; @@ -3424,9 +3483,7 @@ static void 
_sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) &pending_flush); } - _sde_encoder_trigger_start(sde_enc->cur_master); - - /* update pending_kickoff_cnt AFTER next frame is queued in HW */ + /* update pending_kickoff_cnt AFTER flush but before trigger start */ for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; @@ -3446,6 +3503,8 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) SDE_EVTLOG_FUNC_CASE2); } } + + _sde_encoder_trigger_start(sde_enc->cur_master); } static void _sde_encoder_ppsplit_swap_intf_for_right_only_update( @@ -3628,22 +3687,55 @@ void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys) { void *dither_cfg; - int ret = 0; + int ret = 0, rc, i = 0; size_t len = 0; enum sde_rm_topology_name topology; + struct drm_encoder *drm_enc; + struct msm_mode_info mode_info; + struct msm_display_dsc_info *dsc = NULL; + struct sde_encoder_virt *sde_enc; + struct sde_hw_pingpong *hw_pp; if (!phys || !phys->connector || !phys->hw_pp || - !phys->hw_pp->ops.setup_dither) + !phys->hw_pp->ops.setup_dither || !phys->parent) return; + topology = sde_connector_get_topology_name(phys->connector); if ((topology == SDE_RM_TOPOLOGY_PPSPLIT) && (phys->split_role == ENC_ROLE_SLAVE)) return; + drm_enc = phys->parent; + sde_enc = to_sde_encoder_virt(drm_enc); + rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info); + if (rc) { + SDE_ERROR_ENC(sde_enc, "failed to get mode info\n"); + return; + } + + dsc = &mode_info.comp_info.dsc_info; + /* disable dither for 10 bpp or 10bpc dsc config */ + if (dsc->bpp == 10 || dsc->bpc == 10) { + phys->hw_pp->ops.setup_dither(phys->hw_pp, NULL, 0); + return; + } + ret = sde_connector_get_dither_cfg(phys->connector, - phys->connector->state, &dither_cfg, &len); - if (!ret) + phys->connector->state, &dither_cfg, &len); + if (ret) + return; + + if 
(TOPOLOGY_DUALPIPE_MERGE_MODE(topology)) { + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { + hw_pp = sde_enc->hw_pp[i]; + if (hw_pp) { + phys->hw_pp->ops.setup_dither(hw_pp, dither_cfg, + len); + } + } + } else { phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len); + } } static u32 _sde_encoder_calculate_linetime(struct sde_encoder_virt *sde_enc, @@ -3770,6 +3862,20 @@ static void sde_encoder_vsync_event_handler(unsigned long data) &sde_enc->vsync_event_work); } +static void sde_encoder_esd_trigger_work_handler(struct kthread_work *work) +{ + struct sde_encoder_virt *sde_enc = container_of(work, + struct sde_encoder_virt, esd_trigger_work); + + if (!sde_enc) { + SDE_ERROR("invalid sde encoder\n"); + return; + } + + sde_encoder_resource_control(&sde_enc->base, + SDE_ENC_RC_EVENT_KICKOFF); +} + static void sde_encoder_input_event_work_handler(struct kthread_work *work) { struct sde_encoder_virt *sde_enc = container_of(work, @@ -3966,6 +4072,46 @@ int sde_encoder_poll_line_counts(struct drm_encoder *drm_enc) return -ETIMEDOUT; } +static int _helper_flush_mixer(struct sde_encoder_phys *phys_enc) +{ + struct drm_encoder *drm_enc; + struct sde_hw_mixer_cfg mixer; + struct sde_rm_hw_iter lm_iter; + bool lm_valid = false; + + if (!phys_enc || !phys_enc->parent) { + SDE_ERROR("invalid encoder\n"); + return -EINVAL; + } + + drm_enc = phys_enc->parent; + memset(&mixer, 0, sizeof(mixer)); + + sde_rm_init_hw_iter(&lm_iter, drm_enc->base.id, SDE_HW_BLK_LM); + while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &lm_iter)) { + struct sde_hw_mixer *hw_lm = (struct sde_hw_mixer *)lm_iter.hw; + + if (!hw_lm) + continue; + + /* update LM flush */ + if (phys_enc->hw_ctl->ops.update_bitmask_mixer) + phys_enc->hw_ctl->ops.update_bitmask_mixer( + phys_enc->hw_ctl, + hw_lm->idx, 1); + + lm_valid = true; + } + + if (!lm_valid) { + SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc), + "lm not found to flush\n"); + return -EFAULT; + } + + return 0; +} + int sde_encoder_prepare_for_kickoff(struct 
drm_encoder *drm_enc, struct sde_encoder_kickoff_params *params) { @@ -3977,6 +4123,7 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, uint32_t ln_cnt1, ln_cnt2; unsigned int i; int rc, ret = 0; + struct msm_display_info *disp_info; if (!drm_enc || !params || !drm_enc->dev || !drm_enc->dev->dev_private) { @@ -3986,6 +4133,7 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, sde_enc = to_sde_encoder_virt(drm_enc); priv = drm_enc->dev->dev_private; sde_kms = to_sde_kms(priv->kms); + disp_info = &sde_enc->disp_info; SDE_DEBUG_ENC(sde_enc, "\n"); SDE_EVT32(DRMID(drm_enc)); @@ -4012,6 +4160,12 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET) needs_hw_reset = true; _sde_encoder_setup_dither(phys); + + /* flush the mixer if qsync is enabled */ + if (sde_enc->cur_master && sde_connector_qsync_updated( + sde_enc->cur_master->connector)) { + _helper_flush_mixer(phys); + } } } SDE_ATRACE_END("enc_prepare_for_kickoff"); @@ -4567,6 +4721,7 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, sde_encoder_vblank_callback, sde_encoder_underrun_callback, sde_encoder_frame_done_callback, + sde_encoder_get_qsync_fps_callback, }; struct sde_enc_phys_init_params phys_params; @@ -4763,6 +4918,9 @@ struct drm_encoder *sde_encoder_init( kthread_init_work(&sde_enc->input_event_work, sde_encoder_input_event_work_handler); + kthread_init_work(&sde_enc->esd_trigger_work, + sde_encoder_esd_trigger_work_handler); + memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info)); SDE_DEBUG_ENC(sde_enc, "created\n"); @@ -5050,6 +5208,33 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder) int sde_encoder_display_failure_notification(struct drm_encoder *enc) { + struct msm_drm_thread *disp_thread = NULL; + struct msm_drm_private *priv = NULL; + struct sde_encoder_virt *sde_enc = NULL; + + if (!enc || !enc->dev || !enc->dev->dev_private) { + 
SDE_ERROR("invalid parameters\n"); + return -EINVAL; + } + + priv = enc->dev->dev_private; + sde_enc = to_sde_encoder_virt(enc); + if (!sde_enc->crtc || (sde_enc->crtc->index + >= ARRAY_SIZE(priv->disp_thread))) { + SDE_DEBUG_ENC(sde_enc, + "invalid cached CRTC: %d or crtc index: %d\n", + sde_enc->crtc == NULL, + sde_enc->crtc ? sde_enc->crtc->index : -EINVAL); + return -EINVAL; + } + + SDE_EVT32_VERBOSE(DRMID(enc)); + + disp_thread = &priv->disp_thread[sde_enc->crtc->index]; + + kthread_queue_work(&disp_thread->worker, + &sde_enc->esd_trigger_work); + kthread_flush_work(&sde_enc->esd_trigger_work); /** * panel may stop generating te signal (vsync) during esd failure. rsc * hardware may hang without vsync. Avoid rsc hang by generating the @@ -5061,3 +5246,31 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc) return 0; } + +bool sde_encoder_recovery_events_enabled(struct drm_encoder *encoder) +{ + struct sde_encoder_virt *sde_enc; + + if (!encoder) { + SDE_ERROR("invalid drm enc\n"); + return false; + } + + sde_enc = to_sde_encoder_virt(encoder); + + return sde_enc->recovery_events_enabled; +} + +void sde_encoder_recovery_events_handler(struct drm_encoder *encoder, + bool enabled) +{ + struct sde_encoder_virt *sde_enc; + + if (!encoder) { + SDE_ERROR("invalid drm enc\n"); + return; + } + + sde_enc = to_sde_encoder_virt(encoder); + sde_enc->recovery_events_enabled = enabled; +} diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index 46967367ce78331fb88a50b63950b7bb298b5e1f..881520fc536bf8d0d7284791f8b2adc56c048600 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -57,11 +57,13 @@ struct sde_encoder_hw_resources { * @is_primary: set to true if the display is primary display * @affected_displays: bitmask, bit set means the ROI of the commit lies within * the bounds of the physical display at the bit index + * @recovery_events_enabled: indicates status of 
client for recovery events */ struct sde_encoder_kickoff_params { u32 inline_rotate_prefill; u32 is_primary; unsigned long affected_displays; + bool recovery_events_enabled; }; /** @@ -241,10 +243,31 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder); * sde_encoder_display_failure_notification - update sde encoder state for * esd timeout or other display failure notification. This event flows from * dsi, sde_connector to sde_encoder. + * + * This api must not be called from crtc_commit (display) thread because it + * requests the flush work on same thread. It is called from esd check thread + * based on current design. + * * TODO: manage the event at sde_kms level for forward processing. * @drm_enc: Pointer to drm encoder structure * @Return: true if successful in updating the encoder structure */ int sde_encoder_display_failure_notification(struct drm_encoder *enc); +/** + * sde_encoder_recovery_events_enabled - checks if client has enabled + * sw recovery mechanism for this connector + * @drm_enc: Pointer to drm encoder structure + * @Return: true if enabled + */ +bool sde_encoder_recovery_events_enabled(struct drm_encoder *encoder); + +/** + * sde_encoder_recovery_events_handler - handler to enable/disable the + * sw recovery for this connector + * @drm_enc: Pointer to drm encoder structure + */ +void sde_encoder_recovery_events_handler(struct drm_encoder *encoder, + bool val); + #endif /* __SDE_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 488a36fe7899b20b461f5a21cf4e5a24bd86ee8b..5c403ef6bef2941a36f3287eb36e7d13f54557a2 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -80,6 +80,7 @@ struct sde_encoder_phys; * Note: This is called from IRQ handler context. * @handle_frame_done: Notify virtual encoder that this phys encoder * completes last request frame. 
+ * @get_qsync_fps: Returns the min fps for the qsync feature. */ struct sde_encoder_virt_ops { void (*handle_vblank_virt)(struct drm_encoder *, @@ -88,6 +89,8 @@ struct sde_encoder_virt_ops { struct sde_encoder_phys *phys); void (*handle_frame_done)(struct drm_encoder *, struct sde_encoder_phys *phys, u32 event); + void (*get_qsync_fps)(struct drm_encoder *, + u32 *qsync_fps); }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 5c4d9f25c935dbbb32842e0621f469b99baf3440..128b9d7a71dde4c4fee8750b0fb9b95011ae0e69 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -32,7 +32,7 @@ #define to_sde_encoder_phys_cmd(x) \ container_of(x, struct sde_encoder_phys_cmd, base) -#define PP_TIMEOUT_MAX_TRIALS 2 +#define PP_TIMEOUT_MAX_TRIALS 4 /* * Tearcheck sync start and continue thresholds are empirically found @@ -490,16 +490,20 @@ static void sde_encoder_phys_cmd_mode_set( } static int _sde_encoder_phys_cmd_handle_ppdone_timeout( - struct sde_encoder_phys *phys_enc) + struct sde_encoder_phys *phys_enc, + bool recovery_events) { struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); u32 frame_event = SDE_ENCODER_FRAME_EVENT_ERROR | SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE; + struct drm_connector *conn; + int event; if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl) return -EINVAL; + conn = phys_enc->connector; cmd_enc->pp_timeout_report_cnt++; if (sde_encoder_phys_cmd_is_master(phys_enc)) { @@ -518,21 +522,31 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout( atomic_read(&phys_enc->pending_kickoff_cnt), frame_event); - if (cmd_enc->pp_timeout_report_cnt >= PP_TIMEOUT_MAX_TRIALS) { - cmd_enc->pp_timeout_report_cnt = PP_TIMEOUT_MAX_TRIALS; - frame_event |= SDE_ENCODER_FRAME_EVENT_PANEL_DEAD; - - SDE_DBG_DUMP("panic"); - } else if (cmd_enc->pp_timeout_report_cnt == 1) { - /* to avoid flooding, only log first time, 
and "dead" time */ + /* to avoid flooding, only log first time, and "dead" time */ + if (cmd_enc->pp_timeout_report_cnt == 1) { SDE_ERROR_CMDENC(cmd_enc, - "pp:%d kickoff timed out ctl %d cnt %d koff_cnt %d\n", + "pp:%d kickoff timed out ctl %d koff_cnt %d\n", phys_enc->hw_pp->idx - PINGPONG_0, phys_enc->hw_ctl->idx - CTL_0, - cmd_enc->pp_timeout_report_cnt, atomic_read(&phys_enc->pending_kickoff_cnt)); SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL); + sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR); + SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus"); + sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR); + } + + /* + * if the recovery event is registered by user, don't panic + * trigger panic on first timeout if no listener registered + */ + if (recovery_events) { + event = cmd_enc->pp_timeout_report_cnt > PP_TIMEOUT_MAX_TRIALS ? + SDE_RECOVERY_HARD_RESET : SDE_RECOVERY_CAPTURE; + sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY, + sizeof(uint8_t), event); + } else if (cmd_enc->pp_timeout_report_cnt) { + SDE_DBG_DUMP("panic"); } atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); @@ -677,6 +691,7 @@ static int _sde_encoder_phys_cmd_wait_for_idle( struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); struct sde_encoder_wait_info wait_info; + bool recovery_events; int ret; if (!phys_enc) { @@ -687,6 +702,8 @@ static int _sde_encoder_phys_cmd_wait_for_idle( wait_info.wq = &phys_enc->pending_kickoff_wq; wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; + recovery_events = sde_encoder_recovery_events_enabled( + phys_enc->parent); /* slave encoder doesn't enable for ppsplit */ if (_sde_encoder_phys_is_ppsplit_slave(phys_enc)) @@ -694,10 +711,20 @@ static int _sde_encoder_phys_cmd_wait_for_idle( ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG, &wait_info); - if (ret == -ETIMEDOUT) - _sde_encoder_phys_cmd_handle_ppdone_timeout(phys_enc); - else if 
(!ret) + if (ret == -ETIMEDOUT) { + _sde_encoder_phys_cmd_handle_ppdone_timeout(phys_enc, + recovery_events); + } else if (!ret) { + if (cmd_enc->pp_timeout_report_cnt && recovery_events) { + struct drm_connector *conn = phys_enc->connector; + + sde_connector_event_notify(conn, + DRM_EVENT_SDE_HW_RECOVERY, + sizeof(uint8_t), + SDE_RECOVERY_SUCCESS); + } cmd_enc->pp_timeout_report_cnt = 0; + } return ret; } @@ -835,6 +862,76 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc, } } +static int _get_tearcheck_threshold(struct sde_encoder_phys *phys_enc) +{ + struct drm_connector *conn = phys_enc->connector; + u32 qsync_mode; + struct drm_display_mode *mode; + u32 threshold_lines = 0; + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + + if (!conn || !conn->state) + return 0; + + mode = &phys_enc->cached_mode; + qsync_mode = sde_connector_get_property( + conn->state, CONNECTOR_PROP_QSYNC_MODE); + + if (mode && (qsync_mode == SDE_RM_QSYNC_CONTINUOUS_MODE)) { + u32 qsync_min_fps = 0; + u32 default_fps = mode->vrefresh; + u32 yres = mode->vdisplay; + u32 slow_time_ns; + u32 default_time_ns; + u32 extra_time_ns; + u32 total_extra_lines; + u32 default_line_time_ns; + + if (phys_enc->parent_ops.get_qsync_fps) + phys_enc->parent_ops.get_qsync_fps( + phys_enc->parent, &qsync_min_fps); + + if (!qsync_min_fps || !default_fps || !yres) { + SDE_ERROR_CMDENC(cmd_enc, + "wrong qsync params %d %d %d\n", + qsync_min_fps, default_fps, yres); + goto exit; + } + + if (qsync_min_fps >= default_fps) { + SDE_ERROR_CMDENC(cmd_enc, + "qsync fps:%d must be less than default:%d\n", + qsync_min_fps, default_fps); + goto exit; + } + + /* Calculate the number of extra lines*/ + slow_time_ns = (1 * 1000000000) / qsync_min_fps; + default_time_ns = (1 * 1000000000) / default_fps; + extra_time_ns = slow_time_ns - default_time_ns; + default_line_time_ns = (1 * 1000000000) / (default_fps * yres); + + total_extra_lines = extra_time_ns / 
default_line_time_ns; + threshold_lines += total_extra_lines; + + SDE_DEBUG_CMDENC(cmd_enc, "slow:%d default:%d extra:%d(ns)\n", + slow_time_ns, default_time_ns, extra_time_ns); + SDE_DEBUG_CMDENC(cmd_enc, "extra_lines:%d threshold:%d\n", + total_extra_lines, threshold_lines); + SDE_DEBUG_CMDENC(cmd_enc, "min_fps:%d fps:%d yres:%d\n", + qsync_min_fps, default_fps, yres); + + SDE_EVT32(qsync_mode, qsync_min_fps, extra_time_ns, default_fps, + yres, threshold_lines); + } + +exit: + threshold_lines += DEFAULT_TEARCHECK_SYNC_THRESH_START; + + return threshold_lines; +} + static void sde_encoder_phys_cmd_tearcheck_config( struct sde_encoder_phys *phys_enc) { @@ -907,7 +1004,7 @@ static void sde_encoder_phys_cmd_tearcheck_config( */ tc_cfg.sync_cfg_height = 0xFFF0; tc_cfg.vsync_init_val = mode->vdisplay; - tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START; + tc_cfg.sync_threshold_start = _get_tearcheck_threshold(phys_enc); tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE; tc_cfg.start_pos = mode->vdisplay; tc_cfg.rd_ptr_irq = mode->vdisplay + 1; @@ -1201,6 +1298,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff( struct sde_encoder_phys *phys_enc, struct sde_encoder_kickoff_params *params) { + struct sde_hw_tear_check tc_cfg = {0}; struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); int ret; @@ -1228,6 +1326,20 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff( SDE_ERROR("failed wait_for_idle: %d\n", ret); } + if (sde_connector_qsync_updated(phys_enc->connector)) { + tc_cfg.sync_threshold_start = + _get_tearcheck_threshold(phys_enc); + if (phys_enc->has_intf_te && + phys_enc->hw_intf->ops.update_tearcheck) + phys_enc->hw_intf->ops.update_tearcheck( + phys_enc->hw_intf, &tc_cfg); + else if (phys_enc->hw_pp->ops.update_tearcheck) + phys_enc->hw_pp->ops.update_tearcheck( + phys_enc->hw_pp, &tc_cfg); + SDE_EVT32(DRMID(phys_enc->parent), + tc_cfg.sync_threshold_start); + } + SDE_DEBUG_CMDENC(cmd_enc, 
"pp:%d pending_cnt %d\n", phys_enc->hw_pp->idx - PINGPONG_0, atomic_read(&phys_enc->pending_kickoff_cnt)); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 8d6787fb90f469e0cdd2d0731e31cdf83f0572a0..ea0fb886776f4b255ece0ab8bc977db244cb0261 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -341,6 +341,54 @@ static bool sde_encoder_phys_vid_mode_fixup( return true; } +/* vid_enc timing_params must be configured before calling this function */ +static void _sde_encoder_phys_vid_setup_avr( + struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_vid *vid_enc; + struct drm_display_mode mode; + + vid_enc = to_sde_encoder_phys_vid(phys_enc); + mode = phys_enc->cached_mode; + if (vid_enc->base.hw_intf->ops.avr_setup) { + struct intf_avr_params avr_params = {0}; + u32 qsync_min_fps = 0; + u32 default_fps = mode.vrefresh; + int ret; + + if (phys_enc->parent_ops.get_qsync_fps) + phys_enc->parent_ops.get_qsync_fps( + phys_enc->parent, &qsync_min_fps); + + if (!qsync_min_fps || !default_fps) { + SDE_ERROR_VIDENC(vid_enc, + "wrong qsync params %d %d\n", + qsync_min_fps, default_fps); + return; + } + + if (qsync_min_fps >= default_fps) { + SDE_ERROR_VIDENC(vid_enc, + "qsync fps %d must be less than default %d\n", + qsync_min_fps, default_fps); + return; + } + + avr_params.default_fps = default_fps; + avr_params.min_fps = qsync_min_fps; + + ret = vid_enc->base.hw_intf->ops.avr_setup( + vid_enc->base.hw_intf, + &vid_enc->timing_params, &avr_params); + if (ret) + SDE_ERROR_VIDENC(vid_enc, + "bad settings, can't configure AVR\n"); + + SDE_EVT32(DRMID(phys_enc->parent), default_fps, + qsync_min_fps, ret); + } +} + static void sde_encoder_phys_vid_setup_timing_engine( struct sde_encoder_phys *phys_enc) { @@ -387,7 +435,7 @@ static void sde_encoder_phys_vid_setup_timing_engine( if (phys_enc->sde_kms->splash_data.cont_splash_en) { 
SDE_DEBUG_VIDENC(vid_enc, "skipping intf programming since cont splash is enabled\n"); - return; + goto exit; } fmt = sde_get_sde_format(fmt_fourcc); @@ -412,6 +460,9 @@ static void sde_encoder_phys_vid_setup_timing_engine( } spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); programmable_fetch_config(phys_enc, &timing_params); + +exit: + _sde_encoder_phys_vid_setup_avr(phys_enc); } static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) @@ -846,7 +897,11 @@ static int sde_encoder_phys_vid_prepare_for_kickoff( { struct sde_encoder_phys_vid *vid_enc; struct sde_hw_ctl *ctl; + bool recovery_events; + struct drm_connector *conn; + int event; int rc; + struct intf_avr_params avr_params; if (!phys_enc || !params || !phys_enc->hw_ctl) { SDE_ERROR("invalid encoder/parameters\n"); @@ -858,6 +913,9 @@ static int sde_encoder_phys_vid_prepare_for_kickoff( if (!ctl->ops.wait_reset_status) return 0; + conn = phys_enc->connector; + recovery_events = sde_encoder_recovery_events_enabled( + phys_enc->parent); /* * hw supports hardware initiated ctl reset, so before we kickoff a new * frame, need to check and wait for hw initiated ctl reset completion @@ -868,20 +926,59 @@ static int sde_encoder_phys_vid_prepare_for_kickoff( ctl->idx, rc); ++vid_enc->error_count; - if (vid_enc->error_count >= KICKOFF_MAX_ERRORS) { - vid_enc->error_count = KICKOFF_MAX_ERRORS; - SDE_DBG_DUMP("panic"); - } else if (vid_enc->error_count == 1) { + /* to avoid flooding, only log first time, and "dead" time */ + if (vid_enc->error_count == 1) { SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL); + + sde_encoder_helper_unregister_irq( + phys_enc, INTR_IDX_VSYNC); + SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus"); + sde_encoder_helper_register_irq( + phys_enc, INTR_IDX_VSYNC); + } + + /* + * if the recovery event is registered by user, don't panic + * trigger panic on first timeout if no listener registered + */ + if (recovery_events) { + event = vid_enc->error_count > 
KICKOFF_MAX_ERRORS ? + SDE_RECOVERY_HARD_RESET : SDE_RECOVERY_CAPTURE; + sde_connector_event_notify(conn, + DRM_EVENT_SDE_HW_RECOVERY, + sizeof(uint8_t), event); + } else { + SDE_DBG_DUMP("panic"); } /* request a ctl reset before the next flush */ phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET; } else { + if (recovery_events && vid_enc->error_count) + sde_connector_event_notify(conn, + DRM_EVENT_SDE_HW_RECOVERY, + sizeof(uint8_t), + SDE_RECOVERY_SUCCESS); vid_enc->error_count = 0; } + if (sde_connector_qsync_updated(phys_enc->connector)) { + avr_params.avr_mode = sde_connector_get_property( + phys_enc->connector->state, + CONNECTOR_PROP_QSYNC_MODE); + + if (vid_enc->base.hw_intf->ops.avr_ctrl) { + vid_enc->base.hw_intf->ops.avr_ctrl( + vid_enc->base.hw_intf, + &avr_params); + } + + SDE_EVT32(DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, + avr_params.avr_mode); + } + programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill, params->is_primary); @@ -959,6 +1056,20 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) } sde_encoder_phys_vid_control_vblank_irq(phys_enc, false); } + + if (phys_enc->hw_intf->ops.bind_pingpong_blk) + phys_enc->hw_intf->ops.bind_pingpong_blk(phys_enc->hw_intf, + false, phys_enc->hw_pp->idx - PINGPONG_0); + + if (phys_enc->hw_pp && phys_enc->hw_pp->ops.reset_3d_mode) + phys_enc->hw_pp->ops.reset_3d_mode(phys_enc->hw_pp); + + if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d && + phys_enc->hw_ctl->ops.reset_post_te_disable) + phys_enc->hw_ctl->ops.reset_post_te_disable( + phys_enc->hw_ctl, &phys_enc->intf_cfg_v1, + phys_enc->hw_pp->merge_3d->idx); + exit: phys_enc->enable_state = SDE_ENC_DISABLED; } @@ -968,6 +1079,7 @@ static void sde_encoder_phys_vid_handle_post_kickoff( { unsigned long lock_flags; struct sde_encoder_phys_vid *vid_enc; + u32 avr_mode; if (!phys_enc) { SDE_ERROR("invalid encoder\n"); @@ -989,6 +1101,17 @@ static void sde_encoder_phys_vid_handle_post_kickoff( 
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); phys_enc->enable_state = SDE_ENC_ENABLED; } + + avr_mode = sde_connector_get_property( + phys_enc->connector->state, + CONNECTOR_PROP_QSYNC_MODE); + + if (avr_mode && vid_enc->base.hw_intf->ops.avr_trigger) { + vid_enc->base.hw_intf->ops.avr_trigger(vid_enc->base.hw_intf); + SDE_EVT32(DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, + SDE_EVTLOG_FUNC_CASE9); + } } static void sde_encoder_phys_vid_irq_control(struct sde_encoder_phys *phys_enc, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 78889591f583153753b3043051457fae46a131c7..99a48a9f0433851c370d563c4b622aa2383599bf 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -3564,6 +3564,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) sde_cfg->ts_prefill_rev = 2; } else if (IS_SM8150_TARGET(hw_rev)) { sde_cfg->has_wb_ubwc = true; + sde_cfg->has_qsync = true; sde_cfg->perf.min_prefill_lines = 24; sde_cfg->vbif_qos_nlvl = 8; sde_cfg->ts_prefill_rev = 2; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index dee834c05895ae82b415b5b6366c3d3fffa327de..eef9e2110e52bf42d3fe121310db01c73528524f 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -1045,6 +1045,7 @@ struct sde_perf_cfg { * @macrotile_mode UBWC parameter for macro tile channel distribution * @pipe_order_type indicate if it is required to specify pipe order * @delay_prg_fetch_start indicates if throttling the fetch start is required + * @has_qsync Supports qsync feature * @sui_misr_supported indicate if secure-ui-misr is supported * @sui_block_xin_mask mask of all the xin-clients to be blocked during * secure-ui when secure-ui-misr feature is supported @@ -1088,6 +1089,7 @@ struct sde_mdss_cfg { u32 macrotile_mode; u32 pipe_order_type; bool 
delay_prg_fetch_start; + bool has_qsync; bool sui_misr_supported; u32 sui_block_xin_mask; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c index 280363063af10f51d1f3aa2d0e673179db68e7b5..ef1dc414c18a5b2051c6b212ba448f7fbc865d98 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c @@ -680,7 +680,7 @@ void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg) void __iomem *base; if (!hw_cfg || (hw_cfg->len != sizeof(*pcc) && hw_cfg->payload)) { - DRM_ERROR("invalid params hw %p payload %p payloadsize %d \"\ + DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d \"\ exp size %zd\n", hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL), ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc)); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index 4ab6941697a3d71dfdf2ec9db8045555fb51aefd..12130822cfb932ec7dfff09dbdbe1e58db9baec3 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -912,6 +912,48 @@ static int sde_hw_ctl_intf_cfg_v1(struct sde_hw_ctl *ctx, return 0; } +static int sde_hw_ctl_reset_post_te_disable(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg_v1 *cfg, u32 merge_3d_idx) +{ + struct sde_hw_blk_reg_map *c; + u32 intf_active = 0; + u32 intf_flush = 0; + u32 merge_3d_active = 0; + u32 merge_3d_flush = 0; + u32 i; + + if (!ctx || !cfg) { + SDE_ERROR("invalid hw_ctl or hw_intf blk\n"); + return -EINVAL; + } + + c = &ctx->hw; + for (i = 0; i < cfg->intf_count; i++) { + if (cfg->intf[i]) { + intf_active &= ~BIT(cfg->intf[i] - INTF_0); + intf_flush |= BIT(cfg->intf[i] - INTF_0); + } + } + + /* disable and flush merge3d_blk */ + merge_3d_flush = BIT(merge_3d_idx - MERGE_3D_0); + merge_3d_active &= ~BIT(merge_3d_idx - MERGE_3D_0); + + sde_hw_ctl_clear_all_blendstages(ctx); + SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active); + 
SDE_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); + SDE_REG_WRITE(c, CTL_MERGE_3D_FLUSH, merge_3d_flush); + SDE_REG_WRITE(c, CTL_INTF_FLUSH, intf_flush); + + /* flush intf, ctl, layer mixer and merge_3d in reset sequence */ + SDE_REG_WRITE(c, CTL_FLUSH, 0x809207C0); + + /* do ctl start for flushing ctl path when timing engine is disabled */ + SDE_REG_WRITE(c, CTL_START, 0x1); + + return 0; +} + static int sde_hw_ctl_dsc_cfg(struct sde_hw_ctl *ctx, struct sde_ctl_dsc_cfg *cfg) { @@ -1048,6 +1090,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, sde_hw_ctl_update_bitmask_merge3d_v1; ops->update_bitmask_cwb = sde_hw_ctl_update_bitmask_cwb_v1; ops->get_ctl_intf = sde_hw_ctl_get_intf_v1; + ops->reset_post_te_disable = sde_hw_ctl_reset_post_te_disable; } else { ops->update_pending_flush = sde_hw_ctl_update_pending_flush; ops->trigger_flush = sde_hw_ctl_trigger_flush; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h index 08167a400953c6e9da2a6bdcf3197dfaa26c7616..a4b1dd745827988b2933860efaa90fd474b80745 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h @@ -240,6 +240,16 @@ struct sde_hw_ctl_ops { int (*setup_intf_cfg)(struct sde_hw_ctl *ctx, struct sde_hw_intf_cfg *cfg); + /** + * Reset ctl_path iterface config + * @ctx + * @cfg : interface config structure pointer + * @merge_3d_idx : index of merge3d blk + * @Return: error code + */ + int (*reset_post_te_disable)(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg_v1 *cfg, u32 merge_3d_idx); + /** * Setup ctl_path interface config for SDE_CTL_ACTIVE_CFG * @ctx : ctl path ctx pointer diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 8bb99ad2f079d535cd2088c99f1c56581114ce62..0eb0a7c930a03072ae31432f2d3f06581a894d70 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -62,14 +62,14 @@ #define INTF_PROG_FETCH_START 0x170 #define INTF_PROG_ROT_START 
0x174 -#define INTF_FRAME_LINE_COUNT_EN 0x0A8 -#define INTF_FRAME_COUNT 0x0AC -#define INTF_LINE_COUNT 0x0B0 - #define INTF_MISR_CTRL 0x180 #define INTF_MISR_SIGNATURE 0x184 #define INTF_MUX 0x25C +#define INTF_AVR_CONTROL 0x270 +#define INTF_AVR_MODE 0x274 +#define INTF_AVR_TRIGGER 0x278 +#define INTF_AVR_VTOTAL 0x27C #define INTF_TEAR_MDP_VSYNC_SEL 0x280 #define INTF_TEAR_TEAR_CHECK_EN 0x284 #define INTF_TEAR_SYNC_CONFIG_VSYNC 0x288 @@ -86,6 +86,9 @@ #define INTF_TEAR_AUTOREFRESH_CONFIG 0x2B4 #define INTF_TEAR_TEAR_DETECT_CTRL 0x2B8 +#define AVR_CONTINUOUS_MODE 1 +#define AVR_ONE_SHOT_MODE 2 + static struct sde_intf_cfg *_intf_offset(enum sde_intf intf, struct sde_mdss_cfg *m, void __iomem *addr, @@ -108,6 +111,75 @@ static struct sde_intf_cfg *_intf_offset(enum sde_intf intf, return ERR_PTR(-EINVAL); } +static void sde_hw_intf_avr_trigger(struct sde_hw_intf *ctx) +{ + struct sde_hw_blk_reg_map *c; + + if (!ctx) + return; + + c = &ctx->hw; + SDE_REG_WRITE(c, INTF_AVR_TRIGGER, 0x1); + SDE_DEBUG("AVR Triggered\n"); +} + +static int sde_hw_intf_avr_setup(struct sde_hw_intf *ctx, + const struct intf_timing_params *params, + const struct intf_avr_params *avr_params) +{ + struct sde_hw_blk_reg_map *c; + u32 hsync_period, vsync_period; + u32 min_fps, default_fps, diff_fps; + u32 vsync_period_slow; + u32 avr_vtotal; + u32 add_porches; + + if (!ctx || !params || !avr_params) { + SDE_ERROR("invalid input parameter(s)\n"); + return -EINVAL; + } + + c = &ctx->hw; + min_fps = avr_params->min_fps; + default_fps = avr_params->default_fps; + diff_fps = default_fps - min_fps; + hsync_period = params->hsync_pulse_width + + params->h_back_porch + params->width + + params->h_front_porch; + vsync_period = params->vsync_pulse_width + + params->v_back_porch + params->height + + params->v_front_porch; + add_porches = mult_frac(vsync_period, diff_fps, min_fps); + vsync_period_slow = vsync_period + add_porches; + avr_vtotal = vsync_period_slow * hsync_period; + + SDE_REG_WRITE(c, 
INTF_AVR_VTOTAL, avr_vtotal); + + return 0; +} + +static void sde_hw_intf_avr_ctrl(struct sde_hw_intf *ctx, + const struct intf_avr_params *avr_params) +{ + struct sde_hw_blk_reg_map *c; + u32 avr_mode = 0; + u32 avr_ctrl = 0; + + if (!ctx || !avr_params) + return; + + c = &ctx->hw; + if (avr_params->avr_mode) { + avr_ctrl = BIT(0); + avr_mode = (avr_params->avr_mode == AVR_ONE_SHOT_MODE) ? + (BIT(1) | BIT(8)) : 0x0; + } + + SDE_REG_WRITE(c, INTF_AVR_CONTROL, avr_ctrl); + SDE_REG_WRITE(c, INTF_AVR_MODE, avr_mode); +} + + static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx, const struct intf_timing_params *p, const struct sde_format *fmt) @@ -435,6 +507,22 @@ static int sde_hw_intf_enable_te(struct sde_hw_intf *intf, bool enable) return 0; } +static void sde_hw_intf_update_te(struct sde_hw_intf *intf, + struct sde_hw_tear_check *te) +{ + struct sde_hw_blk_reg_map *c; + int cfg; + + if (!intf || !te) + return; + + c = &intf->hw; + cfg = SDE_REG_READ(c, INTF_TEAR_SYNC_THRESH); + cfg &= ~0xFFFF; + cfg |= te->sync_threshold_start; + SDE_REG_WRITE(c, INTF_TEAR_SYNC_THRESH, cfg); +} + static int sde_hw_intf_connect_external_te(struct sde_hw_intf *intf, bool enable_external_te) { @@ -504,6 +592,9 @@ static void _setup_intf_ops(struct sde_hw_intf_ops *ops, ops->setup_misr = sde_hw_intf_setup_misr; ops->collect_misr = sde_hw_intf_collect_misr; ops->get_line_count = sde_hw_intf_get_line_count; + ops->avr_setup = sde_hw_intf_avr_setup; + ops->avr_trigger = sde_hw_intf_avr_trigger; + ops->avr_ctrl = sde_hw_intf_avr_ctrl; if (cap & BIT(SDE_INTF_ROT_START)) ops->setup_rot_start = sde_hw_intf_setup_rot_start; @@ -513,6 +604,7 @@ static void _setup_intf_ops(struct sde_hw_intf_ops *ops, if (cap & BIT(SDE_INTF_TE)) { ops->setup_tearcheck = sde_hw_intf_setup_te_config; ops->enable_tearcheck = sde_hw_intf_enable_te; + ops->update_tearcheck = sde_hw_intf_update_te; ops->connect_external_te = sde_hw_intf_connect_external_te; ops->get_vsync_info = 
sde_hw_intf_get_vsync_info; ops->setup_autorefresh = sde_hw_intf_setup_autorefresh_config; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h index 5fe73b471021dcf0ad277a15b791aca9ab26aaa9..387de73a84b308bbddc4ec5563c33babcec70880 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h @@ -52,6 +52,12 @@ struct intf_status { u32 line_count; /* current line count including blanking */ }; +struct intf_avr_params { + u32 default_fps; + u32 min_fps; + u32 avr_mode; /* 0 - disable, 1 - continuous, 2 - one-shot */ +}; + /** * struct sde_hw_intf_ops : Interface to the interface Hw driver functions * Assumption is these functions will be called after clocks are enabled @@ -112,6 +118,12 @@ struct sde_hw_intf_ops { int (*enable_tearcheck)(struct sde_hw_intf *intf, bool enable); + /** + * updates tearcheck configuration + */ + void (*update_tearcheck)(struct sde_hw_intf *intf, + struct sde_hw_tear_check *cfg); + /** * read, modify, write to either set or clear listening to external TE * @Return: 1 if TE was originally connected, 0 if not, or -ERROR @@ -148,6 +160,24 @@ struct sde_hw_intf_ops { * Select vsync signal for tear-effect configuration */ void (*vsync_sel)(struct sde_hw_intf *intf, u32 vsync_source); + + /** + * Program the AVR_TOTAL for min fps rate + */ + int (*avr_setup)(struct sde_hw_intf *intf, + const struct intf_timing_params *params, + const struct intf_avr_params *avr_params); + + /** + * Signal the trigger on each commit for AVR + */ + void (*avr_trigger)(struct sde_hw_intf *ctx); + + /** + * Enable AVR and select the mode + */ + void (*avr_ctrl)(struct sde_hw_intf *intf, + const struct intf_avr_params *avr_params); }; struct sde_hw_intf { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c index cf77df03507350a39f13e4a191539844aa613139..68d89e43a97af78c6edfe4f5b2c7419810aa7422 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c +++ 
b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -179,7 +179,7 @@ static void sde_hw_lm_clear_dim_layer(struct sde_hw_mixer *ctx) u32 reset = BIT(16), val; reset = ~reset; - for (i = SDE_STAGE_0; i < sblk->maxblendstages; i++) { + for (i = SDE_STAGE_0; i <= sblk->maxblendstages; i++) { stage_off = _stage_offset(ctx, i); if (WARN_ON(stage_off < 0)) return; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c index 7adb7a97aa7824a35815428cbdc621db8b3cbbf6..b438e2cbaefa44d62dad7d993b7024c12baedf63 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -46,6 +46,7 @@ static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = { }; #define MERGE_3D_MODE 0x004 +#define MERGE_3D_MUX 0x000 static struct sde_merge_3d_cfg *_merge_3d_offset(enum sde_merge_3d idx, struct sde_mdss_cfg *m, @@ -86,10 +87,23 @@ static void _sde_hw_merge_3d_setup_blend_mode(struct sde_hw_merge_3d *ctx, SDE_REG_WRITE(c, MERGE_3D_MODE, mode); } +static void sde_hw_merge_3d_reset_blend_mode(struct sde_hw_merge_3d *ctx) +{ + struct sde_hw_blk_reg_map *c; + + if (!ctx) + return; + + c = &ctx->hw; + SDE_REG_WRITE(c, MERGE_3D_MODE, 0x0); + SDE_REG_WRITE(c, MERGE_3D_MUX, 0x0); +} + static void _setup_merge_3d_ops(struct sde_hw_merge_3d_ops *ops, const struct sde_merge_3d_cfg *hw_cap) { ops->setup_blend_mode = _sde_hw_merge_3d_setup_blend_mode; + ops->reset_blend_mode = sde_hw_merge_3d_reset_blend_mode; } static struct sde_hw_merge_3d *_sde_pp_merge_3d_init(enum sde_merge_3d idx, @@ -171,6 +185,22 @@ static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp, return 0; } +static void sde_hw_pp_update_te(struct 
sde_hw_pingpong *pp, + struct sde_hw_tear_check *te) +{ + struct sde_hw_blk_reg_map *c; + int cfg; + + if (!pp || !te) + return; + c = &pp->hw; + + cfg = SDE_REG_READ(c, PP_SYNC_THRESH); + cfg &= ~0xFFFF; + cfg |= te->sync_threshold_start; + SDE_REG_WRITE(c, PP_SYNC_THRESH, cfg); +} + static int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp, struct sde_hw_autorefresh *cfg) { @@ -422,6 +452,11 @@ static void sde_hw_pp_setup_3d_merge_mode(struct sde_hw_pingpong *pp, pp->merge_3d->ops.setup_blend_mode(pp->merge_3d, cfg); } +static void sde_hw_pp_reset_3d_merge_mode(struct sde_hw_pingpong *pp) +{ + if (pp->merge_3d && pp->merge_3d->ops.reset_blend_mode) + pp->merge_3d->ops.reset_blend_mode(pp->merge_3d); +} static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops, const struct sde_pingpong_cfg *hw_cap) { @@ -430,6 +465,7 @@ static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops, if (hw_cap->features & BIT(SDE_PINGPONG_TE)) { ops->setup_tearcheck = sde_hw_pp_setup_te_config; ops->enable_tearcheck = sde_hw_pp_enable_te; + ops->update_tearcheck = sde_hw_pp_update_te; ops->connect_external_te = sde_hw_pp_connect_external_te; ops->get_vsync_info = sde_hw_pp_get_vsync_info; ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config; @@ -453,6 +489,7 @@ static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops, } if (test_bit(SDE_PINGPONG_MERGE_3D, &hw_cap->features)) ops->setup_3d_mode = sde_hw_pp_setup_3d_merge_mode; + ops->reset_3d_mode = sde_hw_pp_reset_3d_merge_mode; }; static struct sde_hw_blk_ops sde_hw_ops = { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h index d6d69fe062ef1ea8399da9756c80c44d28a9517b..9cc755237c68770c08be6b2a930c3b827c4de222 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -54,6 +54,12 @@ struct sde_hw_pingpong_ops { int (*enable_tearcheck)(struct sde_hw_pingpong *pp, bool enable); + /** + * updates tearcheck configuration + */ + void (*update_tearcheck)(struct sde_hw_pingpong *pp, + struct sde_hw_tear_check *cfg); + /** * read, modify, write to either set or clear listening to external TE * @Return: 1 if TE was originally connected, 0 if not, or -ERROR @@ -122,6 +128,11 @@ struct sde_hw_pingpong_ops { */ void (*setup_3d_mode)(struct sde_hw_pingpong *pp, enum sde_3d_blend_mode cfg); + + /** + * reset 3d blend configuration + */ + void (*reset_3d_mode)(struct sde_hw_pingpong *pp); }; struct sde_hw_merge_3d_ops { @@ -130,6 +141,11 @@ struct sde_hw_merge_3d_ops { */ void (*setup_blend_mode)(struct sde_hw_merge_3d *id, enum sde_3d_blend_mode cfg); + + /** + * reset 3d blend mode configuration + */ + void (*reset_blend_mode)(struct sde_hw_merge_3d *id); }; struct sde_hw_merge_3d { diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index cad89d399a87ccc221ad91b75f6f7fba1b7c2c75..95752d1b0313ea9e6b2fce2d7fbf47479225643b 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1260,6 +1260,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .check_status = dsi_display_check_status, .enable_event = dsi_conn_enable_event, .cmd_transfer = dsi_display_cmd_transfer, + .cont_splash_config = dsi_display_cont_splash_config, }; static const struct sde_connector_ops wb_ops = { .post_init = sde_wb_connector_post_init, @@ -1273,6 +1274,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .get_dst_format = NULL, .check_status = NULL, .cmd_transfer = NULL, + .cont_splash_config = NULL, }; static const struct sde_connector_ops dp_ops = { .post_init = 
dp_connector_post_init, @@ -1285,6 +1287,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .check_status = NULL, .config_hdr = dp_connector_config_hdr, .cmd_transfer = NULL, + .cont_splash_config = NULL, }; struct msm_display_info info; struct drm_encoder *encoder; @@ -2272,6 +2275,7 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms) struct list_head *connector_list = NULL; struct drm_connector *conn_iter = NULL; struct drm_connector *connector = NULL; + struct sde_connector *sde_conn = NULL; if (!kms) { SDE_ERROR("invalid kms\n"); @@ -2383,9 +2387,26 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms) sde_crtc_update_cont_splash_mixer_settings(crtc); + sde_conn = to_sde_connector(connector); + if (sde_conn && sde_conn->ops.cont_splash_config) + sde_conn->ops.cont_splash_config(sde_conn->display); + return rc; } +static bool sde_kms_check_for_splash(struct msm_kms *kms) +{ + struct sde_kms *sde_kms; + + if (!kms) { + SDE_ERROR("invalid kms\n"); + return false; + } + + sde_kms = to_sde_kms(kms); + return sde_kms->splash_data.cont_splash_en; +} + static int sde_kms_pm_suspend(struct device *dev) { struct drm_device *ddev; @@ -2609,6 +2630,7 @@ static const struct msm_kms_funcs kms_funcs = { .register_events = _sde_kms_register_events, .get_address_space = _sde_kms_get_address_space, .postopen = _sde_kms_post_open, + .check_for_splash = sde_kms_check_for_splash, }; /* the caller api needs to turn on clock before calling it */ @@ -2896,7 +2918,7 @@ static int sde_kms_hw_init(struct msm_kms *kms) sde_kms->mmio = NULL; goto error; } - DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio); + DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio); sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys"); rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio, diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 
5f8cb4c125c102e68c916819b9e08c112ff2ecb9..e0e9d10c5d98397ae7be92bd42c1ed74c9974bbe 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -1138,8 +1138,9 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde, pstate->pixel_ext.num_ext_pxls_left[i] = scale_cfg->src_width[i]; } - if (!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h) - && (src_w == dst_w)) + + if ((!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h) + && (src_w == dst_w)) || pstate->multirect_mode) return; scale_cfg->dst_width = dst_w; @@ -4581,7 +4582,8 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; if (!usr) { SDE_DEBUG_PLANE(psde, "scale data removed\n"); - return; + cfg->enable = 0; + goto end; } if (copy_from_user(&scale_v2, usr, sizeof(scale_v2))) { @@ -4592,13 +4594,10 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, /* detach/ignore user data if 'disabled' */ if (!scale_v2.enable) { SDE_DEBUG_PLANE(psde, "scale data removed\n"); - return; + cfg->enable = 0; + goto end; } - /* force property to be dirty, even if the pointer didn't change */ - msm_property_set_dirty(&psde->property_info, - &pstate->property_state, PLANE_PROP_SCALER_V2); - /* populate from user space */ sde_set_scaler_v2(cfg, &scale_v2); @@ -4620,6 +4619,11 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, } pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK; +end: + /* force property to be dirty, even if the pointer didn't change */ + msm_property_set_dirty(&psde->property_info, + &pstate->property_state, PLANE_PROP_SCALER_V2); + SDE_EVT32_VERBOSE(DRMID(&psde->base), cfg->enable, cfg->de.enable, cfg->src_width[0], cfg->src_height[0], cfg->dst_width, cfg->dst_height); diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h index 78e99255e636892c06117428047186d8f355aadf..dff00543cbe8930218a8500fbadc45b94df168ca 100644 --- 
a/drivers/gpu/drm/msm/sde/sde_rm.h +++ b/drivers/gpu/drm/msm/sde/sde_rm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -65,6 +65,18 @@ enum sde_rm_topology_control { SDE_RM_TOPCTL_DS, }; +/** + * enum sde_rm_topology_control - HW resource use case in use by connector + * @SDE_RM_QSYNC_DISABLED: If set, Qsync feature is supported and in + * disable state. + * @SDE_RM_QSYNC_CONTINUOUS_MODE: If set, Qsync is enabled in continuous + * mode. + */ +enum sde_rm_qsync_modes { + SDE_RM_QSYNC_DISABLED, + SDE_RM_QSYNC_CONTINUOUS_MODE, +}; + /** * struct sde_rm - SDE dynamic hardware resource manager * @dev: device handle for event logging purposes diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c index 2e50a097de4f323d4259da57b7f3793bb1d4f557..4a4a34e56d1e0f167171abbf498cd57ee2dce38f 100644 --- a/drivers/gpu/drm/msm/sde_dbg.c +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -74,6 +74,11 @@ #define DBG_CTRL_RESET_HW_PANIC BIT(2) #define DBG_CTRL_MAX BIT(3) +#define DUMP_BUF_SIZE (4096 * 512) +#define DUMP_CLMN_COUNT 4 +#define DUMP_LINE_SIZE 256 +#define DUMP_MAX_LINES_PER_BLK 512 + /** * struct sde_dbg_reg_offset - tracking for start and end of region * @start: start offset @@ -169,6 +174,24 @@ struct sde_dbg_vbif_debug_bus { struct vbif_debug_bus_entry *entries; }; +/** + * struct sde_dbg_regbuf - wraps buffer and tracking params for register dumps + * @buf: pointer to allocated memory for storing register dumps in hw recovery + * @buf_size: size of the memory allocated + * @len: size of the dump data valid in the buffer + * @rpos: cursor points to the buffer position read by client + * @dump_done: to indicate if dumping to user memory is complete + * @cur_blk: points to the current sde_dbg_reg_base 
block + */ +struct sde_dbg_regbuf { + char *buf; + int buf_size; + int len; + int rpos; + int dump_done; + struct sde_dbg_reg_base *cur_blk; +}; + /** * struct sde_dbg_base - global sde debug base structure * @evtlog: event log instance @@ -185,6 +208,10 @@ struct sde_dbg_vbif_debug_bus { * @dbgbus_vbif_rt: debug bus structure for the realtime vbif * @dump_all: dump all entries in register dump * @dsi_dbg_bus: dump dsi debug bus register + * @regbuf: buffer data to track the register dumping in hw recovery + * @cur_evt_index: index used for tracking event logs dump in hw recovery + * @dbgbus_dump_idx: index used for tracking dbg-bus dump in hw recovery + * @vbif_dbgbus_dump_idx: index for tracking vbif dumps in hw recovery */ static struct sde_dbg_base { struct sde_dbg_evtlog *evtlog; @@ -205,6 +232,11 @@ static struct sde_dbg_base { bool dump_all; bool dsi_dbg_bus; u32 debugfs_ctrl; + + struct sde_dbg_regbuf regbuf; + u32 cur_evt_index; + u32 dbgbus_dump_idx; + u32 vbif_dbgbus_dump_idx; } sde_dbg_base; /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */ @@ -4025,6 +4057,12 @@ static int sde_dbg_debugfs_open(struct inode *inode, struct file *file) /* non-seekable */ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); file->private_data = inode->i_private; + mutex_lock(&sde_dbg_base.mutex); + sde_dbg_base.cur_evt_index = 0; + sde_dbg_base.evtlog->first = sde_dbg_base.evtlog->curr + 1; + sde_dbg_base.evtlog->last = + sde_dbg_base.evtlog->first + SDE_EVTLOG_ENTRY; + mutex_unlock(&sde_dbg_base.mutex); return 0; } @@ -4044,8 +4082,13 @@ static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff, if (!buff || !ppos) return -EINVAL; - len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf, - SDE_EVTLOG_BUF_MAX, true); + mutex_lock(&sde_dbg_base.mutex); + len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, + evtlog_buf, SDE_EVTLOG_BUF_MAX, + !sde_dbg_base.cur_evt_index, true); + sde_dbg_base.cur_evt_index++; + 
mutex_unlock(&sde_dbg_base.mutex); + if (len < 0 || len > count) { pr_err("len is more than user buffer size"); return 0; @@ -4235,6 +4278,329 @@ static const struct file_operations sde_evtlog_filter_fops = { .release = seq_release }; +static int sde_recovery_regdump_open(struct inode *inode, struct file *file) +{ + if (!inode || !file) + return -EINVAL; + + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + + /* initialize to start position */ + sde_dbg_base.regbuf.rpos = 0; + sde_dbg_base.regbuf.cur_blk = NULL; + sde_dbg_base.regbuf.dump_done = false; + + return 0; +} + +static ssize_t _sde_dbg_dump_reg_rows(u32 reg_start, + void *start, int count, char *buf, int buflen) +{ + int i; + int len = 0; + u32 *addr; + u32 reg_offset = 0; + int rows = min(count / DUMP_CLMN_COUNT, DUMP_MAX_LINES_PER_BLK); + + if (!start || !buf) { + pr_err("invalid address for dump\n"); + return len; + } + + if (buflen < PAGE_SIZE) { + pr_err("buffer too small for dump\n"); + return len; + } + + for (i = 0; i < rows; i++) { + addr = start + (i * DUMP_CLMN_COUNT * sizeof(u32)); + reg_offset = reg_start + (i * DUMP_CLMN_COUNT * sizeof(u32)); + if (buflen < (len + DUMP_LINE_SIZE)) + break; + + len += snprintf(buf + len, DUMP_LINE_SIZE, + "0x%.8X | %.8X %.8X %.8X %.8X\n", + reg_offset, addr[0], addr[1], addr[2], addr[3]); + } + + return len; +} + +static int _sde_dbg_recovery_dump_sub_blk(struct sde_dbg_reg_range *sub_blk, + char *buf, int buflen) +{ + int count = 0; + int len = 0; + + if (!sub_blk || (buflen < PAGE_SIZE)) { + pr_err("invalid params buflen:%d subblk valid:%d\n", + buflen, sub_blk != NULL); + return len; + } + + count = (sub_blk->offset.end - sub_blk->offset.start) / (sizeof(u32)); + if (count < DUMP_CLMN_COUNT) { + pr_err("invalid count for register dumps :%d\n", count); + return len; + } + + len += snprintf(buf + len, DUMP_LINE_SIZE, + "------------------------------------------\n"); + len += 
snprintf(buf + len, DUMP_LINE_SIZE, + "**** sub block [%s] - size:%d ****\n", + sub_blk->range_name, count); + len += _sde_dbg_dump_reg_rows(sub_blk->offset.start, sub_blk->reg_dump, + count, buf + len, buflen - len); + + return len; +} + +static int _sde_dbg_recovery_dump_reg_blk(struct sde_dbg_reg_base *blk, + char *buf, int buf_size, int *out_len) +{ + int ret = 0; + int len = 0; + struct sde_dbg_reg_range *sub_blk; + + if (buf_size < PAGE_SIZE) { + pr_err("buffer too small for dump\n"); + return len; + } + + if (!blk || !strlen(blk->name)) { + len += snprintf(buf + len, DUMP_LINE_SIZE, + "Found one invalid block - skip dump\n"); + *out_len = len; + return len; + } + + len += snprintf(buf + len, DUMP_LINE_SIZE, + "******************************************\n"); + len += snprintf(buf + len, DUMP_LINE_SIZE, + "==========================================\n"); + len += snprintf(buf + len, DUMP_LINE_SIZE, + "*********** DUMP of %s block *************\n", + blk->name); + len += snprintf(buf + len, DUMP_LINE_SIZE, + "count:%ld max-off:0x%lx has_sub_blk:%d\n", + blk->cnt, blk->max_offset, + !list_empty(&blk->sub_range_list)); + + if (list_empty(&blk->sub_range_list)) { + len += _sde_dbg_dump_reg_rows(0, blk->reg_dump, + blk->max_offset / sizeof(u32), buf + len, + buf_size - len); + } else { + list_for_each_entry(sub_blk, &blk->sub_range_list, head) + len += _sde_dbg_recovery_dump_sub_blk(sub_blk, + buf + len, buf_size - len); + } + *out_len = len; + + return ret; +} + +static ssize_t sde_recovery_regdump_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + int usize = 0; + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_regbuf *rbuf = &dbg_base->regbuf; + + mutex_lock(&sde_dbg_base.mutex); + if (!rbuf->dump_done && !rbuf->cur_blk) { + if (!rbuf->buf) + rbuf->buf = kzalloc(DUMP_BUF_SIZE, GFP_KERNEL); + if (!rbuf->buf) { + len = -ENOMEM; + goto err; + } + rbuf->rpos = 0; + rbuf->len = 0; + rbuf->buf_size = 
DUMP_BUF_SIZE; + + rbuf->cur_blk = list_first_entry(&dbg_base->reg_base_list, + struct sde_dbg_reg_base, reg_base_head); + if (rbuf->cur_blk) + _sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk, + rbuf->buf, + rbuf->buf_size, + &rbuf->len); + pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ? + rbuf->cur_blk->name : "unknown", rbuf->len); + } else if (rbuf->len == rbuf->rpos && rbuf->cur_blk) { + rbuf->rpos = 0; + rbuf->len = 0; + rbuf->buf_size = DUMP_BUF_SIZE; + + if (rbuf->cur_blk == list_last_entry(&dbg_base->reg_base_list, + struct sde_dbg_reg_base, reg_base_head)) + rbuf->cur_blk = NULL; + else + rbuf->cur_blk = list_next_entry(rbuf->cur_blk, + reg_base_head); + + if (rbuf->cur_blk) + _sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk, + rbuf->buf, + rbuf->buf_size, + &rbuf->len); + pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ? + rbuf->cur_blk->name : "unknown", rbuf->len); + } + + if ((rbuf->len - rbuf->rpos) > 0) { + usize = ((rbuf->len - rbuf->rpos) > count) ? + count : rbuf->len - rbuf->rpos; + if (copy_to_user(ubuf, rbuf->buf + rbuf->rpos, usize)) { + len = -EFAULT; + goto err; + } + + len = usize; + rbuf->rpos += usize; + *ppos += usize; + } + + if (!len && rbuf->buf) + rbuf->dump_done = true; +err: + mutex_unlock(&sde_dbg_base.mutex); + + return len; +} + +static const struct file_operations sde_recovery_reg_fops = { + .open = sde_recovery_regdump_open, + .read = sde_recovery_regdump_read, +}; + +static int sde_recovery_dbgbus_dump_open(struct inode *inode, struct file *file) +{ + if (!inode || !file) + return -EINVAL; + + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + + mutex_lock(&sde_dbg_base.mutex); + sde_dbg_base.dbgbus_dump_idx = 0; + mutex_unlock(&sde_dbg_base.mutex); + + return 0; +} + +static ssize_t sde_recovery_dbgbus_dump_read(struct file *file, + char __user *buff, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + char 
evtlog_buf[SDE_EVTLOG_BUF_MAX]; + u32 *data; + struct sde_dbg_sde_debug_bus *bus; + + mutex_lock(&sde_dbg_base.mutex); + bus = &sde_dbg_base.dbgbus_sde; + if (!bus->cmn.dumped_content || !bus->cmn.entries_size) + goto dump_done; + + if (sde_dbg_base.dbgbus_dump_idx <= + ((bus->cmn.entries_size - 1) * DUMP_CLMN_COUNT)) { + data = &bus->cmn.dumped_content[ + sde_dbg_base.dbgbus_dump_idx]; + len = snprintf(evtlog_buf, SDE_EVTLOG_BUF_MAX, + "0x%.8X | %.8X %.8X %.8X %.8X\n", + sde_dbg_base.dbgbus_dump_idx, + data[0], data[1], data[2], data[3]); + sde_dbg_base.dbgbus_dump_idx += DUMP_CLMN_COUNT; + if ((count < len) || copy_to_user(buff, evtlog_buf, len)) { + len = -EFAULT; + goto dump_done; + } + *ppos += len; + } +dump_done: + mutex_unlock(&sde_dbg_base.mutex); + + return len; +} + +static const struct file_operations sde_recovery_dbgbus_fops = { + .open = sde_recovery_dbgbus_dump_open, + .read = sde_recovery_dbgbus_dump_read, +}; + +static int sde_recovery_vbif_dbgbus_dump_open(struct inode *inode, + struct file *file) +{ + if (!inode || !file) + return -EINVAL; + + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + + mutex_lock(&sde_dbg_base.mutex); + sde_dbg_base.vbif_dbgbus_dump_idx = 0; + mutex_unlock(&sde_dbg_base.mutex); + + return 0; +} + +static ssize_t sde_recovery_vbif_dbgbus_dump_read(struct file *file, + char __user *buff, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + char evtlog_buf[SDE_EVTLOG_BUF_MAX]; + int i; + u32 *data; + u32 list_size = 0; + struct vbif_debug_bus_entry *head; + struct sde_dbg_vbif_debug_bus *bus; + + mutex_lock(&sde_dbg_base.mutex); + bus = &sde_dbg_base.dbgbus_vbif_rt; + if (!bus->cmn.dumped_content || !bus->cmn.entries_size) + goto dump_done; + + /* calculate total number of test point */ + for (i = 0; i < bus->cmn.entries_size; i++) { + head = bus->entries + i; + list_size += (head->block_cnt * head->test_pnt_cnt); + } + + /* 4 entries for each test 
point*/ + list_size *= DUMP_CLMN_COUNT; + if (sde_dbg_base.vbif_dbgbus_dump_idx < list_size) { + data = &bus->cmn.dumped_content[ + sde_dbg_base.vbif_dbgbus_dump_idx]; + len = snprintf(evtlog_buf, SDE_EVTLOG_BUF_MAX, + "0x%.8X | %.8X %.8X %.8X %.8X\n", + sde_dbg_base.vbif_dbgbus_dump_idx, + data[0], data[1], data[2], data[3]); + sde_dbg_base.vbif_dbgbus_dump_idx += DUMP_CLMN_COUNT; + if ((count < len) || copy_to_user(buff, evtlog_buf, len)) { + len = -EFAULT; + goto dump_done; + } + *ppos += len; + } +dump_done: + mutex_unlock(&sde_dbg_base.mutex); + + return len; +} + +static const struct file_operations sde_recovery_vbif_dbgbus_fops = { + .open = sde_recovery_vbif_dbgbus_dump_open, + .read = sde_recovery_vbif_dbgbus_dump_read, +}; + /** * sde_dbg_reg_base_release - release allocated reg dump file private data * @inode: debugfs inode @@ -4563,6 +4929,12 @@ int sde_dbg_debugfs_register(struct dentry *debugfs_root) &sde_dbg_base.panic_on_err); debugfs_create_u32("reg_dump", 0600, debugfs_root, &sde_dbg_base.enable_reg_dump); + debugfs_create_file("recovery_reg", 0400, debugfs_root, NULL, + &sde_recovery_reg_fops); + debugfs_create_file("recovery_dbgbus", 0400, debugfs_root, NULL, + &sde_recovery_dbgbus_fops); + debugfs_create_file("recovery_vbif_dbgbus", 0400, debugfs_root, NULL, + &sde_recovery_vbif_dbgbus_fops); if (dbg->dbgbus_sde.entries) { dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE; @@ -4662,6 +5034,7 @@ int sde_dbg_init(struct device *dev, struct sde_dbg_power_ctrl *power_ctrl) sde_dbg_base.work_panic = false; sde_dbg_base.panic_on_err = DEFAULT_PANIC; sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP; + memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf)); pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n", sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err, @@ -4695,6 +5068,8 @@ static void sde_dbg_reg_base_destroy(void) */ void sde_dbg_destroy(void) { + kfree(sde_dbg_base.regbuf.buf); + memset(&sde_dbg_base.regbuf, 0, 
sizeof(sde_dbg_base.regbuf)); _sde_dbg_debugfs_destroy(); sde_dbg_base_evtlog = NULL; sde_evtlog_destroy(sde_dbg_base.evtlog); diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h index 9efb893d52e9f5d9d0815979bc37c30fc3f47fa8..00e486bec18c28370bd08b937b81c1e7f83f974b 100644 --- a/drivers/gpu/drm/msm/sde_dbg.h +++ b/drivers/gpu/drm/msm/sde_dbg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -208,11 +208,12 @@ bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag); * @evtlog_buf: target buffer to print into * @evtlog_buf_size: size of target buffer * @update_last_entry: whether or not to stop at most recent entry + * @full_dump: whether to dump full or to limit print entries * Returns: number of bytes written to buffer */ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, char *evtlog_buf, ssize_t evtlog_buf_size, - bool update_last_entry); + bool update_last_entry, bool full_dump); /** * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c index f157b11236717487e09e911aa82c1d6784d42a4c..7026d5fb564b9e33916c1470150242f0ccec7ab5 100644 --- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c +++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -108,8 +108,10 @@ void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line, /* always dump the last entries which are not dumped yet */ static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog, - bool update_last_entry) + bool update_last_entry, bool full_dump) { + int max_entries = full_dump ? SDE_EVTLOG_ENTRY : SDE_EVTLOG_PRINT_ENTRY; + if (!evtlog) return false; @@ -127,12 +129,11 @@ static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog, evtlog->last_dump += SDE_EVTLOG_ENTRY; } - if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) { + if ((evtlog->last_dump - evtlog->first) > max_entries) { pr_info("evtlog skipping %d entries, last=%d\n", evtlog->last_dump - evtlog->first - - SDE_EVTLOG_PRINT_ENTRY, - evtlog->last_dump - 1); - evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY; + max_entries, evtlog->last_dump - 1); + evtlog->first = evtlog->last_dump - max_entries; } evtlog->next = evtlog->first + 1; @@ -141,7 +142,7 @@ static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog, ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, char *evtlog_buf, ssize_t evtlog_buf_size, - bool update_last_entry) + bool update_last_entry, bool full_dump) { int i; ssize_t off = 0; @@ -154,7 +155,7 @@ ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, spin_lock_irqsave(&evtlog->spin_lock, flags); /* update markers, exit if nothing to print */ - if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry)) + if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry, full_dump)) goto exit; log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY]; @@ -192,8 +193,8 @@ void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog) if (!evtlog) return; - while (sde_evtlog_dump_to_buffer( - evtlog, buf, sizeof(buf), update_last_entry)) { + while 
(sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf), + update_last_entry, false)) { pr_info("%s", buf); update_last_entry = false; } diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c index 117947e4db0856a75049641d233a5734c15ff715..7acaefd896fda02aebcac9de3fcca7555a154635 100644 --- a/drivers/gpu/drm/msm/sde_hdcp_2x.c +++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c @@ -389,6 +389,15 @@ static void sde_hdcp_2x_query_stream_work(struct kthread_work *work) sde_hdcp_2x_stream(hdcp); } +static void sde_hdcp_2x_initialize_command(struct sde_hdcp_2x_ctrl *hdcp, + enum hdcp_transport_wakeup_cmd cmd, + struct hdcp_transport_wakeup_data *cdata) +{ + cdata->cmd = cmd; + cdata->timeout = hdcp->timeout_left; + cdata->buf = hdcp->app_data.request.data + 1; +} + static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp) { struct hdcp_transport_wakeup_data cdata = { @@ -406,12 +415,14 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp) } /* poll for link check */ - cdata.cmd = HDCP_TRANSPORT_CMD_LINK_POLL; + sde_hdcp_2x_initialize_command(hdcp, + HDCP_TRANSPORT_CMD_LINK_POLL, &cdata); break; case SKE_SEND_EKS: if (hdcp->repeater_flag && !atomic_read(&hdcp->hdcp_off)) { /* poll for link check */ - cdata.cmd = HDCP_TRANSPORT_CMD_LINK_POLL; + sde_hdcp_2x_initialize_command(hdcp, + HDCP_TRANSPORT_CMD_LINK_POLL, &cdata); } else { hdcp->app_data.response.data[0] = SKE_SEND_TYPE_ID; hdcp->app_data.response.length = 2; @@ -427,7 +438,8 @@ static void sde_hdcp_2x_msg_sent(struct sde_hdcp_2x_ctrl *hdcp) HDCP_2X_EXECUTE(stream); hdcp->update_stream = false; } else { - cdata.cmd = HDCP_TRANSPORT_CMD_LINK_POLL; + sde_hdcp_2x_initialize_command(hdcp, + HDCP_TRANSPORT_CMD_LINK_POLL, &cdata); } break; default: @@ -583,12 +595,13 @@ static void sde_hdcp_2x_msg_recvd(struct sde_hdcp_2x_ctrl *hdcp) cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS; sde_hdcp_2x_wakeup_client(hdcp, &cdata); } else { - pr_debug("failed to enable encryption (%d)\n", + 
pr_err("failed to enable encryption (%d)\n", rc); } } - cdata.cmd = HDCP_TRANSPORT_CMD_LINK_POLL; + sde_hdcp_2x_initialize_command(hdcp, + HDCP_TRANSPORT_CMD_LINK_POLL, &cdata); goto exit; } diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h index aa8b8211985cf5435228189e30cf3f728d4f8a35..e1ae8ca1066250c157d3cda2b0066252ba65075c 100644 --- a/drivers/gpu/drm/msm/sde_hdcp_2x.h +++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h @@ -26,6 +26,7 @@ * @HDCP_2X_CMD_STOP: stop authentication * @HDCP_2X_CMD_MSG_SEND_SUCCESS: sending message to sink succeeded * @HDCP_2X_CMD_MSG_SEND_FAILED: sending message to sink failed + * @HDCP_2X_CMD_MSG_SEND_TIMEOUT: sending message to sink timed out * @HDCP_2X_CMD_MSG_RECV_SUCCESS: receiving message from sink succeeded * @HDCP_2X_CMD_MSG_RECV_FAILED: receiving message from sink failed * @HDCP_2X_CMD_MSG_RECV_TIMEOUT: receiving message from sink timed out @@ -38,6 +39,7 @@ enum sde_hdcp_2x_wakeup_cmd { HDCP_2X_CMD_STOP, HDCP_2X_CMD_MSG_SEND_SUCCESS, HDCP_2X_CMD_MSG_SEND_FAILED, + HDCP_2X_CMD_MSG_SEND_TIMEOUT, HDCP_2X_CMD_MSG_RECV_SUCCESS, HDCP_2X_CMD_MSG_RECV_FAILED, HDCP_2X_CMD_MSG_RECV_TIMEOUT, @@ -141,6 +143,8 @@ static inline const char *sde_hdcp_2x_cmd_to_str( return TO_STR(HDCP_2X_CMD_MSG_SEND_SUCCESS); case HDCP_2X_CMD_MSG_SEND_FAILED: return TO_STR(HDCP_2X_CMD_MSG_SEND_FAILED); + case HDCP_2X_CMD_MSG_SEND_TIMEOUT: + return TO_STR(HDCP_2X_CMD_MSG_SEND_TIMEOUT); case HDCP_2X_CMD_MSG_RECV_SUCCESS: return TO_STR(HDCP_2X_CMD_MSG_RECV_SUCCESS); case HDCP_2X_CMD_MSG_RECV_FAILED: diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c index 29c868eebecf4bca2217561d471f23b882d60f58..80049ce0c9eb50070ba03f354a898923b38616d8 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.c +++ b/drivers/gpu/drm/msm/sde_power_handle.c @@ -1087,6 +1087,30 @@ struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle, return clk; } +int sde_power_clk_set_flags(struct sde_power_handle 
*phandle, + char *clock_name, unsigned long flags) +{ + struct clk *clk; + + if (!phandle) { + pr_err("invalid input power handle\n"); + return -EINVAL; + } + + if (!clock_name) { + pr_err("invalid input clock name\n"); + return -EINVAL; + } + + clk = sde_power_clk_get_clk(phandle, clock_name); + if (!clk) { + pr_err("get_clk failed for clk: %s\n", clock_name); + return -EINVAL; + } + + return clk_set_flags(clk, flags); +} + struct sde_power_event *sde_power_handle_register_event( struct sde_power_handle *phandle, u32 event_type, void (*cb_fnc)(u32 event_type, void *usr), diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h index fb7322e382c5728115f08dec4287e48274d5f3af..b07779f0be9a5d8a37013fd627455749b3236c47 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.h +++ b/drivers/gpu/drm/msm/sde_power_handle.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -285,6 +285,17 @@ u64 sde_power_clk_get_max_rate(struct sde_power_handle *pdata, struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle, char *clock_name); +/** + * sde_power_clk_set_flags() - set the clock flags + * @pdata: power handle containing the resources + * @clock_name: clock name to get the clk pointer. + * @flags: flags to set + * + * Return: error code. 
+ */ +int sde_power_clk_set_flags(struct sde_power_handle *pdata, + char *clock_name, unsigned long flags); + /** * sde_power_data_bus_set_quota() - set data bus quota for power client * @phandle: power handle containing the resources diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c index f811ca149b36e28ca28ad520633cb6c7d8617b47..0fa9758e72ce6e0bc8488f4b74c377a964935fcd 100644 --- a/drivers/gpu/drm/msm/sde_rsc.c +++ b/drivers/gpu/drm/msm/sde_rsc.c @@ -1246,6 +1246,8 @@ static void sde_rsc_deinit(struct platform_device *pdev, if (rsc->pclient) sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false); + if (rsc->sw_fs_enabled) + regulator_disable(rsc->fs); if (rsc->fs) devm_regulator_put(rsc->fs); if (rsc->wrapper_io.base) @@ -1405,6 +1407,14 @@ static int sde_rsc_probe(struct platform_device *pdev) goto sde_rsc_fail; } + ret = regulator_enable(rsc->fs); + if (ret) { + pr_err("sde rsc: fs on failed ret:%d\n", ret); + goto sde_rsc_fail; + } + + rsc->sw_fs_enabled = true; + if (sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true)) { pr_err("failed to enable sde rsc power resources\n"); goto sde_rsc_fail; diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c index 509ae503eaf93300410a191a7cfa49dbdd65f81c..9999761f490c92d4d0945d9d2bacbce2901f8b99 100644 --- a/drivers/gpu/drm/msm/sde_rsc_hw.c +++ b/drivers/gpu/drm/msm/sde_rsc_hw.c @@ -609,10 +609,12 @@ static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) if (rsc->power_collapse_block) return -EINVAL; - rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST); - if (rc) { - pr_err("vdd reg fast mode set failed rc:%d\n", rc); - return rc; + if (rsc->sw_fs_enabled) { + rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST); + if (rc) { + pr_err("vdd reg fast mode set failed rc:%d\n", rc); + return rc; + } } dss_reg_w(&rsc->drv_io, SDE_RSC_SOLVER_SOLVER_MODES_ENABLED_DRV0, @@ -647,6 +649,11 @@ static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) rsc_event_trigger(rsc, 
SDE_RSC_EVENT_POST_CORE_PC); + if (rsc->sw_fs_enabled) { + regulator_disable(rsc->fs); + rsc->sw_fs_enabled = false; + } + return 0; end: diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h index 5c62466715b03b324c845e71211cb3c8e2b8f388..a659aca390d24f8e662400389a8eec4ace0a7007 100644 --- a/drivers/gpu/drm/msm/sde_rsc_priv.h +++ b/drivers/gpu/drm/msm/sde_rsc_priv.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -124,6 +124,7 @@ struct sde_rsc_timer_config { * @phandle: module power handle for clocks * @pclient: module power client of phandle * @fs: "MDSS GDSC" handle + * @sw_fs_enabled: track "MDSS GDSC" sw vote during probe * * @disp_rsc: display rsc handle * @drv_io: sde drv io data mapping @@ -157,6 +158,7 @@ struct sde_rsc_priv { struct sde_power_handle phandle; struct sde_power_client *pclient; struct regulator *fs; + bool sw_fs_enabled; struct rpmh_client *disp_rsc; struct dss_io_data drv_io; diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index f56f60f695e1b3e4575cb9dfdd2bbe00fb16e88c..debbbf0fd4bdda619732c67952c772f9957c4166 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div = 1025; u32 val; @@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = 
nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div = 1025; u32 val = (bd->props.brightness * div) / 100; @@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div, val; div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); @@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd) struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); struct nvif_object *device = &drm->client.device.object; - int or = nv_encoder->or; + int or = ffs(nv_encoder->dcb->or) - 1; u32 div, val; div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); @@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector) return -ENODEV; } - if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) + if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) return 0; if (drm->client.device.info.chipset <= 0xa0 || diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index d1755f12236ba1d43f0b2266c58182993730908a..41ebb37aaa7981c3f98df370377e5478fb4adf0e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1299,88 +1299,18 @@ static const struct soc_device_attribute dss_soc_devices[] = { static int dss_bind(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct resource *dss_mem; - u32 rev; int r; - dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); - dss.base = devm_ioremap_resource(&pdev->dev, dss_mem); - if (IS_ERR(dss.base)) - return PTR_ERR(dss.base); - - r = dss_get_clocks(); + r = 
component_bind_all(dev, NULL); if (r) return r; - r = dss_setup_default_clock(); - if (r) - goto err_setup_clocks; - - r = dss_video_pll_probe(pdev); - if (r) - goto err_pll_init; - - r = dss_init_ports(pdev); - if (r) - goto err_init_ports; - - pm_runtime_enable(&pdev->dev); - - r = dss_runtime_get(); - if (r) - goto err_runtime_get; - - dss.dss_clk_rate = clk_get_rate(dss.dss_clk); - - /* Select DPLL */ - REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); - - dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); - -#ifdef CONFIG_OMAP2_DSS_VENC - REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ - REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ - REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ -#endif - dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; - dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; - dss.dispc_clk_source = DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; - - rev = dss_read_reg(DSS_REVISION); - pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); - - dss_runtime_put(); - - r = component_bind_all(&pdev->dev, NULL); - if (r) - goto err_component; - - dss_debugfs_create_file("dss", dss_dump_regs); - pm_set_vt_switch(0); omapdss_gather_components(dev); omapdss_set_is_initialized(true); return 0; - -err_component: -err_runtime_get: - pm_runtime_disable(&pdev->dev); - dss_uninit_ports(pdev); -err_init_ports: - if (dss.video1_pll) - dss_video_pll_uninit(dss.video1_pll); - - if (dss.video2_pll) - dss_video_pll_uninit(dss.video2_pll); -err_pll_init: -err_setup_clocks: - dss_put_clocks(); - return r; } static void dss_unbind(struct device *dev) @@ -1390,18 +1320,6 @@ static void dss_unbind(struct device *dev) omapdss_set_is_initialized(false); component_unbind_all(&pdev->dev, NULL); - - if (dss.video1_pll) - dss_video_pll_uninit(dss.video1_pll); - - if (dss.video2_pll) - dss_video_pll_uninit(dss.video2_pll); - - dss_uninit_ports(pdev); - - pm_runtime_disable(&pdev->dev); - - 
dss_put_clocks(); } static const struct component_master_ops dss_component_ops = { @@ -1433,10 +1351,46 @@ static int dss_add_child_component(struct device *dev, void *data) return 0; } +static int dss_probe_hardware(void) +{ + u32 rev; + int r; + + r = dss_runtime_get(); + if (r) + return r; + + dss.dss_clk_rate = clk_get_rate(dss.dss_clk); + + /* Select DPLL */ + REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); + + dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); + +#ifdef CONFIG_OMAP2_DSS_VENC + REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ + REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ + REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ +#endif + dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; + dss.dispc_clk_source = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; + + rev = dss_read_reg(DSS_REVISION); + pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dss_runtime_put(); + + return 0; +} + static int dss_probe(struct platform_device *pdev) { const struct soc_device_attribute *soc; struct component_match *match = NULL; + struct resource *dss_mem; int r; dss.pdev = pdev; @@ -1451,20 +1405,69 @@ static int dss_probe(struct platform_device *pdev) else dss.feat = of_match_device(dss_of_match, &pdev->dev)->data; - r = dss_initialize_debugfs(); + /* Map I/O registers, get and setup clocks. */ + dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dss.base = devm_ioremap_resource(&pdev->dev, dss_mem); + if (IS_ERR(dss.base)) + return PTR_ERR(dss.base); + + r = dss_get_clocks(); if (r) return r; - /* add all the child devices as components */ + r = dss_setup_default_clock(); + if (r) + goto err_put_clocks; + + /* Setup the video PLLs and the DPI and SDI ports. 
*/ + r = dss_video_pll_probe(pdev); + if (r) + goto err_put_clocks; + + r = dss_init_ports(pdev); + if (r) + goto err_uninit_plls; + + /* Enable runtime PM and probe the hardware. */ + pm_runtime_enable(&pdev->dev); + + r = dss_probe_hardware(); + if (r) + goto err_pm_runtime_disable; + + /* Initialize debugfs. */ + r = dss_initialize_debugfs(); + if (r) + goto err_pm_runtime_disable; + + dss_debugfs_create_file("dss", dss_dump_regs); + + /* Add all the child devices as components. */ device_for_each_child(&pdev->dev, &match, dss_add_child_component); r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); - if (r) { - dss_uninitialize_debugfs(); - return r; - } + if (r) + goto err_uninit_debugfs; return 0; + +err_uninit_debugfs: + dss_uninitialize_debugfs(); + +err_pm_runtime_disable: + pm_runtime_disable(&pdev->dev); + dss_uninit_ports(pdev); + +err_uninit_plls: + if (dss.video1_pll) + dss_video_pll_uninit(dss.video1_pll); + if (dss.video2_pll) + dss_video_pll_uninit(dss.video2_pll); + +err_put_clocks: + dss_put_clocks(); + + return r; } static int dss_remove(struct platform_device *pdev) @@ -1473,6 +1476,18 @@ static int dss_remove(struct platform_device *pdev) dss_uninitialize_debugfs(); + pm_runtime_disable(&pdev->dev); + + dss_uninit_ports(pdev); + + if (dss.video1_pll) + dss_video_pll_uninit(dss.video1_pll); + + if (dss.video2_pll) + dss_video_pll_uninit(dss.video2_pll); + + dss_put_clocks(); + return 0; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 234af81fb3d01629ae157c73276f43259f1a0e26..fc56d033febe68076c3f69c1831d07a3c92f78bb 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1561,7 +1561,7 @@ static const struct panel_desc ontat_yx700wv03 = { .width = 154, .height = 83, }, - .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; static const struct drm_display_mode ortustech_com43h4m85ulc_mode = { diff 
--git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index 12d22f3db1af0fd992693711080f87a3ca8bee5c..6a4b8c98a719d9bfee3427b290394d0a63c69f8e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -59,11 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, rcar_lvds_write(lvds, LVDPLLCR, pllcr); - /* - * Select the input, hardcode mode 0, enable LVDS operation and turn - * bias circuitry on. - */ - lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN; + /* Select the input and set the LVDS mode. */ + lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; if (rcrtc->index == 2) lvdcr0 |= LVDCR0_DUSEL; rcar_lvds_write(lvds, LVDCR0, lvdcr0); @@ -74,6 +71,10 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) | LVDCR1_CLKSTBY_GEN2); + /* Enable LVDS operation and turn bias circuitry on. */ + lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + /* * Turn the PLL on, wait for the startup delay, and turn the output * on. @@ -95,7 +96,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, u32 lvdcr0; u32 pllcr; - /* PLL clock configuration */ + /* Set the PLL clock configuration and LVDS mode. */ if (freq < 42000) pllcr = LVDPLLCR_PLLDIVCNT_42M; else if (freq < 85000) @@ -107,6 +108,9 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, rcar_lvds_write(lvds, LVDPLLCR, pllcr); + lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT; + rcar_lvds_write(lvds, LVDCR0, lvdcr0); + /* Turn all the channels on. */ rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) | @@ -117,7 +121,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, * Turn the PLL on, set it to LVDS normal mode, wait for the startup * delay and turn the output on. 
*/ - lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON; + lvdcr0 |= LVDCR0_PLLON; rcar_lvds_write(lvds, LVDCR0, lvdcr0); lvdcr0 |= LVDCR0_PWD; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 1869c8bb76c8173677be56c1d68a8364cf80a1f2..bde65186a3c37d16a1d06ed4418faad3f333a2a0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -262,7 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). */ vma->vm_flags &= ~VM_PFNMAP; - vma->vm_pgoff = 0; if (rk_obj->pages) ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); @@ -297,6 +296,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) if (ret) return ret; + /* + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the + * whole buffer from the start. + */ + vma->vm_pgoff = 0; + obj = vma->vm_private_data; return rockchip_drm_gem_object_mmap(obj, vma); diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index d401156490f36c890f49f23d1eab1b3f7691d108..4460ca46a3505daad9f47e619ca165548b824125 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c @@ -129,10 +129,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw) static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) { struct sun4i_dclk *dclk = hw_to_dclk(hw); + u32 val = degrees / 120; + + val <<= 28; regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, GENMASK(29, 28), - degrees / 120); + val); return 0; } diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 597d563d636a189fa4bdb790ca842ec9a3740134..0598b4c18c253bb9f10076174a0b6c2774a86e87 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm) 
drm_kms_helper_poll_fini(drm); tegra_drm_fb_exit(drm); + drm_atomic_helper_shutdown(drm); drm_mode_config_cleanup(drm); err = host1x_device_exit(device); diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index b94bd5440e5779ac96ebdfa10b9b48adef53e7db..ed9c443bb8a166a64d8beaea3fb04fef9545577c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -196,6 +196,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, case VIRTGPU_PARAM_3D_FEATURES: value = vgdev->has_virgl_3d == true ? 1 : 0; break; + case VIRTGPU_PARAM_CAPSET_QUERY_FIX: + value = 1; + break; default: return -EINVAL; } @@ -471,7 +474,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, { struct virtio_gpu_device *vgdev = dev->dev_private; struct drm_virtgpu_get_caps *args = data; - int size; + unsigned size, host_caps_size; int i; int found_valid = -1; int ret; @@ -480,6 +483,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, if (vgdev->num_capsets == 0) return -ENOSYS; + /* don't allow userspace to pass 0 */ + if (args->size == 0) + return -EINVAL; + spin_lock(&vgdev->display_info_lock); for (i = 0; i < vgdev->num_capsets; i++) { if (vgdev->capsets[i].id == args->cap_set_id) { @@ -495,11 +502,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, return -EINVAL; } - size = vgdev->capsets[found_valid].max_size; - if (args->size > size) { - spin_unlock(&vgdev->display_info_lock); - return -EINVAL; - } + host_caps_size = vgdev->capsets[found_valid].max_size; + /* only copy to user the minimum of the host caps size or the guest caps size */ + size = min(args->size, host_caps_size); list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { if (cache_ent->id == args->cap_set_id && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h index 557a033fb610f1dfcb3568aceebd5592c97b3c60..8545488aa0cfbe1bf1b1d14514d6794c0077834a 100644 
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h @@ -135,17 +135,24 @@ #else -/* In the 32-bit version of this macro, we use "m" because there is no - * more register left for bp +/* + * In the 32-bit version of this macro, we store bp in a memory location + * because we've run out of registers. + * Now we can't reference that memory location while we've modified + * %esp or %ebp, so we first push it on the stack, just before we push + * %ebp, and then when we need it we read it from the stack where we + * just pushed it. */ #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ port_num, magic, bp, \ eax, ebx, ecx, edx, si, di) \ ({ \ - asm volatile ("push %%ebp;" \ - "mov %12, %%ebp;" \ + asm volatile ("push %12;" \ + "push %%ebp;" \ + "mov 0x04(%%esp), %%ebp;" \ "rep outsb;" \ - "pop %%ebp;" : \ + "pop %%ebp;" \ + "add $0x04, %%esp;" : \ "=a"(eax), \ "=b"(ebx), \ "=c"(ecx), \ @@ -167,10 +174,12 @@ port_num, magic, bp, \ eax, ebx, ecx, edx, si, di) \ ({ \ - asm volatile ("push %%ebp;" \ - "mov %12, %%ebp;" \ + asm volatile ("push %12;" \ + "push %%ebp;" \ + "mov 0x04(%%esp), %%ebp;" \ "rep insb;" \ - "pop %%ebp" : \ + "pop %%ebp;" \ + "add $0x04, %%esp;" : \ "=a"(eax), \ "=b"(ebx), \ "=c"(ecx), \ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index aacce4753a62a53d05c55b5c214de6c835f61418..205a5f4b58f30fd08844a1b1d64a44b76740a652 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -453,7 +453,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); + struct drm_crtc *crtc = plane->state->crtc ?
+ plane->state->crtc : old_state->crtc; + if (vps->dmabuf) + vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false); vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; @@ -491,10 +495,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, } size = new_state->crtc_w * new_state->crtc_h * 4; + dev_priv = vmw_priv(crtc->dev); if (vps->dmabuf) { - if (vps->dmabuf_size == size) - return 0; + if (vps->dmabuf_size == size) { + /* + * Note that this might temporarily up the pin-count + * to 2, until cleanup_fb() is called. + */ + return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, + true); + } vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; @@ -504,7 +515,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, if (!vps->dmabuf) return -ENOMEM; - dev_priv = vmw_priv(crtc->dev); vmw_svga_enable(dev_priv); /* After we have alloced the backing store might not be able to @@ -515,13 +525,18 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, &vmw_vram_ne_placement, false, &vmw_dmabuf_bo_free); vmw_overlay_resume_all(dev_priv); - - if (ret != 0) + if (ret) { vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ - else - vps->dmabuf_size = size; + return ret; + } - return ret; + vps->dmabuf_size = size; + + /* + * TTM already thinks the buffer is pinned, but make sure the + * pin_count is upped. 
+ */ + return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true); } diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c860a7997cb59c981557e01021d6a4278b2470ea..1d1612e28854b89b85deaf8f6f4a8c0898c67574 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -125,11 +125,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index) if (pre_node == pre->dev->of_node) { mutex_unlock(&ipu_pre_list_mutex); device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); + of_node_put(pre_node); return pre; } } mutex_unlock(&ipu_pre_list_mutex); + of_node_put(pre_node); + return NULL; } diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index 0013ca9f72c83e8f85b3c91aa62aaf619690f234..1c36fa3a90e2586d62d04272304732bb0fdb45d9 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c @@ -101,11 +101,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id) mutex_unlock(&ipu_prg_list_mutex); device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); prg->id = ipu_id; + of_node_put(prg_node); return prg; } } mutex_unlock(&ipu_prg_list_mutex); + of_node_put(prg_node); + return NULL; } @@ -249,10 +252,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan) { int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); struct ipu_prg *prg = ipu_chan->ipu->prg_priv; - struct ipu_prg_channel *chan = &prg->chan[prg_chan]; + struct ipu_prg_channel *chan; u32 val; - if (!chan->enabled || prg_chan < 0) + if (prg_chan < 0) + return; + + chan = &prg->chan[prg_chan]; + if (!chan->enabled) return; clk_prepare_enable(prg->clk_ipg); @@ -279,13 +286,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, { int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); struct ipu_prg *prg = ipu_chan->ipu->prg_priv; - struct ipu_prg_channel *chan = &prg->chan[prg_chan]; + struct ipu_prg_channel *chan; u32 val; int ret; if (prg_chan < 0) return prg_chan; + chan = 
&prg->chan[prg_chan]; + if (chan->enabled) { ipu_pre_update(prg->pres[chan->used_pre], *eba); return 0; diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 30fdd69a3a7b779d34f3f47963a45dec472e997d..f461bc1649d3e0fb41a5d4409c9c5b02737e611b 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -400,4 +400,23 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .max_power = 5448, .va_padding = SZ_64K, }, + { + .gpurev = ADRENO_REV_A680, + .core = 6, + .major = 8, + .minor = 0, + .patchid = ANY_ID, + .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU, + .sqefw_name = "a630_sqe.fw", + .zap_name = "a640_zap", + .gpudev = &adreno_a6xx_gpudev, + .gmem_size = SZ_2M, + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a640_gmu.bin", + .gpmu_major = 0x2, + .gpmu_minor = 0x000, + .gpmu_tsens = 0x000C000D, + .max_power = 5448, + }, }; diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index e2b848753e56c7fed89a641f0f5f4e0f82e5134d..952e48b53006235ae12617af711faf76e401060a 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -213,6 +213,7 @@ enum adreno_gpurev { ADRENO_REV_A615 = 615, ADRENO_REV_A630 = 630, ADRENO_REV_A640 = 640, + ADRENO_REV_A680 = 680, }; #define ADRENO_START_WARM 0 @@ -1251,6 +1252,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev) ADRENO_TARGET(a615, ADRENO_REV_A615) ADRENO_TARGET(a630, ADRENO_REV_A630) ADRENO_TARGET(a640, ADRENO_REV_A640) +ADRENO_TARGET(a680, ADRENO_REV_A680) static inline int adreno_is_a630v1(struct adreno_device *adreno_dev) { @@ -1891,10 +1893,10 @@ static inline bool adreno_has_sptprac_gdsc(struct adreno_device *adreno_dev) static inline bool adreno_has_gbif(struct adreno_device *adreno_dev) { - if (adreno_is_a615(adreno_dev) || adreno_is_a640(adreno_dev)) - return true; - else + if (!adreno_is_a6xx(adreno_dev) || adreno_is_a630(adreno_dev)) return false; + else + return true; } /** 
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index e5c82220175e337052c1df130c377b9a848fba86..4f989127811831030ce7e6ebcf78b03cb902c4d6 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1332,7 +1332,7 @@ static void a3xx_protect_init(struct adreno_device *adreno_dev) iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu); if (iommu_regs) adreno_set_protected_registers(adreno_dev, &index, - iommu_regs->base, iommu_regs->range); + iommu_regs->base, ilog2(iommu_regs->range)); } static void a3xx_start(struct adreno_device *adreno_dev) diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c index 771d035875a00a357a4630ca71082a18be7a3cab..432e98dbed9455efd545c2a9ebd23d4482e9be9f 100644 --- a/drivers/gpu/msm/adreno_a4xx.c +++ b/drivers/gpu/msm/adreno_a4xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -524,7 +524,7 @@ static void a4xx_protect_init(struct adreno_device *adreno_dev) iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu); if (iommu_regs) adreno_set_protected_registers(adreno_dev, &index, - iommu_regs->base, iommu_regs->range); + iommu_regs->base, ilog2(iommu_regs->range)); } static struct adreno_snapshot_sizes a4xx_snap_sizes = { diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 41c140c5440942a15e2b414843a0fd96461160ae..5b7e7aabef3a3fb811d960b126f7449ff955a0ea 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -364,7 +364,7 @@ static void a5xx_protect_init(struct adreno_device *adreno_dev) iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu); if (iommu_regs) adreno_set_protected_registers(adreno_dev, &index, - iommu_regs->base, iommu_regs->range); + iommu_regs->base, ilog2(iommu_regs->range)); } /* diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 9fb1d8de59c5bc6524a5a62fc2599c2befde12d8..c38dff8d2046367d0df179b27879772bdefa62ab 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -68,6 +68,7 @@ static const struct adreno_vbif_platform a6xx_vbif_platforms[] = { { adreno_is_a630, a630_vbif }, { adreno_is_a615, a615_gbif }, { adreno_is_a640, a640_gbif }, + { adreno_is_a680, a640_gbif }, }; struct kgsl_hwcg_reg { @@ -253,7 +254,7 @@ static const struct kgsl_hwcg_reg a640_hwcg_regs[] = { {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, {A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, {A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, - {A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, {A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, {A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, {A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, @@ -308,6 +309,7 @@ static const struct { {adreno_is_a630, a630_hwcg_regs, 
ARRAY_SIZE(a630_hwcg_regs)}, {adreno_is_a615, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)}, {adreno_is_a640, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)}, + {adreno_is_a680, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)}, }; static struct a6xx_protected_regs { @@ -486,7 +488,7 @@ static void a6xx_protect_init(struct adreno_device *adreno_dev) if (mmu_prot) { mmu_base = mmu_prot->base; - mmu_range = 1 << mmu_prot->range; + mmu_range = mmu_prot->range; req_sets += DIV_ROUND_UP(mmu_range, 0x2000); } @@ -541,12 +543,10 @@ static void a6xx_enable_64bit(struct adreno_device *adreno_dev) static inline unsigned int __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev) { - if (adreno_is_a615(adreno_dev)) - return 0x8AA8AA82; - else if (adreno_is_a640(adreno_dev)) - return 0x8AA8AA82; - else + if (adreno_is_a630(adreno_dev)) return 0x8AA8AA02; + else + return 0x8AA8AA82; } static inline unsigned int @@ -554,8 +554,6 @@ __get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev) { if (adreno_is_a615(adreno_dev)) return 0x00000222; - else if (adreno_is_a640(adreno_dev)) - return 0x00020202; else return 0x00020202; } @@ -565,8 +563,6 @@ __get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev) { if (adreno_is_a615(adreno_dev)) return 0x00000111; - else if (adreno_is_a640(adreno_dev)) - return 0x00010111; else return 0x00010111; } @@ -576,8 +572,6 @@ __get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev) { if (adreno_is_a615(adreno_dev)) return 0x00000555; - else if (adreno_is_a640(adreno_dev)) - return 0x00005555; else return 0x00005555; } @@ -749,7 +743,11 @@ static void a6xx_start(struct adreno_device *adreno_dev) kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804); kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4); - kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0); + /* ROQ sizes are twice as big on a640/a680 than on a630 */ + if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); + 
else + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0); kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); /* Setting the mem pool size */ @@ -758,6 +756,8 @@ static void a6xx_start(struct adreno_device *adreno_dev) /* Setting the primFifo thresholds values */ if (adreno_is_a640(adreno_dev)) kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x400 << 11)); + else if (adreno_is_a680(adreno_dev)) + kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x800 << 11)); else kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11)); diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index e12a8b96258d784cf6709cef19c2689788cd60f5..d3f36cfe7c69e2324d07f7d259641672a0b74ac9 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -551,7 +551,7 @@ static int a6xx_gmu_oob_set(struct adreno_device *adreno_dev, if (!gmu_core_isenabled(device)) return 0; - if (adreno_is_a640(adreno_dev)) { + if (!adreno_is_a630(adreno_dev) && !adreno_is_a615(adreno_dev)) { set = BIT(30 - req * 2); check = BIT(31 - req); @@ -604,7 +604,7 @@ static inline void a6xx_gmu_oob_clear(struct adreno_device *adreno_dev, if (!gmu_core_isenabled(device)) return; - if (adreno_is_a640(adreno_dev)) { + if (!adreno_is_a630(adreno_dev) && !adreno_is_a615(adreno_dev)) { clear = BIT(31 - req * 2); if (req >= 6) { dev_err(&gmu->pdev->dev, @@ -626,11 +626,12 @@ static int a6xx_gmu_hfi_start_msg(struct adreno_device *adreno_dev) struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct hfi_start_cmd req; - if (!adreno_is_a640(adreno_dev)) - return 0; + if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) + return hfi_send_req(KGSL_GMU_DEVICE(device), + H2F_MSG_START, &req); + + return 0; - /* Send hfi start msg */ - return hfi_send_req(KGSL_GMU_DEVICE(device), H2F_MSG_START, &req); } #define FREQ_VOTE(idx, ack) (((idx) & 0xFF) | (((ack) & 0xF) << 28)) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 
7d68c7c18902c92c4e937629a627448aba1b2b8b..45fe141133b5cec772247210cd89fc7cbd80d0a4 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -423,9 +423,9 @@ static int kgsl_mem_entry_attach_process(struct kgsl_device *device, if (entry->memdesc.gpuaddr) { if (entry->memdesc.flags & KGSL_MEMFLAGS_SPARSE_VIRT) ret = kgsl_mmu_sparse_dummy_map( - entry->memdesc.pagetable, - &entry->memdesc, 0, - entry->memdesc.size); + entry->memdesc.pagetable, + &entry->memdesc, 0, + kgsl_memdesc_footprint(&entry->memdesc)); else if (entry->memdesc.gpuaddr) ret = kgsl_mmu_map(entry->memdesc.pagetable, &entry->memdesc); @@ -4349,6 +4349,8 @@ static unsigned long _get_svm_area(struct kgsl_process_private *private, else align = SZ_4K; + align = max_t(uint64_t, align, entry->memdesc.pad_to); + /* get the GPU pagetable's SVM range */ if (kgsl_mmu_svm_range(private->pagetable, &start, &end, entry->memdesc.flags)) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 3a5122953d0bd40404119335ed23841f53cad276..c27aa2dcd3680d5cae1f205e3146448f5d55a74e 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -593,7 +593,7 @@ static int gmu_memory_probe(struct kgsl_device *device, /* * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth. 
- * @gmu: Pointer to GMU device + * @device: Pointer to the device * @gpu_pwrlevel: index to GPU DCVS table used by KGSL * @bus_level: index to GPU bus table used by KGSL * @@ -618,9 +618,10 @@ static int gmu_dcvs_set(struct kgsl_device *device, if (bus_level < gmu->num_bwlevels && bus_level > 0) req.bw = bus_level; + /* GMU will vote for slumber levels through the sleep sequence */ if ((req.freq == INVALID_DCVS_IDX) && (req.bw == INVALID_DCVS_IDX)) - return -EINVAL; + return 0; if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) { int ret = gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, @@ -638,6 +639,9 @@ static int gmu_dcvs_set(struct kgsl_device *device, return ret; } + if (!test_bit(GMU_HFI_ON, &gmu->flags)) + return 0; + return hfi_send_req(gmu, H2F_MSG_GX_BW_PERF_VOTE, &req); } @@ -1415,6 +1419,7 @@ static int gmu_probe(struct kgsl_device *device, tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu); INIT_LIST_HEAD(&hfi->msglist); spin_lock_init(&hfi->msglock); + hfi->kgsldev = device; /* Retrieves GMU/GPU power level configurations*/ ret = gmu_pwrlevel_probe(gmu, node); @@ -1975,4 +1980,5 @@ struct gmu_core_ops gmu_ops = { .dcvs_set = gmu_dcvs_set, .snapshot = gmu_snapshot, .regulator_isenabled = gmu_regulator_isenabled, + .suspend = gmu_suspend, }; diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c index 3a60a53dc3e2d787a42ff9699ae875baf82ee24a..37a9d3178e8eadda06b5e2ac113cc80fbe69a6bf 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.c +++ b/drivers/gpu/msm/kgsl_gmu_core.c @@ -123,6 +123,16 @@ void gmu_core_stop(struct kgsl_device *device) gmu_core_ops->stop(device); } +int gmu_core_suspend(struct kgsl_device *device) +{ + struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device); + + if (gmu_core_ops && gmu_core_ops->suspend) + return gmu_core_ops->suspend(device); + + return -EINVAL; +} + void gmu_core_snapshot(struct kgsl_device *device) { struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device); diff --git 
a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h index 40a9eddb604fcfd353f756ce7feb2c56687eaa45..9d8b65818af8a2a5131a425d31ba020663bdc137 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.h +++ b/drivers/gpu/msm/kgsl_gmu_core.h @@ -118,6 +118,7 @@ struct gmu_core_ops { int (*get_idle_level)(struct kgsl_device *device); void (*set_idle_level)(struct kgsl_device *device, unsigned int val); bool (*regulator_isenabled)(struct kgsl_device *device); + int (*suspend)(struct kgsl_device *device); }; struct gmu_dev_ops { @@ -165,6 +166,7 @@ int gmu_core_probe(struct kgsl_device *device); void gmu_core_remove(struct kgsl_device *device); int gmu_core_start(struct kgsl_device *device); void gmu_core_stop(struct kgsl_device *device); +int gmu_core_suspend(struct kgsl_device *device); void gmu_core_snapshot(struct kgsl_device *device); bool gmu_core_gpmu_isenabled(struct kgsl_device *device); bool gmu_core_isenabled(struct kgsl_device *device); diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index 69bb16c1db331cdca6f375b6c01e3a4dfb620c8e..bec11d672aa2767b95082314d67fabfeef077fd3 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -105,7 +105,6 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx, uint32_t *msg) { - struct kgsl_device *device = kgsl_get_device(KGSL_DEVICE_3D0); struct hfi_queue_table *tbl = gmu->hfi_mem->hostptr; struct hfi_queue_header *hdr = &tbl->qhdr[queue_idx]; uint32_t *queue; @@ -168,7 +167,7 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx, wmb(); /* Send interrupt to GMU to receive the message */ - adreno_write_gmureg(ADRENO_DEVICE(device), + adreno_write_gmureg(ADRENO_DEVICE(hfi->kgsldev), ADRENO_REG_GMU_HOST2GMU_INTR_SET, 0x1); return 0; @@ -610,7 +609,7 @@ int hfi_start(struct kgsl_device *device, if (test_bit(GMU_HFI_ON, &gmu->flags)) return 0; - if (!adreno_is_a640(adreno_dev)) { 
+ if (!adreno_is_a640(adreno_dev) && !adreno_is_a680(adreno_dev)) { result = hfi_send_gmu_init(gmu, boot_state); if (result) return result; @@ -629,10 +628,12 @@ int hfi_start(struct kgsl_device *device, return result; /* - * Send H2F_MSG_CORE_FW_START and features for A640 devices, - * otherwise send H2F_MSG_TEST if quirk is enabled. + * If quirk is enabled send H2F_MSG_TEST and tell the GMU + * we are sending no more HFIs until the next boot otherwise + * send H2F_MSG_CORE_FW_START and features for A640 devices */ - if (adreno_is_a640(adreno_dev)) { + + if (HFI_VER_MAJOR(&gmu->hfi) >= 2) { result = hfi_send_feature_ctrls(gmu); if (result) return result; @@ -641,17 +642,12 @@ int hfi_start(struct kgsl_device *device, return result; } else { - /* - * Tell the GMU we are sending no more HFIs - * until the next boot - */ if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) { result = hfi_send_test(gmu); if (result) return result; } } - set_bit(GMU_HFI_ON, &gmu->flags); return 0; } diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h index d1100e05ebaa4d9f5e2fc76074d9f637a902b47a..372b45a3cf9d369ec5c901677a9513747c51c90e 100644 --- a/drivers/gpu/msm/kgsl_hfi.h +++ b/drivers/gpu/msm/kgsl_hfi.h @@ -590,6 +590,7 @@ struct pending_cmd { /** * struct kgsl_hfi - HFI control structure + * @kgsldev: Pointer to the kgsl device * @hfi_interrupt_num: number of GMU asserted HFI interrupt * @msglock: spinlock to protect access to outstanding command message list * @cmdq_mutex: mutex to protect command queue access from multiple senders @@ -601,6 +602,7 @@ struct pending_cmd { * value of the counter is used as sequence number for HFI message */ struct kgsl_hfi { + struct kgsl_device *kgsldev; int hfi_interrupt_num; spinlock_t msglock; struct mutex cmdq_mutex; diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 439e3d24a32dc0f2289fc4949cf05c4e0677bf3e..5566d3fffefc53a9b12dc2f6f16721592ef1d011 100644 --- 
a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1869,9 +1869,10 @@ static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt, struct page **pages = NULL; struct sg_table sgt; int count = size >> PAGE_SHIFT; + unsigned int map_flags; /* verify the offset is within our range */ - if (size + offset > memdesc->size) + if (size + offset > kgsl_memdesc_footprint(memdesc)) return -EINVAL; if (kgsl_dummy_page == NULL) { @@ -1881,6 +1882,10 @@ static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt, return -ENOMEM; } + map_flags = MMU_FEATURE(pt->mmu, KGSL_MMU_PAD_VA) ? + _get_protection_flags(pt, memdesc) : + IOMMU_READ | IOMMU_NOEXEC; + pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL); if (pages == NULL) return -ENOMEM; @@ -1892,7 +1897,7 @@ static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt, 0, size, GFP_KERNEL); if (ret == 0) { ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset, - sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC); + sgt.sgl, sgt.nents, map_flags); sg_free_table(&sgt); } @@ -2472,7 +2477,8 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable, size = kgsl_memdesc_footprint(memdesc); - align = 1 << kgsl_memdesc_get_align(memdesc); + align = max_t(uint64_t, 1 << kgsl_memdesc_get_align(memdesc), + memdesc->pad_to); if (memdesc->flags & KGSL_MEMFLAGS_FORCE_32BIT) { start = pt->compat_va_start; @@ -2661,7 +2667,7 @@ static int _kgsl_iommu_probe(struct kgsl_device *device, return -EINVAL; } iommu->protect.base = reg_val[0] / sizeof(u32); - iommu->protect.range = ilog2(reg_val[1] / sizeof(u32)); + iommu->protect.range = reg_val[1] / sizeof(u32); of_property_for_each_string(node, "clock-names", prop, cname) { struct clk *c = devm_clk_get(&pdev->dev, cname); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index fd53cc13e7368b11ecde4679fbc85c8a6af4b004..72722618941c96bdb1b7b6cb9a2209741ad7c34e 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ 
b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -27,6 +27,7 @@ #include "kgsl_pwrscale.h" #include "kgsl_device.h" #include "kgsl_trace.h" +#include "kgsl_gmu_core.h" #define KGSL_PWRFLAGS_POWER_ON 0 #define KGSL_PWRFLAGS_CLK_ON 1 @@ -226,12 +227,10 @@ static int kgsl_bus_scale_request(struct kgsl_device *device, /* GMU scales BW */ if (gmu_core_gpmu_isenabled(device)) - return 0; - - if (pwr->pcl) { + ret = gmu_core_dcvs_set(device, INVALID_DCVS_IDX, buslevel); + else if (pwr->pcl) /* Linux bus driver scales BW */ ret = msm_bus_scale_client_update_request(pwr->pcl, buslevel); - } if (ret) KGSL_PWR_ERR(device, "GPU BW scaling failure: %d\n", ret); @@ -2811,6 +2810,24 @@ _aware(struct kgsl_device *device) WARN_ONCE(1, "Failed to recover GMU\n"); if (device->snapshot) device->snapshot->recovered = false; + /* + * On recovery failure, we are clearing + * GMU_FAULT bit and also not keeping + * the state as RESET to make sure any + * attempt to wake GMU/GPU after this + * is treated as a fresh start. But on + * recovery failure, GMU HS, clocks and + * IRQs are still ON/enabled because of + * which next GMU/GPU wakeup results in + * multiple warnings from GMU start as HS, + * clocks and IRQ were ON while doing a + * fresh start i.e. wake from SLUMBER. + * + * Suspend the GMU on recovery failure + * to make sure next attempt to wake up + * GMU/GPU is indeed a fresh start. 
+ */ + gmu_core_suspend(device); kgsl_pwrctrl_set_state(device, state); } else { if (device->snapshot) diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h index 6811363da4db8d62ad7d28085d8bc343e4799c04..01f72d78408ea75171ae185c0a613c6b78156242 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.h +++ b/drivers/gpu/msm/kgsl_sharedmem.h @@ -135,7 +135,6 @@ kgsl_memdesc_get_memtype(const struct kgsl_memdesc *memdesc) static inline int kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align) { - align = max_t(unsigned int, align, ilog2(memdesc->pad_to)); if (align > 32) align = 32; diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h index 7ea68c0e8803e422573716bd22265a923e7113ee..03f501f764c802c6ce36e64e1a06714d9d72c868 100644 --- a/drivers/gpu/msm/kgsl_sync.h +++ b/drivers/gpu/msm/kgsl_sync.h @@ -137,8 +137,8 @@ static inline void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline) } -struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, - void (*func)(void *priv), void *priv, +static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd, + bool (*func)(void *priv), void *priv, char *fence_name, int name_len) { return NULL; @@ -188,7 +188,7 @@ static inline void kgsl_syncsource_cleanup(struct kgsl_process_private *private, } -void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event, +static inline void kgsl_dump_fence(struct kgsl_drawobj_sync_event *event, char *fence_str, int len) { } diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index c219e43b8f026faa69e4617cecc2b31e7449a064..f5f3f8cf57ea66d1a95bd4511d767611fb0fb338 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -1469,7 +1469,7 @@ static void nct6775_update_pwm(struct device *dev) duty_is_dc = data->REG_PWM_MODE[i] && (nct6775_read_value(data, data->REG_PWM_MODE[i]) & data->PWM_MODE_MASK[i]); - data->pwm_mode[i] = duty_is_dc; + data->pwm_mode[i] = !duty_is_dc; fanmodecfg = 
nct6775_read_value(data, data->REG_FAN_MODE[i]); for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) { @@ -2350,7 +2350,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) struct nct6775_data *data = nct6775_update_device(dev); struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr); - return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]); + return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]); } static ssize_t @@ -2371,9 +2371,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, if (val > 1) return -EINVAL; - /* Setting DC mode is not supported for all chips/channels */ + /* Setting DC mode (0) is not supported for all chips/channels */ if (data->REG_PWM_MODE[nr] == 0) { - if (val) + if (!val) return -EINVAL; return count; } @@ -2382,7 +2382,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, data->pwm_mode[nr] = val; reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]); reg &= ~data->PWM_MODE_MASK[nr]; - if (val) + if (!val) reg |= data->PWM_MODE_MASK[nr]; nct6775_write_value(data, data->REG_PWM_MODE[nr], reg); mutex_unlock(&data->update_lock); diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 00d6995af4c23c50ea3e0de261bc641bd2ba88ee..8a44e94d567955ee5d3c5f02ecb2afc53a58d88c 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg) const struct adm1275_data *data = to_adm1275_data(info); int ret = 0; - if (page) + if (page > 0) return -ENXIO; switch (reg) { @@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg, const struct adm1275_data *data = to_adm1275_data(info); int ret; - if (page) + if (page > 0) return -ENXIO; switch (reg) { diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c index 
dd4883a19045601fcccac2cebea02d987a728e02..e951f9b87abb0cd4caeb854bd0936f6175657e30 100644 --- a/drivers/hwmon/pmbus/max8688.c +++ b/drivers/hwmon/pmbus/max8688.c @@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg) { int ret; - if (page) + if (page > 0) return -ENXIO; switch (reg) { diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 6ea62c62ff27123ed21e5119deceab6d0afa0e7d..9cdb3fbc8c1f345c27ab5c6afe2e4f021757a865 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -315,7 +315,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata) } pc = debug_adjust_pc(drvdata); - dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc); + dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc); if (drvdata->edcidsr_present) dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr); diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 56ecd7aff5eb9542d70300d8bcfb153b8aa0b2da..87e407482c7553bcebd4cd8921ce51307394376e 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2012,2018 The Linux Foundation. All rights reserved. 
* * Description: CoreSight Embedded Trace Buffer driver * @@ -686,7 +686,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) spin_lock_init(&drvdata->spinlock); drvdata->buffer_depth = etb_get_buffer_depth(drvdata); - pm_runtime_put(&adev->dev); if (drvdata->buffer_depth & 0x80000000) return -EINVAL; @@ -712,6 +711,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) ret = misc_register(&drvdata->miscdev); if (ret) goto err_misc_register; + pm_runtime_put(&adev->dev); return 0; diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c index 07ba40862b4a9c090541cf48a561a965d2f0a9b8..943e5c4ea855dd09065a1e967d2d2c3a7c0e954f 100644 --- a/drivers/hwtracing/coresight/coresight-ost.c +++ b/drivers/hwtracing/coresight/coresight-ost.c @@ -281,14 +281,13 @@ EXPORT_SYMBOL(stm_ost_packet); int stm_set_ost_params(struct stm_drvdata *drvdata, size_t bitmap_size) { - stmdrvdata = drvdata; - drvdata->chs.bitmap = devm_kzalloc(drvdata->dev, bitmap_size, GFP_KERNEL); if (!drvdata->chs.bitmap) return -ENOMEM; bitmap_fill(drvdata->entities, OST_ENTITY_MAX); + stmdrvdata = drvdata; return 0; } diff --git a/drivers/hwtracing/coresight/coresight-remote-etm.c b/drivers/hwtracing/coresight/coresight-remote-etm.c index f096ccf4481d7c4bd80b279d8308a85e4d2febcd..99ea94bd1e947f91a5580e3e097704ab3edcb363 100644 --- a/drivers/hwtracing/coresight/coresight-remote-etm.c +++ b/drivers/hwtracing/coresight/coresight-remote-etm.c @@ -95,6 +95,7 @@ static int remote_etm_enable(struct coresight_device *csdev, if (!drvdata->service_connected) { dev_err(drvdata->dev, "QMI service not connected!\n"); + ret = -EINVAL; goto err; } /* diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index bc4287ad48b0545ddc7a850cc0b19df9eaf6c8c5..9b0525c272380037338c7d4bb40597928e469170 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ 
b/drivers/hwtracing/coresight/coresight-stm.c @@ -300,7 +300,7 @@ static void stm_generic_unlink(struct stm_data *stm_data, if (!drvdata || !drvdata->csdev) return; /* If any OST entity is enabled do not disable the device */ - if (drvdata->entities == NULL) + if (!bitmap_empty(drvdata->entities, OST_ENTITY_MAX)) return; coresight_disable(drvdata->csdev); } @@ -870,11 +870,6 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) BYTES_PER_CHANNEL), resource_size(res)); } bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long); - /* Store the driver data pointer for use in exported functions */ - ret = stm_set_ost_params(drvdata, bitmap_size); - if (ret) - return ret; - guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); if (!guaranteed) @@ -904,6 +899,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) goto stm_unregister; } + /* Store the driver data pointer for use in exported functions */ + ret = stm_set_ost_params(drvdata, bitmap_size); + if (ret) + goto stm_unregister; + pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized with master %s\n", (char *)id->data, diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index b308b965dcfc1bc49be095ca36554fff149736f0..74b31597ec5ad09a31cb5836e233f6f0c467e965 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -708,8 +708,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4; } - pm_runtime_put(&adev->dev); - ret = tmc_iommu_init(drvdata); if (ret) { dev_err(dev, "TMC SMMU init failed, err =%d\n", ret); @@ -792,6 +790,10 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) tmc_iommu_deinit(drvdata); coresight_unregister(drvdata->csdev); } + + if (!ret) + pm_runtime_put(&adev->dev); + return ret; out_iommu_deinit: diff --git 
a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c index 5d2d0879bb5578e9c490b3a89184993d844b7f53..261b8c8fc15739d39076a9e8048481465c0578e7 100644 --- a/drivers/hwtracing/coresight/coresight-tpda.c +++ b/drivers/hwtracing/coresight/coresight-tpda.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -678,8 +678,6 @@ static int tpda_probe(struct amba_device *adev, const struct amba_id *id) if (!coresight_authstatus_enabled(drvdata->base)) goto err; - pm_runtime_put(&adev->dev); - tpda_init_default_data(drvdata); desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); @@ -695,6 +693,8 @@ static int tpda_probe(struct amba_device *adev, const struct amba_id *id) if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); + pm_runtime_put(&adev->dev); + dev_dbg(drvdata->dev, "TPDA initialized\n"); return 0; err: diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index 8458f27df8e5c4b5207362a56875f8ab900e6437..071826a1270904fa94d9c73012208a67b5c74c7a 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -4345,7 +4345,6 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id) drvdata->bc_counters_avail = BMVAL(devid, 6, 10) + 1; drvdata->tc_counters_avail = BMVAL(devid, 4, 5) + 1; - pm_runtime_put(&adev->dev); tpdm_setup_disable(drvdata); drvdata->traceid = traceid++; @@ -4368,6 +4367,8 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id) if (boot_enable) coresight_enable(drvdata->csdev); + pm_runtime_put(&adev->dev); + return 0; } diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 
1a023e30488c66d4e7e58367a58a02252e6e63b0..c1793313bb0873fea10e263062f6e580d34c7670 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -935,7 +935,7 @@ EXPORT_SYMBOL_GPL(intel_th_trace_disable); int intel_th_set_output(struct intel_th_device *thdev, unsigned int master) { - struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); + struct intel_th_device *hub = to_intel_th_hub(thdev); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); if (!hubdrv->set_output) diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index dfb57eaa9f22a94d5e4a3454649f33f64f6a3f1c..58ac786634dccfce1ce27c9fec51b6a6c76964e6 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -741,8 +741,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, - win->block[i].addr); + dma_free_coherent(msc_dev(msc)->parent->parent, size, + win->block[i].bdesc, win->block[i].addr); } kfree(win); @@ -777,7 +777,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win) /* Reset the page to write-back before releasing */ set_memory_wb((unsigned long)win->block[i].bdesc, 1); #endif - dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, + dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, win->block[i].bdesc, win->block[i].addr); } diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 3db362b88a7a24cb165d42db8e5c920de1eff94b..fedbaa9733cb1ad92c23a5665403bc433e3ae794 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "stm.h" #include @@ -685,7 +686,7 @@ static void stm_device_release(struct device *dev) { struct stm_device *stm = to_stm_device(dev); - 
kfree(stm); + vfree(stm); } int stm_register_device(struct device *parent, struct stm_data *stm_data, @@ -702,7 +703,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, return -EINVAL; nmasters = stm_data->sw_end - stm_data->sw_start + 1; - stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); + stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *)); if (!stm) return -ENOMEM; @@ -755,7 +756,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, /* matches device_initialize() above */ put_device(&stm->dev); err_free: - kfree(stm); + vfree(stm); return err; } diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 13e849bf9aa0c233128de4601b54a3139280afad..4915fa303a7e2a6bbb0671fa250f76831ba364a9 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -207,7 +207,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) i2c_dw_disable_int(dev); /* Enable the adapter */ - __i2c_dw_enable_and_wait(dev, true); + __i2c_dw_enable(dev, true); + + /* Dummy read to avoid the register getting stuck on Bay Trail */ + dw_readl(dev, DW_IC_ENABLE_STATUS); /* Clear and enable interrupts */ dw_readl(dev, DW_IC_CLR_INTR); diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index a832c45276a42008a0b1a9ebeac00ffb8144a519..b0fb97823d6a4006068d6105ecc7d83329575255 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -844,12 +844,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, */ if (of_device_is_compatible(np, "marvell,mv78230-i2c")) { drv_data->offload_enabled = true; - drv_data->errata_delay = true; + /* The delay is only needed in standard mode (100kHz) */ + if (bus_freq <= 100000) + drv_data->errata_delay = true; } if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) { drv_data->offload_enabled = false; - drv_data->errata_delay = true; 
+ /* The delay is only needed in standard mode (100kHz) */ + if (bus_freq <= 100000) + drv_data->errata_delay = true; } if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 6ff0be8cbdc980e1671e6a6bcd4faaf5537f0014..4de45db76756c6edd4828041a84c3b0bf98415e8 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1614,6 +1614,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode) struct cdrom_info *info; int rc = -ENXIO; + check_disk_change(bdev); + mutex_lock(&ide_cd_mutex); info = ide_cd_get(bdev->bd_disk); if (!info) diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 1c06d467d3adeaf33e0c506374a3af183e96399e..78b524735f923a5d45b97b43ba403cd155d77927 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC tristate "Atmel AT91 SAMA5D2 ADC" depends on ARCH_AT91 || COMPILE_TEST depends on HAS_IOMEM + select IIO_BUFFER select IIO_TRIGGERED_BUFFER help Say yes here to build support for Atmel SAMA5D2 ADC which is diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 47c3d7f329004d577f865d261702622149d2cc46..07246a6037e319d863b4ed35853bc6098f50e0f7 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, 33, 0, 17, 16, 12, 10, 8, 6, 4}; -static ssize_t ad7793_read_frequency(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7793_state *st = iio_priv(indio_dev); - - return sprintf(buf, "%d\n", - st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]); -} - -static ssize_t ad7793_write_frequency(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_dev *indio_dev = 
dev_to_iio_dev(dev); - struct ad7793_state *st = iio_priv(indio_dev); - long lval; - int i, ret; - - ret = kstrtol(buf, 10, &lval); - if (ret) - return ret; - - if (lval == 0) - return -EINVAL; - - for (i = 0; i < 16; i++) - if (lval == st->chip_info->sample_freq_avail[i]) - break; - if (i == 16) - return -EINVAL; - - ret = iio_device_claim_direct_mode(indio_dev); - if (ret) - return ret; - st->mode &= ~AD7793_MODE_RATE(-1); - st->mode |= AD7793_MODE_RATE(i); - ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode); - iio_device_release_direct_mode(indio_dev); - - return len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ad7793_read_frequency, - ad7793_write_frequency); - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); @@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, NULL @@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = { }; static struct attribute *ad7797_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, NULL }; @@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, *val -= offset; } return IIO_VAL_INT; + case IIO_CHAN_INFO_SAMP_FREQ: + *val = st->chip_info + ->sample_freq_avail[AD7793_MODE_RATE(st->mode)]; + return IIO_VAL_INT; } return -EINVAL; } @@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, break; } break; + case IIO_CHAN_INFO_SAMP_FREQ: + if (!val) { + ret = -EINVAL; + break; + } + + for (i = 0; i < 16; i++) + if (val == st->chip_info->sample_freq_avail[i]) + break; + + if (i == 16) { + ret = -EINVAL; + break; + } + + st->mode &= ~AD7793_MODE_RATE(-1); + 
st->mode |= AD7793_MODE_RATE(i); + ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), + st->mode); + break; default: ret = -EINVAL; } diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c index 4de574f61570753641f16d12ae71efe21aba48cc..462320bf5a9e9e3afba63dbe10b85b0ee5ed8b65 100644 --- a/drivers/iio/adc/qcom-spmi-adc5.c +++ b/drivers/iio/adc/qcom-spmi-adc5.c @@ -654,6 +654,12 @@ static int adc_get_dt_channel_data(struct device *dev, else prop->cal_method = ADC_ABSOLUTE_CAL; + /* + * Default to using timer calibration. Using a fresh calibration value + * for every conversion will increase the overall time for a request. + */ + prop->cal_val = ADC_TIMER_CAL; + dev_dbg(dev, "%02x name %s\n", chan, name); return 0; diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index ff03324dee132f49455fd156bacbc9b450146a19..0a7289571b6809afd4425fb87df3acc974586f1f 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum); * Should be used as the set_length callback for iio_buffer_access_ops * struct for DMA buffers. 
*/ -int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) +int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length) { /* Avoid an invalid state */ if (length < 2) diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c index 047fe757ab97d6075bc920511a187dde78450b75..70c302a93d7fd3d7f8b884840c487bbcb7d2c532 100644 --- a/drivers/iio/buffer/kfifo_buf.c +++ b/drivers/iio/buffer/kfifo_buf.c @@ -22,11 +22,18 @@ struct iio_kfifo { #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, - int bytes_per_datum, int length) + size_t bytes_per_datum, unsigned int length) { if ((length == 0) || (bytes_per_datum == 0)) return -EINVAL; + /* + * Make sure we don't overflow an unsigned int after kfifo rounds up to + * the next power of 2. + */ + if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum) + return -EINVAL; + return __kfifo_alloc((struct __kfifo *)&buf->kf, length, bytes_per_datum, GFP_KERNEL); } @@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd) return 0; } -static int iio_set_length_kfifo(struct iio_buffer *r, int length) +static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length) { /* Avoid an invalid state */ if (length < 2) diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 77515638c55cc1449ad09b5572bcd0813c58955e..896cfd9303b0d1e94645c40127fc99e2bf4f4e59 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index, return -EINVAL; if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) - return -EAGAIN; + return -EINVAL; memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); if (attr) { diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 
45f2f095f793a8f305b77de183524e3c510c9274..4eb72ff539fc94fa755286a175ceacfaa01f5cbc 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, { int ret; u16 gid_index; - u8 p; - - if (rdma_protocol_roce(device, port_num)) { - ret = ib_find_cached_gid_by_port(device, &rec->port_gid, - gid_type, port_num, - ndev, - &gid_index); - } else if (rdma_protocol_ib(device, port_num)) { - ret = ib_find_cached_gid(device, &rec->port_gid, - IB_GID_TYPE_IB, NULL, &p, - &gid_index); - } else { - ret = -EINVAL; - } + /* GID table is not based on the netdevice for IB link layer, + * so ignore ndev during search. + */ + if (rdma_protocol_ib(device, port_num)) + ndev = NULL; + else if (!rdma_protocol_roce(device, port_num)) + return -EINVAL; + + ret = ib_find_cached_gid_by_port(device, &rec->port_gid, + gid_type, port_num, + ndev, + &gid_index); if (ret) return ret; diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 9cb801d1fe5432c918d1dba398b30ca58e8902f1..1984d6cee3e0c886b53939beeeb420fd817d742a 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -486,12 +486,13 @@ int rdma_explicit_destroy(struct ib_uobject *uobject) ret = uobject->type->type_class->remove_commit(uobject, RDMA_REMOVE_DESTROY); if (ret) - return ret; + goto out; uobject->type = &null_obj_type; +out: up_read(&ucontext->cleanup_rwsem); - return 0; + return ret; } static void alloc_commit_idr_uobject(struct ib_uobject *uobj) diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index ab5e1024fea91ae5ae5b660af1899000775fce5e..b81d2597f563a59032b05c38292180b453ba4623 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1291,10 +1291,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, resolved_dev = dev_get_by_index(dev_addr.net, 
dev_addr.bound_dev_if); - if (resolved_dev->flags & IFF_LOOPBACK) { - dev_put(resolved_dev); - resolved_dev = idev; - dev_hold(resolved_dev); + if (!resolved_dev) { + dev_put(idev); + return -ENODEV; } ndev = ib_get_ndev_from_path(rec); rcu_read_lock(); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index c65f0e8ecbd659df0a937cdc89161857aaa35aa8..e47baf0950e3dbe2039b5e0cf9d79e585839af53 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1315,7 +1315,7 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) return -EINVAL; optval = memdup_user((void __user *) (unsigned long) cmd.optval, diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 9a4e899d94b30a843e54f3a06e975ad632428a26..2b6c9b5160705a95d779b22aec4292904ec3b040 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, umem->length = size; umem->address = addr; umem->page_shift = PAGE_SHIFT; - umem->pid = get_task_pid(current, PIDTYPE_PID); /* * We ask for writable memory if any of the following * access flags are set. 
"Local write" and "remote write" @@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); if (access & IB_ACCESS_ON_DEMAND) { - put_pid(umem->pid); ret = ib_umem_odp_get(context, umem, access); if (ret) { kfree(umem); @@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, page_list = (struct page **) __get_free_page(GFP_KERNEL); if (!page_list) { - put_pid(umem->pid); kfree(umem); return ERR_PTR(-ENOMEM); } @@ -231,7 +228,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, if (ret < 0) { if (need_release) __ib_umem_release(context->device, umem, 0); - put_pid(umem->pid); kfree(umem); } else current->mm->pinned_vm = locked; @@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem) __ib_umem_release(umem->context->device, umem, 1); - task = get_pid_task(umem->pid, PIDTYPE_PID); - put_pid(umem->pid); + task = get_pid_task(umem->context->tgid, PIDTYPE_PID); if (!task) goto out; mm = get_task_mm(task); diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 8f2dc79ad4ecc09fc42e9e832a10f2de14d8ef1d..5e9f72ea45790a96dee57d7eb2005f8717b3da45 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev, return 0; } + if (test_bit(attr_id, attr_bundle_h->valid_bitmap)) + return -EINVAL; + spec = &attr_spec_bucket->attrs[attr_id]; e = &elements[attr_id]; e->uattr = uattr_ptr; diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c index 76ddb65645782157fd4fc56dac28efab429cf050..48a99dce976cd81cbd4ea9e2687b2441e29ccc6e 100644 --- a/drivers/infiniband/core/uverbs_ioctl_merge.c +++ b/drivers/infiniband/core/uverbs_ioctl_merge.c @@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters, short min = 
SHRT_MAX; const void *elem; int i, j, last_stored = -1; + unsigned int equal_min = 0; for_each_element(elem, i, j, elements, num_elements, num_offset, data_offset) { @@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters, */ iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; last_stored = i; + if (min == GET_ID(id)) + equal_min++; + else + equal_min = 1; min = GET_ID(id); } @@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters, * Therefore, we need to clean the beginning of the array to make sure * all ids of final elements are equal to min. */ - for (i = num_iters - 1; i >= 0 && - GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--) - ; - - num_iters -= i + 1; - memmove(iters, iters + i + 1, sizeof(*iters) * num_iters); + memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min); *min_id = min; - return num_iters; + return equal_min; } #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ @@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me hash = kzalloc(sizeof(*hash) + ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), sizeof(long)) + - BITS_TO_LONGS(attr_max_bucket) * sizeof(long), + BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long), GFP_KERNEL); if (!hash) { res = -ENOMEM; @@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_ * first handler which != NULL. This also defines the * set of flags used for this handler. 
*/ - for (i = num_object_defs - 1; + for (i = num_method_defs - 1; i >= 0 && !method_defs[i]->handler; i--) ; hash->methods[min_id++] = method; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index b210495ff33c043b0c734a18931e00e81ecdc297..ef9135aa392c11026f1fdbfa747181792865ca95 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1180,7 +1180,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); - goto fail; + goto free_umem; } } @@ -1208,6 +1208,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, return &qp->ib_qp; qp_destroy: bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); +free_umem: + if (udata) { + if (qp->rumem) + ib_umem_release(qp->rumem); + if (qp->sumem) + ib_umem_release(qp->sumem); + } fail: kfree(qp); return ERR_PTR(rc); @@ -1956,10 +1963,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; + /* Need unconditional fence for local invalidate + * opcode to work as expected. + */ + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; - if (wr->send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; @@ -1980,8 +1990,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, wqe->frmr.levels = qplib_frpl->hwq.level + 1; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; - if (wr->wr.send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + /* Need unconditional fence for reg_mr + * opcode to function as expected. 
+ */ + + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->wr.send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index e7450ea92aa9e11ba0d28792f54fcf373e470a66..bf811b23bc95334dd6e4d7314791bbf89ea31940 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1240,9 +1240,12 @@ static void bnxt_re_task(struct work_struct *work) switch (re_work->event) { case NETDEV_REGISTER: rc = bnxt_re_ib_reg(rdev); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Failed to register with IB: %#x", rc); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } break; case NETDEV_UP: bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, @@ -1398,6 +1401,11 @@ static void __exit bnxt_re_mod_exit(void) list_for_each_entry(rdev, &to_be_deleted, list) { dev_info(rdev_to_dev(rdev), "Unregistering Device"); + /* + * Flush out any scheduled tasks before destroying the + * resources + */ + flush_workqueue(bnxt_re_wq); bnxt_re_dev_stop(rdev); bnxt_re_ib_unreg(rdev, true); bnxt_re_remove_one(rdev); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 2bdb1562bd2197e850f14bcc353d6ee12c3271c4..8d91733009a474dbfa4ba1899feb0cb7187fd2c7 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -457,7 +457,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); - + /* Supply (log-base-2-of-host-page-size - base-page-shift) + * to bono to adjust the doorbell page sizes. + */ + req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - + RCFW_DBR_BASE_PAGE_SHIFT); /* * VFs need not setup the HW context area, PF * shall setup this area for VF. 
Skipping the diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 85b16da287f99edfee396fcdaa11ccef1c7c8d3b..7c85e3c4445b7a37cb8c484087a988f123783867 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -49,6 +49,7 @@ #define RCFW_COMM_SIZE 0x104 #define RCFW_DBR_PCI_BAR_REGION 2 +#define RCFW_DBR_BASE_PAGE_SHIFT 12 #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ do { \ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index e277e54a05eb6523caeb4c26c644abd18b794b67..9536de8c5fb8b3d977b78a43af180b6d4b035b8d 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -130,7 +130,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_pkey = le32_to_cpu(sb->max_pkeys); attr->max_inline_data = le32_to_cpu(sb->max_inline_data); - attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; + attr->l2_db_size = (sb->l2_db_space_size + 1) * + (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); attr->max_sgid = le32_to_cpu(sb->max_gid); strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver)); diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index eeb55b2db57e3e4ded1173713a761b683d2ea6b2..480f592e5b4b75f01392ba99dbd78a53d409dcef 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -1734,7 +1734,30 @@ struct cmdq_initialize_fw { #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) - __le16 reserved16; + /* This value is (log-base-2-of-DBR-page-size - 12). + * 0 for 4KB. HW supported values are enumerated below. 
+ */ + __le16 log2_dbr_pg_size; + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ + CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M __le64 qpc_page_dir; __le64 mrw_page_dir; __le64 srq_page_dir; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 82114ba86041713de97d1611a8af09e9d143225b..2595622826688d831f52c1ff3ce858619e3bd6b1 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -5945,6 +5945,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, u64 status; u32 sw_index; int i = 0; + unsigned long irq_flags; sw_index = dd->hw_to_sw[hw_context]; if (sw_index >= dd->num_send_contexts) { @@ -5954,10 +5955,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, return; } sci = &dd->send_contexts[sw_index]; + spin_lock_irqsave(&dd->sc_lock, irq_flags); sc = sci->sc; if (!sc) { dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, sw_index, 
hw_context); + spin_unlock_irqrestore(&dd->sc_lock, irq_flags); return; } @@ -5979,6 +5982,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, */ if (sc->type != SC_USER) queue_work(dd->pport->hfi1_wq, &sc->halt_work); + spin_unlock_irqrestore(&dd->sc_lock, irq_flags); /* * Update the counters for the corresponding status bits. diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index cab796341697953eb6a3599c70f24f05ac768a5b..d92f639c287f22b0fbd9f0627b830fed0aca116c 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -597,6 +597,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct wc->dlid_path_bits = 0; if (is_eth) { + wc->slid = 0; wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); @@ -845,7 +846,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, } } - wc->slid = be16_to_cpu(cqe->rlid); g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); wc->src_qp = g_mlpath_rqpn & 0xffffff; wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; @@ -854,6 +854,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum) ? 
IB_WC_IP_CSUM_OK : 0; if (is_eth) { + wc->slid = 0; wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_CVLAN_PRESENT_MASK) { @@ -865,6 +866,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); } else { + wc->slid = be16_to_cpu(cqe->rlid); wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; wc->vlan_id = 0xffff; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8c681a36e6c7fbfc2cb4d788628a05b976e5e300..e2beb182d54c313a1fb4ea1f1778543441408870 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, gid_tbl[i].version = 2; if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) gid_tbl[i].type = 1; - else - memset(&gid_tbl[i].gid, 0, 12); } } @@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, if (!gids) { ret = -ENOMEM; } else { - for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) - memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { + memcpy(&gids[i].gid, + &port_gid_table->gids[i].gid, + sizeof(union ib_gid)); + gids[i].gid_type = + port_gid_table->gids[i].gid_type; + } } } spin_unlock_bh(&iboe->lock); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index faedc080a5e6fdcd11d0295ec21fce6d0d10c2f2..d804880d637a418ced9c798d025708cebd8ad7b5 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -224,7 +224,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); break; } - wc->slid = be16_to_cpu(cqe->slid); wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; wc->dlid_path_bits = cqe->ml_path; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; @@ -239,10 +238,12 @@ static void 
handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, } if (ll != IB_LINK_LAYER_ETHERNET) { + wc->slid = be16_to_cpu(cqe->slid); wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; return; } + wc->slid = 0; vlan_present = cqe->l4_l3_hdr_type & 0x1; roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; if (vlan_present) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index fb5302ee57c755c207ebda2097c71c4bee892c3a..ab70194a73db3745a21cb5bb5d30114910486b98 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -270,6 +270,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, if (err) return err; + props->active_width = IB_WIDTH_4X; + props->active_speed = IB_SPEED_QDR; + translate_eth_proto_oper(eth_prot_oper, &props->active_speed, &props->active_width); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 749fe906a5b615d11750eafc92c30acfdc4808dd..ef9ee6c328a1d6c06322f095fb89426967ef80a5 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -2881,8 +2881,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, goto out; if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || - !optab[mlx5_cur][mlx5_new]) + !optab[mlx5_cur][mlx5_new]) { + err = -EINVAL; goto out; + } op = optab[mlx5_cur][mlx5_new]; optpar = ib_mask_to_mlx5_opt(attr_mask); diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 97d033f51dc90c4e727cf0a9df2fda174c540a28..ddb05b42e5e6ad89a138f4dffb824f1991558b3a 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -782,7 +782,8 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); if (!dev->num_cnq) { - DP_ERR(dev, "not enough CNQ resources.\n"); + DP_ERR(dev, "Failed. 
At least one CNQ is required.\n"); + rc = -ENOMEM; goto init_err; } diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 769ac07c3c8eb72d3bd2bf95446236f9c07dc34b..7f4cc9336442f9126f360c414b354885af2fb719 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1663,14 +1663,15 @@ static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) static int qedr_update_qp_state(struct qedr_dev *dev, struct qedr_qp *qp, + enum qed_roce_qp_state cur_state, enum qed_roce_qp_state new_state) { int status = 0; - if (new_state == qp->state) + if (new_state == cur_state) return 0; - switch (qp->state) { + switch (cur_state) { case QED_ROCE_QP_STATE_RESET: switch (new_state) { case QED_ROCE_QP_STATE_INIT: @@ -1774,6 +1775,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); enum ib_qp_state old_qp_state, new_qp_state; + enum qed_roce_qp_state cur_state; int rc = 0; DP_DEBUG(dev, QEDR_MSG_QP, @@ -1903,18 +1905,23 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); - qp_params.ack_timeout = attr->timeout; - if (attr->timeout) { - u32 temp; - - temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; - /* FW requires [msec] */ - qp_params.ack_timeout = temp; - } else { - /* Infinite */ + /* The received timeout value is an exponent used like this: + * "12.7.34 LOCAL ACK TIMEOUT + * Value representing the transport (ACK) timeout for use by + * the remote, expressed as: 4.096 * 2^timeout [usec]" + * The FW expects timeout in msec so we need to divide the usec + * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2, + * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8). 
+ * The value of zero means infinite so we use a 'max_t' to make + * sure that sub 1 msec values will be configured as 1 msec. + */ + if (attr->timeout) + qp_params.ack_timeout = + 1 << max_t(int, attr->timeout - 8, 0); + else qp_params.ack_timeout = 0; - } } + if (attr_mask & IB_QP_RETRY_CNT) { SET_FIELD(qp_params.modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); @@ -1987,13 +1994,25 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->dest_qp_num = attr->dest_qp_num; } + cur_state = qp->state; + + /* Update the QP state before the actual ramrod to prevent a race with + * fast path. Modifying the QP state to error will cause the device to + * flush the CQEs and while polling the flushed CQEs will considered as + * a potential issue if the QP isn't in error state. + */ + if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI && + !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR) + qp->state = QED_ROCE_QP_STATE_ERR; + if (qp->qp_type != IB_QPT_GSI) rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params); if (attr_mask & IB_QP_STATE) { if ((qp->qp_type != IB_QPT_GSI) && (!udata)) - rc = qedr_update_qp_state(dev, qp, qp_params.new_state); + rc = qedr_update_qp_state(dev, qp, cur_state, + qp_params.new_state); qp->state = qp_params.new_state; } @@ -2832,6 +2851,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe->wqe_size = 2; @@ -2873,6 +2897,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_RDMA_WRITE_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; @@ -3518,7 
+3547,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qedr_cq *cq = get_qedr_cq(ibcq); - union rdma_cqe *cqe = cq->latest_cqe; + union rdma_cqe *cqe; u32 old_cons, new_cons; unsigned long flags; int update = 0; @@ -3535,6 +3564,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return qedr_gsi_poll_cq(ibcq, num_entries, wc); spin_lock_irqsave(&cq->cq_lock, flags); + cqe = cq->latest_cqe; old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); while (num_entries && is_valid_cqe(cq, cqe)) { struct qedr_qp *qp; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 906bacf365d4170b57a080901b590e4b2e168d29..1cbf4e407afafc692aece425a4fc0b3101f0c264 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1206,7 +1206,7 @@ int rxe_register_device(struct rxe_dev *rxe) rxe->ndev->dev_addr); dev->dev.dma_ops = &dma_virt_ops; dma_coerce_mask_and_coherent(&dev->dev, - dma_get_required_mask(dev->dev.parent)); + dma_get_required_mask(&dev->dev)); dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 29663bc1f982e38202e22dfbc4616babc36102be..d853373162c764412e54c5e29b1a034bb19ff786 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -184,6 +184,15 @@ config INPUT_QPNP_POWER_ON reporting the change in status of the KPDPWR_N line (connected to the power-key) as well as reset features. +config INPUT_QTI_HAPTICS + tristate "Haptics support for QTI PMIC" + depends on MFD_SPMI_PMIC + help + This option enables device driver support for the haptics peripheral + found on Qualcomm Technologies, Inc. PMICs. The haptics peripheral + is capable of driving both LRA and ERM vibrators. 
This module provides + haptic feedback for user actions such as a long press on the touch screen. + config INPUT_SPARCSPKR tristate "SPARC Speaker support" depends on PCI && SPARC64 diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index e4c6e58ebe75f094fae3539d7fffb3417a35872d..137aee4ee2e31a98c8cfa30bf2115164549bf4bc 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_INPUT_PM8941_PWRKEY) += pm8941-pwrkey.o obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o obj-$(CONFIG_INPUT_QPNP_POWER_ON) += qpnp-power-on.o +obj-$(CONFIG_INPUT_QTI_HAPTICS) += qti-haptics.o obj-$(CONFIG_INPUT_POWERMATE) += powermate.o obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o obj-$(CONFIG_INPUT_PWM_VIBRA) += pwm-vibra.o diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c new file mode 100644 index 0000000000000000000000000000000000000000..c5be6b1a68f621d35c101f85bc6ae538038f9762 --- /dev/null +++ b/drivers/input/misc/qti-haptics.c @@ -0,0 +1,1410 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum actutor_type { + ACT_LRA, + ACT_ERM, +}; + +enum lra_res_sig_shape { + RES_SIG_SINE, + RES_SIG_SQUARE, +}; + +enum lra_auto_res_mode { + AUTO_RES_MODE_ZXD, + AUTO_RES_MODE_QWD, +}; + +enum wf_src { + INT_WF_VMAX, + INT_WF_BUFFER, + EXT_WF_AUDIO, + EXT_WF_PWM, +}; + +enum haptics_custom_effect_param { + CUSTOM_DATA_EFFECT_IDX, + CUSTOM_DATA_TIMEOUT_SEC_IDX, + CUSTOM_DATA_TIMEOUT_MSEC_IDX, + CUSTOM_DATA_LEN, +}; + +/* common definitions */ +#define HAP_BRAKE_PATTERN_MAX 4 +#define HAP_WAVEFORM_BUFFER_MAX 8 +#define HAP_VMAX_MV_DEFAULT 1800 +#define HAP_VMAX_MV_MAX 3596 +#define HAP_ILIM_MA_DEFAULT 400 +#define HAP_ILIM_MA_MAX 800 +#define HAP_PLAY_RATE_US_DEFAULT 5715 +#define HAP_PLAY_RATE_US_MAX 20475 +#define HAP_PLAY_RATE_US_LSB 5 +#define VMAX_MIN_PLAY_TIME_US 20000 +#define HAP_SC_DET_MAX_COUNT 5 +#define HAP_SC_DET_TIME_US 1000000 +#define FF_EFFECT_COUNT_MAX 32 + +/* haptics module register definitions */ +#define REG_HAP_STATUS1 0x0A +#define HAP_SC_DET_BIT BIT(3) +#define HAP_BUSY_BIT BIT(1) + +#define REG_HAP_EN_CTL1 0x46 +#define HAP_EN_BIT BIT(7) + +#define REG_HAP_EN_CTL2 0x48 +#define HAP_AUTO_STANDBY_EN_BIT BIT(1) +#define HAP_BRAKE_EN_BIT BIT(0) + +#define REG_HAP_EN_CTL3 0x4A +#define HAP_HBRIDGE_EN_BIT BIT(7) +#define HAP_PWM_SIGNAL_EN_BIT BIT(6) +#define HAP_ILIM_EN_BIT BIT(5) +#define HAP_ILIM_CC_EN_BIT BIT(4) +#define HAP_AUTO_RES_RBIAS_EN_BIT BIT(3) +#define HAP_DAC_EN_BIT BIT(2) +#define HAP_ZX_HYST_EN_BIT BIT(1) +#define HAP_PWM_CTL_EN_BIT BIT(0) + +#define REG_HAP_AUTO_RES_CTRL 0x4B +#define HAP_AUTO_RES_EN_BIT BIT(7) +#define HAP_SEL_AUTO_RES_PERIOD BIT(6) +#define HAP_AUTO_RES_CNT_ERR_DELTA_MASK GENMASK(5, 4) +#define HAP_AUTO_RES_CNT_ERR_DELTA_SHIFT 4 +#define HAP_AUTO_RES_ERR_RECOVERY_BIT BIT(3) +#define HAP_AUTO_RES_EN_DLY_MASK GENMASK(2, 0) +#define 
AUTO_RES_CNT_ERR_DELTA(x) (x << HAP_AUTO_RES_CNT_ERR_DELTA_SHIFT) +#define AUTO_RES_EN_DLY(x) x + +#define REG_HAP_CFG1 0x4C +#define REG_HAP_CFG2 0x4D +#define HAP_LRA_RES_TYPE_BIT BIT(0) + +#define REG_HAP_SEL 0x4E +#define HAP_WF_SOURCE_MASK GENMASK(5, 4) +#define HAP_WF_SOURCE_SHIFT 4 +#define HAP_WF_TRIGGER_BIT BIT(0) +#define HAP_WF_SOURCE_VMAX (0 << HAP_WF_SOURCE_SHIFT) +#define HAP_WF_SOURCE_BUFFER (1 << HAP_WF_SOURCE_SHIFT) +#define HAP_WF_SOURCE_AUDIO (2 << HAP_WF_SOURCE_SHIFT) +#define HAP_WF_SOURCE_PWM (3 << HAP_WF_SOURCE_SHIFT) + +#define REG_HAP_AUTO_RES_CFG 0x4F +#define HAP_AUTO_RES_MODE_BIT BIT(7) +#define HAP_AUTO_RES_MODE_SHIFT 7 +#define HAP_AUTO_RES_CAL_DURATON_MASK GENMASK(6, 5) +#define HAP_CAL_EOP_EN_BIT BIT(3) +#define HAP_CAL_PERIOD_MASK GENMASK(2, 0) +#define HAP_CAL_OPT3_EVERY_8_PERIOD 2 + +#define REG_HAP_SLEW_CFG 0x50 +#define REG_HAP_VMAX_CFG 0x51 +#define HAP_VMAX_SIGN_BIT BIT(7) +#define HAP_VMAX_OVD_BIT BIT(6) +#define HAP_VMAX_MV_MASK GENMASK(5, 1) +#define HAP_VMAX_MV_SHIFT 1 +#define HAP_VMAX_MV_LSB 116 + +#define REG_HAP_ILIM_CFG 0x52 +#define REG_HAP_SC_DEB_CFG 0x53 +#define REG_HAP_RATE_CFG1 0x54 +#define REG_HAP_RATE_CFG2 0x55 +#define REG_HAP_INTERNAL_PWM 0x56 +#define REG_HAP_EXTERNAL_PWM 0x57 +#define REG_HAP_PWM 0x58 + +#define REG_HAP_SC_CLR 0x59 +#define HAP_SC_CLR_BIT BIT(0) + +#define REG_HAP_ZX_CFG 0x5A +#define HAP_ZX_DET_DEB_MASK GENMASK(2, 0) +#define ZX_DET_DEB_10US 0 +#define ZX_DET_DEB_20US 1 +#define ZX_DET_DEB_40US 2 +#define ZX_DET_DEB_80US 3 + +#define REG_HAP_BRAKE 0x5C +#define HAP_BRAKE_PATTERN_MASK 0x3 +#define HAP_BRAKE_PATTERN_SHIFT 2 + +#define REG_HAP_WF_REPEAT 0x5E +#define HAP_WF_REPEAT_MASK GENMASK(6, 4) +#define HAP_WF_REPEAT_SHIFT 4 +#define HAP_WF_S_REPEAT_MASK GENMASK(1, 0) + +#define REG_HAP_WF_S1 0x60 +#define HAP_WF_SIGN_BIT BIT(7) +#define HAP_WF_OVD_BIT BIT(6) +#define HAP_WF_AMP_BIT GENMASK(5, 1) +#define HAP_WF_AMP_SHIFT 1 + +#define REG_HAP_PLAY 0x70 +#define HAP_PLAY_BIT BIT(7) + 
+#define REG_HAP_SEC_ACCESS 0xD0 + +struct qti_hap_effect { + int id; + u8 *pattern; + int pattern_length; + u16 play_rate_us; + u8 wf_repeat_n; + u8 wf_s_repeat_n; +}; + +struct qti_hap_play_info { + struct qti_hap_effect *effect; + u16 vmax_mv; + int length_us; + int playing_pos; + bool playing_pattern; +}; + +struct qti_hap_config { + enum actutor_type act_type; + enum lra_res_sig_shape lra_shape; + enum lra_auto_res_mode lra_auto_res_mode; + enum wf_src ext_src; + u16 vmax_mv; + u16 ilim_ma; + u16 play_rate_us; + u8 brake[HAP_BRAKE_PATTERN_MAX]; + bool brake_en; + bool lra_auto_res_en; + bool use_ext_wf_src; +}; + +struct qti_hap_chip { + struct platform_device *pdev; + struct device *dev; + struct regmap *regmap; + struct input_dev *input_dev; + struct pwm_device *pwm_dev; + struct qti_hap_config config; + struct qti_hap_play_info play; + struct qti_hap_effect *predefined; + struct qti_hap_effect constant; + struct regulator *vdd_supply; + spinlock_t bus_lock; + ktime_t last_sc_time; + int play_irq; + int sc_irq; + int effects_count; + int sc_det_count; + u16 reg_base; + bool perm_disable; + bool play_irq_en; + bool vdd_enabled; +}; + +static int wf_repeat[8] = {1, 2, 4, 8, 16, 32, 64, 128}; +static int wf_s_repeat[4] = {1, 2, 4, 8}; + +static inline bool is_secure(u8 addr) +{ + return ((addr & 0xFF) > 0xD0); +} + +static int qti_haptics_read(struct qti_hap_chip *chip, + u8 addr, u8 *val, int len) +{ + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&chip->bus_lock, flags); + + rc = regmap_bulk_read(chip->regmap, chip->reg_base + addr, val, len); + if (rc < 0) + dev_err(chip->dev, "Reading addr 0x%x failed, rc=%d\n", + addr, rc); + spin_unlock_irqrestore(&chip->bus_lock, flags); + + return rc; +} + +static int qti_haptics_write(struct qti_hap_chip *chip, + u8 addr, u8 *val, int len) +{ + int rc = 0, i; + unsigned long flags; + + spin_lock_irqsave(&chip->bus_lock, flags); + if (is_secure(addr)) { + for (i = 0; i < len; i++) { + rc = 
regmap_write(chip->regmap, + chip->reg_base + REG_HAP_SEC_ACCESS, + 0xA5); + if (rc < 0) { + dev_err(chip->dev, "write SEC_ACCESS failed, rc=%d\n", + rc); + goto unlock; + } + + rc = regmap_write(chip->regmap, + chip->reg_base + addr + i, val[i]); + if (rc < 0) { + dev_err(chip->dev, "write val 0x%x to addr 0x%x failed, rc=%d\n", + val[i], addr + i, rc); + goto unlock; + } + } + } else { + if (len > 1) + rc = regmap_bulk_write(chip->regmap, + chip->reg_base + addr, val, len); + else + rc = regmap_write(chip->regmap, + chip->reg_base + addr, *val); + + if (rc < 0) + dev_err(chip->dev, "write addr 0x%x failed, rc=%d\n", + addr, rc); + } + + for (i = 0; i < len; i++) + dev_dbg(chip->dev, "Update addr 0x%x to val 0x%x\n", + addr + i, val[i]); + +unlock: + spin_unlock_irqrestore(&chip->bus_lock, flags); + return rc; +} + +static int qti_haptics_masked_write(struct qti_hap_chip *chip, u8 addr, + u8 mask, u8 val) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&chip->bus_lock, flags); + if (is_secure(addr)) { + rc = regmap_write(chip->regmap, + chip->reg_base + REG_HAP_SEC_ACCESS, + 0xA5); + if (rc < 0) { + dev_err(chip->dev, "write SEC_ACCESS failed, rc=%d\n", + rc); + goto unlock; + } + } + + rc = regmap_update_bits(chip->regmap, chip->reg_base + addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x failed, rc=%d\n", + addr, val, mask, rc); + + dev_dbg(chip->dev, "Update addr 0x%x to val 0x%x with mask 0x%x\n", + addr, val, mask); +unlock: + spin_unlock_irqrestore(&chip->bus_lock, flags); + + return rc; +} + +static void construct_constant_waveform_in_pattern( + struct qti_hap_play_info *play) +{ + struct qti_hap_chip *chip = container_of(play, + struct qti_hap_chip, play); + struct qti_hap_config *config = &chip->config; + struct qti_hap_effect *effect = play->effect; + int total_samples, samples, left, magnitude, i, j, k; + int delta = INT_MAX, delta_min = INT_MAX; + + /* Using play_rate_us in config for constant 
waveform */ + effect->play_rate_us = config->play_rate_us; + total_samples = play->length_us / effect->play_rate_us; + left = play->length_us % effect->play_rate_us; + + if (total_samples <= HAP_WAVEFORM_BUFFER_MAX) { + effect->pattern_length = total_samples; + effect->wf_s_repeat_n = 0; + effect->wf_repeat_n = 0; + } else { + /* + * Find a closest setting to achieve the constant waveform + * with the required length by using buffer waveform source: + * play_length_us = pattern_length * wf_s_repeat_n + * * wf_repeat_n * play_rate_us + */ + for (i = 0; i < ARRAY_SIZE(wf_repeat); i++) { + for (j = 0; j < ARRAY_SIZE(wf_s_repeat); j++) { + for (k = 1; k <= HAP_WAVEFORM_BUFFER_MAX; k++) { + samples = k * wf_s_repeat[j] * + wf_repeat[i]; + delta = abs(total_samples - samples); + if (delta < delta_min) { + delta_min = delta; + effect->pattern_length = k; + effect->wf_s_repeat_n = j; + effect->wf_repeat_n = i; + } + if (samples > total_samples) + break; + } + } + } + } + + if (left > 0 && effect->pattern_length < HAP_WAVEFORM_BUFFER_MAX) + effect->pattern_length++; + + play->length_us = effect->pattern_length * effect->play_rate_us; + dev_dbg(chip->dev, "total_samples = %d, pattern_length = %d, wf_s_repeat = %d, wf_repeat = %d\n", + total_samples, effect->pattern_length, + wf_s_repeat[effect->wf_s_repeat_n], + wf_repeat[effect->wf_repeat_n]); + + for (i = 0; i < effect->pattern_length; i++) { + magnitude = play->vmax_mv / HAP_VMAX_MV_LSB; + effect->pattern[i] = (u8)magnitude << HAP_WF_AMP_SHIFT; + } +} + +static int qti_haptics_config_wf_buffer(struct qti_hap_chip *chip) +{ + struct qti_hap_play_info *play = &chip->play; + struct qti_hap_effect *effect = play->effect; + u8 addr, pattern[HAP_WAVEFORM_BUFFER_MAX] = {0}; + int rc = 0; + size_t len; + + if (play->playing_pos == effect->pattern_length) { + dev_dbg(chip->dev, "pattern playing done\n"); + return 0; + } + + if (effect->pattern_length - play->playing_pos + >= HAP_WAVEFORM_BUFFER_MAX) + len = 
HAP_WAVEFORM_BUFFER_MAX; + else + len = effect->pattern_length - play->playing_pos; + + dev_dbg(chip->dev, "copy %d bytes start from %d\n", + (int)len, play->playing_pos); + memcpy(pattern, &effect->pattern[play->playing_pos], len); + + play->playing_pos += len; + + addr = REG_HAP_WF_S1; + rc = qti_haptics_write(chip, REG_HAP_WF_S1, pattern, + HAP_WAVEFORM_BUFFER_MAX); + if (rc < 0) + dev_err(chip->dev, "Program WF_SAMPLE failed, rc=%d\n", rc); + + return rc; +} + +static int qti_haptics_config_wf_repeat(struct qti_hap_chip *chip) +{ + struct qti_hap_effect *effect = chip->play.effect; + u8 addr, mask, val; + int rc = 0; + + addr = REG_HAP_WF_REPEAT; + mask = HAP_WF_REPEAT_MASK | HAP_WF_S_REPEAT_MASK; + val = effect->wf_repeat_n << HAP_WF_REPEAT_SHIFT; + val |= effect->wf_s_repeat_n; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "Program WF_REPEAT failed, rc=%d\n", rc); + + return rc; +} + +static int qti_haptics_play(struct qti_hap_chip *chip, bool play) +{ + int rc = 0; + u8 val = play ? HAP_PLAY_BIT : 0; + + rc = qti_haptics_write(chip, + REG_HAP_PLAY, &val, 1); + if (rc < 0) + dev_err(chip->dev, "%s playing haptics failed, rc=%d\n", + play ? "start" : "stop", rc); + + return rc; +} + +static int qti_haptics_module_en(struct qti_hap_chip *chip, bool en) +{ + int rc = 0; + u8 val = en ? HAP_EN_BIT : 0; + + rc = qti_haptics_write(chip, + REG_HAP_EN_CTL1, &val, 1); + if (rc < 0) + dev_err(chip->dev, "%s haptics failed, rc=%d\n", + en ? 
"enable" : "disable", rc); + + + return rc; +} + +static int qti_haptics_config_vmax(struct qti_hap_chip *chip, int vmax_mv) +{ + u8 addr, mask, val; + int rc; + + addr = REG_HAP_VMAX_CFG; + mask = HAP_VMAX_MV_MASK; + val = (vmax_mv / HAP_VMAX_MV_LSB) << HAP_VMAX_MV_SHIFT; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "write VMAX_CFG failed, rc=%d\n", + rc); + + return rc; +} + +static int qti_haptics_config_wf_src(struct qti_hap_chip *chip, + enum wf_src src) +{ + u8 addr, mask, val = 0; + int rc; + + addr = REG_HAP_SEL; + mask = HAP_WF_SOURCE_MASK | HAP_WF_TRIGGER_BIT; + val = src << HAP_WF_SOURCE_SHIFT; + if (src == EXT_WF_AUDIO || src == EXT_WF_PWM) + val |= HAP_WF_TRIGGER_BIT; + + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "set HAP_SEL failed, rc=%d\n", rc); + + return rc; +} + +static int qti_haptics_config_play_rate_us(struct qti_hap_chip *chip, + int play_rate_us) +{ + u8 addr, val[2]; + int tmp, rc; + + addr = REG_HAP_RATE_CFG1; + tmp = play_rate_us / HAP_PLAY_RATE_US_LSB; + val[0] = tmp & 0xff; + val[1] = (tmp >> 8) & 0xf; + rc = qti_haptics_write(chip, addr, val, 2); + if (rc < 0) + dev_err(chip->dev, "write play_rate failed, rc=%d\n", rc); + + return rc; +} + +static int qti_haptics_config_brake(struct qti_hap_chip *chip, u8 *brake) +{ + u8 addr, mask, val; + int i, rc; + bool en = true; + + addr = REG_HAP_BRAKE; + for (val = 0, i = 0; i < HAP_BRAKE_PATTERN_MAX; i++) + val |= (brake[i] & HAP_BRAKE_PATTERN_MASK) << + i * HAP_BRAKE_PATTERN_SHIFT; + + rc = qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "write brake pattern failed, rc=%d\n", rc); + return rc; + } + + if (val == 0) + en = false; + + /* Set BRAKE_EN only if brake pattern is non-zero */ + addr = REG_HAP_EN_CTL2; + mask = HAP_BRAKE_EN_BIT; + val = en; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "set EN_CTL2 failed, rc=%d\n", rc); 
+ + return rc; +} + +static int qti_haptics_load_constant_waveform(struct qti_hap_chip *chip) +{ + struct qti_hap_play_info *play = &chip->play; + struct qti_hap_config *config = &chip->config; + int rc = 0; + + rc = qti_haptics_module_en(chip, true); + if (rc < 0) + return rc; + + /* + * Using VMAX waveform source if playing length is >= 20ms, + * otherwise using buffer waveform source and calculate the + * pattern length and repeating times to achieve accurate + * playing time accuracy. + */ + if (play->length_us >= VMAX_MIN_PLAY_TIME_US) { + rc = qti_haptics_config_vmax(chip, play->vmax_mv); + if (rc < 0) + return rc; + + /* Set WF_SOURCE to VMAX */ + rc = qti_haptics_config_wf_src(chip, INT_WF_VMAX); + if (rc < 0) + return rc; + + play->playing_pattern = false; + } else { + rc = qti_haptics_config_vmax(chip, config->vmax_mv); + if (rc < 0) + return rc; + + play->effect = &chip->constant; + play->playing_pos = 0; + /* Format and config waveform in patterns */ + construct_constant_waveform_in_pattern(play); + rc = qti_haptics_config_wf_buffer(chip); + if (rc < 0) + return rc; + + rc = qti_haptics_config_wf_repeat(chip); + if (rc < 0) + return rc; + + /* Set WF_SOURCE to buffer */ + rc = qti_haptics_config_wf_src(chip, INT_WF_BUFFER); + if (rc < 0) + return rc; + + play->playing_pattern = true; + } + + return 0; +} + +static int qti_haptics_load_predefined_effect(struct qti_hap_chip *chip, + int effect_idx) +{ + struct qti_hap_play_info *play = &chip->play; + struct qti_hap_config *config = &chip->config; + int rc = 0; + + if (effect_idx >= chip->effects_count) + return -EINVAL; + + play->effect = &chip->predefined[effect_idx]; + play->playing_pos = 0; + + rc = qti_haptics_module_en(chip, true); + if (rc < 0) + return rc; + + rc = qti_haptics_config_vmax(chip, play->vmax_mv); + if (rc < 0) + return rc; + + /* override play-rate for ERM here, no need for LRA */ + if (config->act_type == ACT_ERM) { + rc = qti_haptics_config_play_rate_us(chip, + 
play->effect->play_rate_us); + if (rc < 0) + return rc; + } + + rc = qti_haptics_config_wf_buffer(chip); + if (rc < 0) + return rc; + + rc = qti_haptics_config_wf_repeat(chip); + if (rc < 0) + return rc; + + /* Set WF_SOURCE to buffer */ + rc = qti_haptics_config_wf_src(chip, INT_WF_BUFFER); + if (rc < 0) + return rc; + + play->playing_pattern = true; + + return 0; +} + +static irqreturn_t qti_haptics_play_irq_handler(int irq, void *data) +{ + struct qti_hap_chip *chip = (struct qti_hap_chip *)data; + struct qti_hap_play_info *play = &chip->play; + struct qti_hap_effect *effect = play->effect; + int rc; + + dev_dbg(chip->dev, "play_irq triggered\n"); + if (play->playing_pos == effect->pattern_length) { + dev_dbg(chip->dev, "waveform playing done\n"); + qti_haptics_play(chip, false); + if (chip->play_irq_en) { + disable_irq_nosync(chip->play_irq); + chip->play_irq_en = false; + } + + goto handled; + } + + /* Config to play remaining patterns */ + rc = qti_haptics_config_wf_repeat(chip); + if (rc < 0) + goto handled; + + rc = qti_haptics_config_wf_buffer(chip); + if (rc < 0) + goto handled; + +handled: + return IRQ_HANDLED; +} + +static irqreturn_t qti_haptics_sc_irq_handler(int irq, void *data) +{ + struct qti_hap_chip *chip = (struct qti_hap_chip *)data; + u8 addr, val; + ktime_t temp; + s64 sc_delta_time_us; + int rc; + + dev_dbg(chip->dev, "sc_irq triggered\n"); + addr = REG_HAP_STATUS1; + rc = qti_haptics_read(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "read HAP_STATUS1 failed, rc=%d\n", rc); + goto handled; + } + + if (!(val & HAP_SC_DET_BIT)) + goto handled; + + temp = ktime_get(); + sc_delta_time_us = ktime_us_delta(temp, chip->last_sc_time); + chip->last_sc_time = temp; + + if (sc_delta_time_us > HAP_SC_DET_TIME_US) + chip->sc_det_count = 0; + else + chip->sc_det_count++; + + addr = REG_HAP_SC_CLR; + val = HAP_SC_CLR_BIT; + rc = qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "write SC_CLR failed, rc=%d\n", rc); + 
goto handled; + } + + if (chip->sc_det_count > HAP_SC_DET_MAX_COUNT) { + rc = qti_haptics_module_en(chip, false); + if (rc < 0) + goto handled; + + dev_crit(chip->dev, "Short circuit persists, disable haptics\n"); + chip->perm_disable = true; + } + +handled: + return IRQ_HANDLED; +} + +static inline void get_play_length(struct qti_hap_play_info *play, + int *length_us) +{ + struct qti_hap_chip *chip = container_of(play, + struct qti_hap_chip, play); + struct qti_hap_effect *effect = play->effect; + int tmp; + + tmp = effect->pattern_length * effect->play_rate_us; + tmp *= wf_s_repeat[effect->wf_s_repeat_n]; + tmp *= wf_repeat[effect->wf_repeat_n]; + if (chip->config.brake_en) + tmp += effect->play_rate_us * HAP_BRAKE_PATTERN_MAX; + + *length_us = tmp; +} + +static int qti_haptics_upload_effect(struct input_dev *dev, + struct ff_effect *effect, struct ff_effect *old) +{ + struct qti_hap_chip *chip = input_get_drvdata(dev); + struct qti_hap_config *config = &chip->config; + struct qti_hap_play_info *play = &chip->play; + int rc = 0, tmp, i; + s16 level, data[CUSTOM_DATA_LEN]; + + if (chip->vdd_supply && !chip->vdd_enabled) { + rc = regulator_enable(chip->vdd_supply); + if (rc < 0) { + dev_err(chip->dev, "Enable VDD supply failed, rc=%d\n", + rc); + return rc; + } + chip->vdd_enabled = true; + } + + switch (effect->type) { + case FF_CONSTANT: + play->length_us = effect->replay.length * USEC_PER_MSEC; + level = effect->u.constant.level; + tmp = level * config->vmax_mv; + play->vmax_mv = tmp / 0x7fff; + dev_dbg(chip->dev, "upload constant effect, length = %dus, vmax_mv=%d\n", + play->length_us, play->vmax_mv); + + rc = qti_haptics_load_constant_waveform(chip); + if (rc < 0) { + dev_err(chip->dev, "Play constant waveform failed, rc=%d\n", + rc); + goto disable_vdd; + } + break; + + case FF_PERIODIC: + if (chip->effects_count == 0) { + rc = -EINVAL; + goto disable_vdd; + } + + if (effect->u.periodic.waveform != FF_CUSTOM) { + dev_err(chip->dev, "Only accept custom 
waveforms\n"); + rc = -EINVAL; + goto disable_vdd; + } + + level = effect->u.periodic.magnitude; + tmp = level * config->vmax_mv; + play->vmax_mv = tmp / 0x7fff; + + if (copy_from_user(data, effect->u.periodic.custom_data, + sizeof(s16) * CUSTOM_DATA_LEN)) { + rc = -EFAULT; + goto disable_vdd; + } + + for (i = 0; i < chip->effects_count; i++) + if (chip->predefined[i].id == + data[CUSTOM_DATA_EFFECT_IDX]) + break; + + if (i == chip->effects_count) { + dev_err(chip->dev, "predefined effect %d is NOT supported\n", + data[0]); + rc = -EINVAL; + goto disable_vdd; + } + + dev_dbg(chip->dev, "upload effect %d, vmax_mv=%d\n", + chip->predefined[i].id, play->vmax_mv); + rc = qti_haptics_load_predefined_effect(chip, i); + if (rc < 0) { + dev_err(chip->dev, "Play predefined effect %d failed, rc=%d\n", + chip->predefined[i].id, rc); + goto disable_vdd; + } + + get_play_length(play, &play->length_us); + data[CUSTOM_DATA_TIMEOUT_SEC_IDX] = + play->length_us / USEC_PER_SEC; + data[CUSTOM_DATA_TIMEOUT_MSEC_IDX] = + (play->length_us % USEC_PER_SEC) / USEC_PER_MSEC; + + /* + * Copy the custom data contains the play length back to + * userspace so that the userspace client can wait and + * send stop playing command after it's done. 
+ */ + if (copy_to_user(effect->u.periodic.custom_data, data, + sizeof(s16) * CUSTOM_DATA_LEN)) { + rc = -EFAULT; + goto disable_vdd; + } + break; + + default: + dev_err(chip->dev, "Unsupported effect type: %d\n", + effect->type); + break; + } + + return 0; +disable_vdd: + if (chip->vdd_supply && chip->vdd_enabled) { + rc = regulator_disable(chip->vdd_supply); + if (rc < 0) { + dev_err(chip->dev, "Disable VDD supply failed, rc=%d\n", + rc); + return rc; + } + chip->vdd_enabled = false; + } + return rc; +} + +static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) +{ + struct qti_hap_chip *chip = input_get_drvdata(dev); + struct qti_hap_play_info *play = &chip->play; + int rc = 0; + + dev_dbg(chip->dev, "playback, val = %d\n", val); + if (!!val) { + rc = qti_haptics_play(chip, true); + if (rc < 0) + return rc; + + if (play->playing_pattern) { + if (!chip->play_irq_en) { + enable_irq(chip->play_irq); + chip->play_irq_en = true; + } + } else { + if (chip->play_irq_en) { + disable_irq_nosync(chip->play_irq); + chip->play_irq_en = false; + } + } + } else { + play->length_us = 0; + rc = qti_haptics_play(chip, false); + if (rc < 0) + return rc; + + rc = qti_haptics_module_en(chip, false); + if (rc < 0) + return rc; + + if (chip->play_irq_en) { + disable_irq_nosync(chip->play_irq); + chip->play_irq_en = false; + } + } + + return rc; +} + +static int qti_haptics_erase(struct input_dev *dev, int effect_id) +{ + struct qti_hap_chip *chip = input_get_drvdata(dev); + int rc = 0; + + if (chip->vdd_supply && chip->vdd_enabled) { + rc = regulator_disable(chip->vdd_supply); + if (rc < 0) { + dev_err(chip->dev, "Disable VDD supply failed, rc=%d\n", + rc); + return rc; + } + chip->vdd_enabled = false; + } + + return rc; +} + +static int qti_haptics_hw_init(struct qti_hap_chip *chip) +{ + struct qti_hap_config *config = &chip->config; + u8 addr, val, mask; + int rc = 0; + + /* Config actuator type */ + addr = REG_HAP_CFG1; + val = config->act_type; + rc = 
qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "write actuator type failed, rc=%d\n", rc); + return rc; + } + + /* Config ilim_ma */ + addr = REG_HAP_ILIM_CFG; + val = config->ilim_ma == 400 ? 0 : 1; + rc = qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "write ilim_ma failed, rc=%d\n", rc); + return rc; + } + + /* Set HAP_EN_CTL3 */ + addr = REG_HAP_EN_CTL3; + val = HAP_HBRIDGE_EN_BIT | HAP_PWM_SIGNAL_EN_BIT | HAP_ILIM_EN_BIT | + HAP_ILIM_CC_EN_BIT | HAP_DAC_EN_BIT | HAP_PWM_CTL_EN_BIT; + if (config->act_type == ACT_LRA && config->lra_auto_res_en) + val |= HAP_AUTO_RES_RBIAS_EN_BIT; + rc = qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "set EN_CTL3 failed, rc=%d\n", rc); + return rc; + } + + /* Set ZX_CFG */ + addr = REG_HAP_ZX_CFG; + mask = HAP_ZX_DET_DEB_MASK; + val = ZX_DET_DEB_80US; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) { + dev_err(chip->dev, "write ZX_CFG failed, rc=%d\n", rc); + return rc; + } + + /* + * Config play rate: this is the resonance period for LRA, + * or the play duration of each waveform sample for ERM. 
+ */ + rc = qti_haptics_config_play_rate_us(chip, config->play_rate_us); + if (rc < 0) + return rc; + + /* Set default brake pattern */ + rc = qti_haptics_config_brake(chip, config->brake); + if (rc < 0) + return rc; + + /* Set external waveform source if it's used */ + if (config->use_ext_wf_src) { + rc = qti_haptics_config_wf_src(chip, config->ext_src); + if (rc < 0) + return rc; + } + + /* + * Skip configurations below for ERM actuator + * as they're only for LRA actuators + */ + if (config->act_type == ACT_ERM) + return 0; + + addr = REG_HAP_CFG2; + val = config->lra_shape; + rc = qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "write lra_sig_shape failed, rc=%d\n", rc); + return rc; + } + + /* Skip configurations below if auto-res-en is not set */ + if (!config->lra_auto_res_en) + return 0; + + addr = REG_HAP_AUTO_RES_CFG; + mask = HAP_AUTO_RES_MODE_BIT | HAP_CAL_EOP_EN_BIT | HAP_CAL_PERIOD_MASK; + val = config->lra_auto_res_mode << HAP_AUTO_RES_MODE_SHIFT; + val |= HAP_CAL_EOP_EN_BIT | HAP_CAL_OPT3_EVERY_8_PERIOD; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) { + dev_err(chip->dev, "set AUTO_RES_CFG failed, rc=%d\n", rc); + return rc; + } + + addr = REG_HAP_AUTO_RES_CTRL; + val = HAP_AUTO_RES_EN_BIT | HAP_SEL_AUTO_RES_PERIOD | AUTO_RES_EN_DLY(4) + | AUTO_RES_CNT_ERR_DELTA(2) | HAP_AUTO_RES_ERR_RECOVERY_BIT; + rc = qti_haptics_write(chip, addr, &val, 1); + if (rc < 0) { + dev_err(chip->dev, "set AUTO_RES_CTRL failed, rc=%d\n", + rc); + return rc; + } + + return 0; +} + +static int qti_haptics_parse_dt(struct qti_hap_chip *chip) +{ + struct qti_hap_config *config = &chip->config; + const struct device_node *node = chip->dev->of_node; + struct device_node *child_node; + struct qti_hap_effect *effect; + const char *str; + int rc = 0, tmp, i = 0, j; + u8 val; + + rc = of_property_read_u32(node, "reg", &tmp); + if (rc < 0) { + dev_err(chip->dev, "Failed to reg base, rc=%d\n", rc); + return rc; + } + 
chip->reg_base = (u16)tmp; + + chip->sc_irq = platform_get_irq_byname(chip->pdev, "hap-sc-irq"); + if (chip->sc_irq < 0) { + dev_err(chip->dev, "Failed to get hap-sc-irq\n"); + return chip->sc_irq; + } + + chip->play_irq = platform_get_irq_byname(chip->pdev, "hap-play-irq"); + if (chip->play_irq < 0) { + dev_err(chip->dev, "Failed to get hap-play-irq\n"); + return chip->play_irq; + } + + config->act_type = ACT_LRA; + rc = of_property_read_string(node, "qcom,actuator-type", &str); + if (!rc) { + if (strcmp(str, "erm") == 0) { + config->act_type = ACT_ERM; + } else if (strcmp(str, "lra") == 0) { + config->act_type = ACT_LRA; + } else { + dev_err(chip->dev, "Invalid actuator type: %s\n", + str); + return -EINVAL; + } + } + + config->vmax_mv = HAP_VMAX_MV_DEFAULT; + rc = of_property_read_u32(node, "qcom,vmax-mv", &tmp); + if (!rc) + config->vmax_mv = (tmp > HAP_VMAX_MV_MAX) ? + HAP_VMAX_MV_MAX : tmp; + + config->ilim_ma = HAP_ILIM_MA_DEFAULT; + rc = of_property_read_u32(node, "qcom,ilim-ma", &tmp); + if (!rc) + config->ilim_ma = (tmp >= HAP_ILIM_MA_MAX) ? + HAP_ILIM_MA_MAX : HAP_ILIM_MA_DEFAULT; + + config->play_rate_us = HAP_PLAY_RATE_US_DEFAULT; + rc = of_property_read_u32(node, "qcom,play-rate-us", &tmp); + if (!rc) + config->play_rate_us = (tmp >= HAP_PLAY_RATE_US_MAX) ? 
+ HAP_PLAY_RATE_US_MAX : tmp; + + tmp = of_property_count_elems_of_size(node, "qcom,brake-pattern", + sizeof(u8)); + if (tmp > 0) { + if (tmp != HAP_BRAKE_PATTERN_MAX) { + dev_err(chip->dev, "brake-pattern should be %d bytes\n", + HAP_BRAKE_PATTERN_MAX); + return -EINVAL; + } + + rc = of_property_read_u8_array(node, "qcom,brake-pattern", + config->brake, HAP_BRAKE_PATTERN_MAX); + if (rc < 0) { + dev_err(chip->dev, "Failed to get brake-pattern, rc=%d\n", + rc); + return rc; + } + + for (val = 0, j = 0; j < HAP_BRAKE_PATTERN_MAX; j++) + val |= (config->brake[j] & HAP_BRAKE_PATTERN_MASK) << + j * HAP_BRAKE_PATTERN_SHIFT; + + config->brake_en = (val != 0); + } + + if (of_find_property(node, "qcom,external-waveform-source", NULL)) { + if (!of_property_read_string(node, + "qcom,external-waveform-source", &str)) { + if (strcmp(str, "audio") == 0) { + config->ext_src = EXT_WF_AUDIO; + } else if (strcmp(str, "pwm") == 0) { + config->ext_src = EXT_WF_PWM; + } else { + dev_err(chip->dev, "Invalid external waveform source: %s\n", + str); + return -EINVAL; + } + } + config->use_ext_wf_src = true; + } + + if (of_find_property(node, "vdd-supply", NULL)) { + chip->vdd_supply = devm_regulator_get(chip->dev, "vdd"); + if (IS_ERR(chip->vdd_supply)) { + rc = PTR_ERR(chip->vdd_supply); + if (rc != -EPROBE_DEFER) + dev_err(chip->dev, "Failed to get vdd regulator"); + return rc; + } + } + + if (config->act_type == ACT_LRA) { + config->lra_shape = RES_SIG_SINE; + rc = of_property_read_string(node, + "qcom,lra-resonance-sig-shape", &str); + if (!rc) { + if (strcmp(str, "sine") == 0) { + config->lra_shape = RES_SIG_SINE; + } else if (strcmp(str, "square") == 0) { + config->lra_shape = RES_SIG_SQUARE; + } else { + dev_err(chip->dev, "Invalid resonance signal shape: %s\n", + str); + return -EINVAL; + } + } + + config->lra_auto_res_en = of_property_read_bool(node, + "qcom,lra-auto-resonance-en"); + + config->lra_auto_res_mode = AUTO_RES_MODE_ZXD; + rc = of_property_read_string(node, + 
"qcom,lra-auto-resonance-mode", &str); + if (!rc) { + if (strcmp(str, "zxd") == 0) { + config->lra_auto_res_mode = AUTO_RES_MODE_ZXD; + } else if (strcmp(str, "qwd") == 0) { + config->lra_auto_res_mode = AUTO_RES_MODE_QWD; + } else { + dev_err(chip->dev, "Invalid auto resonance mode: %s\n", + str); + return -EINVAL; + } + } + } + + chip->constant.pattern = devm_kcalloc(chip->dev, + HAP_WAVEFORM_BUFFER_MAX, + sizeof(u8), GFP_KERNEL); + if (!chip->constant.pattern) + return -ENOMEM; + + tmp = of_get_available_child_count(node); + if (tmp == 0) + return 0; + + chip->predefined = devm_kcalloc(chip->dev, tmp, + sizeof(*chip->predefined), GFP_KERNEL); + if (!chip->predefined) + return -ENOMEM; + + chip->effects_count = tmp; + + for_each_available_child_of_node(node, child_node) { + effect = &chip->predefined[i++]; + rc = of_property_read_u32(child_node, "qcom,effect-id", + &effect->id); + if (rc < 0) { + dev_err(chip->dev, "Read qcom,effect-id failed, rc=%d\n", + rc); + return rc; + } + + rc = of_property_count_elems_of_size(child_node, + "qcom,wf-pattern", sizeof(u8)); + if (rc < 0) { + dev_err(chip->dev, "Count qcom,wf-pattern property failed, rc=%d\n", + rc); + return rc; + } else if (rc == 0) { + dev_dbg(chip->dev, "qcom,wf-pattern has no data\n"); + return -EINVAL; + } + + effect->pattern_length = rc; + effect->pattern = devm_kcalloc(chip->dev, + effect->pattern_length, sizeof(u8), GFP_KERNEL); + if (!effect->pattern) + return -ENOMEM; + + rc = of_property_read_u8_array(child_node, "qcom,wf-pattern", + effect->pattern, effect->pattern_length); + if (rc < 0) { + dev_err(chip->dev, "Read qcom,wf-pattern property failed, rc=%d\n", + rc); + return rc; + } + + for (j = 0; j < effect->pattern_length; j++) + effect->pattern[j] = effect->pattern[j] << + HAP_WF_AMP_SHIFT; + + effect->play_rate_us = config->play_rate_us; + rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us", + &tmp); + if (rc < 0) + dev_dbg(chip->dev, "Read qcom,wf-play-rate-us failed, rc=%d\n", + 
rc); + else + effect->play_rate_us = tmp; + + if (config->act_type == ACT_LRA && + config->play_rate_us != effect->play_rate_us) { + dev_warn(chip->dev, "play rate should match with LRA resonance frequency\n"); + effect->play_rate_us = config->play_rate_us; + } + + rc = of_property_read_u32(child_node, "qcom,wf-repeat-count", + &tmp); + if (rc < 0) { + dev_dbg(chip->dev, "Read qcom,wf-repeat-count failed, rc=%d\n", + rc); + } else { + for (j = 0; j < ARRAY_SIZE(wf_repeat); j++) + if (tmp <= wf_repeat[j]) + break; + + effect->wf_repeat_n = j; + } + + rc = of_property_read_u32(child_node, "qcom,wf-s-repeat-count", + &tmp); + if (rc < 0) { + dev_dbg(chip->dev, "Read qcom,wf-s-repeat-count failed, rc=%d\n", + rc); + } else { + for (j = 0; j < ARRAY_SIZE(wf_s_repeat); j++) + if (tmp <= wf_s_repeat[j]) + break; + + effect->wf_s_repeat_n = j; + } + } + + return 0; +} + +static int qti_haptics_probe(struct platform_device *pdev) +{ + struct qti_hap_chip *chip; + struct input_dev *input_dev; + struct ff_device *ff; + int rc = 0, effect_count_max; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) + return -ENOMEM; + + chip->pdev = pdev; + chip->dev = &pdev->dev; + chip->regmap = dev_get_regmap(chip->dev->parent, NULL); + if (!chip->regmap) { + dev_err(chip->dev, "Failed to get regmap handle\n"); + return -ENXIO; + } + + rc = qti_haptics_parse_dt(chip); + if (rc < 0) { + dev_err(chip->dev, "parse device-tree failed, rc=%d\n", rc); + return rc; + } + + spin_lock_init(&chip->bus_lock); + + rc = qti_haptics_hw_init(chip); + if (rc < 0) { + dev_err(chip->dev, "parse device-tree failed, rc=%d\n", rc); + return rc; + } + + rc = devm_request_threaded_irq(chip->dev, chip->play_irq, NULL, + qti_haptics_play_irq_handler, + IRQF_ONESHOT, "hap_play_irq", chip); + if (rc < 0) { + dev_err(chip->dev, "request play-irq failed, rc=%d\n", rc); + return rc; + } + + 
disable_irq(chip->play_irq); + chip->play_irq_en = false; + + rc = devm_request_threaded_irq(chip->dev, chip->sc_irq, NULL, + qti_haptics_sc_irq_handler, + IRQF_ONESHOT, "hap_sc_irq", chip); + if (rc < 0) { + dev_err(chip->dev, "request sc-irq failed, rc=%d\n", rc); + return rc; + } + + input_dev->name = "vibrator"; + input_set_drvdata(input_dev, chip); + chip->input_dev = input_dev; + + input_set_capability(input_dev, EV_FF, FF_CONSTANT); + if (chip->effects_count != 0) { + input_set_capability(input_dev, EV_FF, FF_PERIODIC); + input_set_capability(input_dev, EV_FF, FF_CUSTOM); + } + + if (chip->effects_count + 1 > FF_EFFECT_COUNT_MAX) + effect_count_max = chip->effects_count + 1; + else + effect_count_max = FF_EFFECT_COUNT_MAX; + rc = input_ff_create(input_dev, effect_count_max); + if (rc < 0) { + dev_err(chip->dev, "create FF input device failed, rc=%d\n", + rc); + return rc; + } + + ff = input_dev->ff; + ff->upload = qti_haptics_upload_effect; + ff->playback = qti_haptics_playback; + ff->erase = qti_haptics_erase; + + rc = input_register_device(input_dev); + if (rc < 0) { + dev_err(chip->dev, "register input device failed, rc=%d\n", + rc); + goto destroy_ff; + } + + dev_set_drvdata(chip->dev, chip); + return 0; + +destroy_ff: + input_ff_destroy(chip->input_dev); + return rc; +} + +static int qti_haptics_remove(struct platform_device *pdev) +{ + struct qti_hap_chip *chip = dev_get_drvdata(&pdev->dev); + + input_ff_destroy(chip->input_dev); + dev_set_drvdata(chip->dev, NULL); + + return 0; +} + +static const struct of_device_id haptics_match_table[] = { + { .compatible = "qcom,haptics" }, + { .compatible = "qcom,pm660-haptics" }, + { .compatible = "qcom,pm8150b-haptics" }, + {}, +}; + +static struct platform_driver qti_haptics_driver = { + .driver = { + .name = "qcom,haptics", + .of_match_table = haptics_match_table, + }, + .probe = qti_haptics_probe, + .remove = qti_haptics_remove, +}; +module_platform_driver(qti_haptics_driver); + +MODULE_DESCRIPTION("QTI 
haptics driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index 29f99529b1876a9c995dfa16e0430ad8ca4706cb..cfcb32559925baf1acf070f908f3b91b1fc1b905 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client, bool max_baseline, u8 *value) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, max_baseline ? @@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client, bool iap, u8 *version) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, iap ? ETP_SMBUS_IAP_VERSION_CMD : @@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, u8 *clickpad) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_SM_VERSION_CMD, val); @@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_UNIQUEID_CMD, val); @@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client, bool iap, u16 *csum) { int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, iap ? 
ETP_SMBUS_FW_CHECKSUM_CMD : @@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); if (ret != 3) { @@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); if (ret != 3) { @@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client, { int ret; int error; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); if (ret != 3) { @@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client, { int error; u16 constant; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); if (error < 0) { @@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client) int len; int error; enum tp_mode mode; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; u16 password; @@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, struct device *dev = &client->dev; int error; u16 result; - u8 val[3]; + u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; /* * Due to the limitation of smbus protocol limiting diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index a246fc686bb728dbe48b2fc84b90a1734af60c66..6c4bbd38700e243b1eee15e8327532eb92af6e40 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = { "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN004a", /* W541 */ + "LEN0071", /* T480 */ + "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ + "LEN0073", /* X1 Carbon G5 (Elantech) */ + 
"LEN0092", /* X1 Carbon 6 */ + "LEN0096", /* X280 */ + "LEN0097", /* X280 -> ALPS trackpoint */ "LEN200f", /* T450s */ NULL }; diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c index 99aa77338399ebc1fba77c0e7ef4dc09cdea56c3..e30f6e4a5dfdc560cd5e498b5c81bc32eb83248e 100644 --- a/drivers/input/touchscreen/st/fts.c +++ b/drivers/input/touchscreen/st/fts.c @@ -228,7 +228,7 @@ static ssize_t fts_fwupdate_store(struct device *dev, ret = flash_burn(fwD, mode, !mode); if (ret < OK && ret != (ERROR_FW_NO_UPDATE | ERROR_FLASH_BURN_FAILED)) - logError(1, "%s flashProcedure: ERROR %02X\n", + logError(0, "%s flashProcedure: ERROR %02X\n", tag, ERROR_FLASH_PROCEDURE); logError(0, "%s flashing procedure Finished!\n", tag); @@ -3020,9 +3020,9 @@ static void fts_status_event_handler(struct fts_ts_info *info, case FTS_WATER_MODE_ON: case FTS_WATER_MODE_OFF: default: - logError(1, "%s %s Received unhandled status event = ", + logError(0, "%s %s Received unhandled status event = ", tag, __func__); - logError(1, "%02X %02X %02X %02X %02X %02X %02X %02X\n", + logError(0, "%02X %02X %02X %02X %02X %02X %02X %02X\n", event[0], event[1], event[2], event[3], event[4], event[5], event[6], event[7]); break; @@ -3324,12 +3324,12 @@ static int fts_interrupt_install(struct fts_ts_info *info) error = fts_disableInterrupt(); #ifdef FTS_USE_POLLING_MODE - logError(1, "%s Polling Mode\n"); + logError(0, "%s Polling Mode\n"); hrtimer_init(&info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); info->timer.function = fts_timer_func; hrtimer_start(&info->timer, ktime_set(1, 0), HRTIMER_MODE_REL); #else - logError(1, "%s Interrupt Mode\n", tag); + logError(0, "%s Interrupt Mode\n", tag); if (request_irq(info->client->irq, fts_interrupt_handler, IRQF_TRIGGER_LOW, info->client->name, info)) { logError(1, "%s Request irq failed\n", tag); @@ -3397,14 +3397,14 @@ int fts_chip_powercycle(struct fts_ts_info *info) { int error = 0; - logError(1, "%s %s: Power Cycle Starting...\n", 
tag, __func__); + logError(0, "%s %s: Power Cycle Starting...\n", tag, __func__); /** * if IRQ pin is short with DVDD a call to * the ISR will triggered when the regulator is turned off */ - logError(1, "%s %s: Disabling IRQ...\n", tag, __func__); + logError(0, "%s %s: Disabling IRQ...\n", tag, __func__); disable_irq_nosync(info->client->irq); if (info->pwr_reg) { error = regulator_disable(info->pwr_reg); @@ -3456,9 +3456,9 @@ int fts_chip_powercycle(struct fts_ts_info *info) release_all_touches(info); - logError(1, "%s %s: Enabling IRQ...\n", tag, __func__); + logError(0, "%s %s: Enabling IRQ...\n", tag, __func__); enable_irq(info->client->irq); - logError(1, "%s %s: Power Cycle Finished! ERROR CODE = %08x\n", + logError(0, "%s %s: Power Cycle Finished! ERROR CODE = %08x\n", tag, __func__, error); setSystemResettedUp(1); setSystemResettedDown(1); @@ -3469,7 +3469,7 @@ int fts_chip_powercycle2(struct fts_ts_info *info, unsigned long sleep) { int error = 0; - logError(1, "%s %s: Power Cycle Starting...\n", tag, __func__); + logError(0, "%s %s: Power Cycle Starting...\n", tag, __func__); if (info->pwr_reg) { error = regulator_disable(info->pwr_reg); @@ -3524,7 +3524,7 @@ int fts_chip_powercycle2(struct fts_ts_info *info, unsigned long sleep) /*before reset clear all slot */ release_all_touches(info); - logError(1, "%s %s: Power Cycle Finished! ERROR CODE = %08x\n", + logError(0, "%s %s: Power Cycle Finished! 
ERROR CODE = %08x\n", tag, __func__, error); setSystemResettedUp(1); setSystemResettedDown(1); @@ -3536,13 +3536,13 @@ static int fts_init_afterProbe(struct fts_ts_info *info) int error = 0; /* system reset */ - error = cleanUp(0); + error = cleanUp(1); /* enable the features and the sensing */ error |= fts_mode_handler(info, 0); /* enable the interrupt */ - error |= fts_enableInterrupt(); + /* error |= fts_enableInterrupt(); */ #if defined(CONFIG_FB_MSM) error |= fb_register_client(&info->notifier); @@ -3835,6 +3835,47 @@ static int fts_mode_handler(struct fts_ts_info *info, int force) return res; } +int fts_chip_power_switch(struct fts_ts_info *info, int on) +{ + int error = 0; + + logError(0, "%s %s:will set power mode %d...\n", tag, __func__, on); + if (on == 0) { + if (info->pwr_reg) { + error = regulator_disable(info->pwr_reg); + if (error < 0) + logError(1, "%s %s: Failed to disable DVDD\n", + tag, __func__); + } + + if (info->bus_reg) { + error = regulator_disable(info->bus_reg); + if (error < 0) + logError(1, "%s %s: Failed to disable AVDD\n", + tag, __func__); + + } + if (info->bdata->reset_gpio != GPIO_NOT_DEFINED) + gpio_set_value(info->bdata->reset_gpio, 0); + } else if (on == 1) { + if (info->bus_reg) { + error = regulator_enable(info->bus_reg); + if (error < 0) + logError(1, "%s %s: Failed to enable AVDD\n", + tag, __func__); + } + if (info->pwr_reg) { + error = regulator_enable(info->pwr_reg); + if (error < 0) + logError(1, "%s %s: Failed to enable DVDD\n", + tag, __func__); + } + + } + + return error; +} + static void fts_resume_work(struct work_struct *work) { @@ -3845,22 +3886,19 @@ static void fts_resume_work(struct work_struct *work) __pm_wakeup_event(&info->wakeup_source, HZ); info->resume_bit = 1; + fts_chip_power_switch(info, 1); #ifdef USE_NOISE_PARAM readNoiseParameters(noise_params); #endif - fts_system_reset(); - + cleanUp(1); #ifdef USE_NOISE_PARAM writeNoiseParameters(noise_params); #endif release_all_touches(info); - 
fts_mode_handler(info, 0); info->sensor_sleep = false; - - fts_enableInterrupt(); } @@ -3875,11 +3913,12 @@ static void fts_suspend_work(struct work_struct *work) info->resume_bit = 0; fts_mode_handler(info, 0); - release_all_touches(info); info->sensor_sleep = true; - fts_enableInterrupt(); + fts_disableInterrupt(); + fts_chip_power_switch(info, 0); + } @@ -4183,13 +4222,13 @@ static int fts_probe(struct i2c_client *client, int retval; int skip_5_1 = 0; - logError(1, "%s %s: driver probe begin!\n", tag, __func__); + logError(0, "%s %s: driver probe begin!\n", tag, __func__); - logError(1, "%s SET I2C Functionality and Dev INFO:\n", tag); + logError(0, "%s SET I2C Functionality and Dev INFO:\n", tag); openChannel(client); /* logError(1, "%s driver ver. %s (built on %s, %s)\n", tag,*/ /* FTS_TS_DRV_VERSION, __DATE__, __TIME__);*/ - logError(1, "%s driver ver. %s (built on)\n", tag, FTS_TS_DRV_VERSION); + logError(0, "%s driver ver. %s (built on)\n", tag, FTS_TS_DRV_VERSION); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { logError(1, "%s Unsupported I2C functionality\n", tag); @@ -4209,7 +4248,7 @@ static int fts_probe(struct i2c_client *client, info->client = client; i2c_set_clientdata(client, info); - logError(1, "%s i2c address: %x\n", tag, client->addr); + logError(0, "%s i2c address: %x\n", tag, client->addr); info->dev = &info->client->dev; if (dp) { info->bdata = devm_kzalloc(&client->dev, @@ -4223,7 +4262,7 @@ static int fts_probe(struct i2c_client *client, parse_dt(&client->dev, info->bdata); } - logError(1, "%s SET Regulators:\n", tag); + logError(0, "%s SET Regulators:\n", tag); retval = fts_get_reg(info, true); if (retval < 0) { logError(1, "%s ERROR: %s: Failed to get regulators\n", @@ -4238,7 +4277,7 @@ static int fts_probe(struct i2c_client *client, goto ProbeErrorExit_2; } - logError(1, "%s SET GPIOS:\n", tag); + logError(0, "%s SET GPIOS:\n", tag); retval = fts_set_gpio(info); if (retval < 0) { logError(1, "%s %s: ERROR Failed to set 
up GPIO's\n", @@ -4257,15 +4296,7 @@ static int fts_probe(struct i2c_client *client, goto ProbeErrorExit_3; } - error = fts_init_afterProbe(info); - if (error < OK) { - logError(1, - "%s Cannot initialize the hardware device ERROR %08X\n", - tag, error); - goto ProbeErrorExit_3; - } - - logError(1, "%s SET Event Handler:\n", tag); + logError(0, "%s SET Event Handler:\n", tag); /*wake_lock_init(&info->wakelock, WAKE_LOCK_SUSPEND, "fts_tp");*/ wakeup_source_init(&info->wakeup_source, "fts_tp"); /*info->event_wq = create_singlethread_workqueue("fts-event-queue");*/ @@ -4282,7 +4313,7 @@ static int fts_probe(struct i2c_client *client, INIT_WORK(&info->resume_work, fts_resume_work); INIT_WORK(&info->suspend_work, fts_suspend_work); - logError(1, "%s SET Input Device Property:\n", tag); + logError(0, "%s SET Input Device Property:\n", tag); /*info->dev = &info->client->dev;*/ info->input_dev = input_allocate_device(); if (!info->input_dev) { @@ -4384,7 +4415,7 @@ static int fts_probe(struct i2c_client *client, #endif /* init hardware device */ - logError(1, "%s Device Initialization:\n", tag); + logError(0, "%s Device Initialization:\n", tag); error = fts_init(info); if (error < OK) { logError(1, "%s Cannot initialize the device ERROR %08X\n", @@ -4416,7 +4447,7 @@ static int fts_probe(struct i2c_client *client, /*goto ProbeErrorExit_6;*/ /*}*/ - logError(1, "%s SET Device File Nodes:\n", tag); + logError(0, "%s SET Device File Nodes:\n", tag); /* sysfs stuff */ info->attrs.attrs = fts_attr_group; error = sysfs_create_group(&client->dev.kobj, &info->attrs); @@ -4472,6 +4503,13 @@ static int fts_probe(struct i2c_client *client, } #endif + error = fts_init_afterProbe(info); + if (error < OK) { + logError(1, + "%s Cannot initialize the hardware device ERROR %08X\n", + tag, error); + goto ProbeErrorExit_11; + } logError(1, "%s Probe Finished!\n", tag); return OK; diff --git a/drivers/input/touchscreen/st/fts_lib/ftsCompensation.c 
b/drivers/input/touchscreen/st/fts_lib/ftsCompensation.c index d394ca64be4067b601f1d262ea1bec76f7032a90..a2de76226197dc4c091ecb03f46b226eca89196c 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsCompensation.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsCompensation.c @@ -575,12 +575,12 @@ int readChipInfo(int doRequest) ftsInfo.u8_ftsaVer = data[index++]; ftsInfo.u8_tchRptVer = data[index++]; - logError(1, "%s External Release = ", tag); + logError(0, "%s External Release = ", tag); for (i = 0; i < EXTERNAL_RELEASE_INFO_SIZE; i++) { ftsInfo.u8_extReleaseInfo[i] = data[index++]; - logError(1, "%02X ", ftsInfo.u8_extReleaseInfo[i]); + logError(0, "%02X ", ftsInfo.u8_extReleaseInfo[i]); } - logError(1, "\n"); + logError(0, "\n"); for (i = 0; i < sizeof(ftsInfo.u8_custInfo); i++) ftsInfo.u8_custInfo[i] = data[index++]; @@ -606,10 +606,10 @@ int readChipInfo(int doRequest) index += 2; ftsInfo.u8_scrForceLen = data[index++]; - logError(1, "%s Force Len = %d\n", tag, ftsInfo.u8_scrForceLen); + logError(0, "%s Force Len = %d\n", tag, ftsInfo.u8_scrForceLen); ftsInfo.u8_scrSenseLen = data[index++]; - logError(1, "%s Sense Len = %d\n", tag, ftsInfo.u8_scrSenseLen); + logError(0, "%s Sense Len = %d\n", tag, ftsInfo.u8_scrSenseLen); for (i = 0; i < 8; i++) ftsInfo.u64_scrForceEn[i] = data[index++]; @@ -618,7 +618,7 @@ int readChipInfo(int doRequest) ftsInfo.u64_scrSenseEn[i] = data[index++]; ftsInfo.u8_msKeyLen = data[index++]; - logError(1, "%s MS Key Len = %d\n", tag, ftsInfo.u8_msKeyLen); + logError(0, "%s MS Key Len = %d\n", tag, ftsInfo.u8_msKeyLen); for (i = 0; i < 8; i++) ftsInfo.u64_msKeyForceEn[i] = data[index++]; @@ -627,7 +627,7 @@ int readChipInfo(int doRequest) ftsInfo.u64_msKeySenseEn[i] = data[index++]; ftsInfo.u8_ssKeyLen = data[index++]; - logError(1, "%s SS Key Len = %d\n", tag, ftsInfo.u8_ssKeyLen); + logError(0, "%s SS Key Len = %d\n", tag, ftsInfo.u8_ssKeyLen); for (i = 0; i < 8; i++) ftsInfo.u64_ssKeyForceEn[i] = data[index++]; @@ -647,25 +647,25 
@@ int readChipInfo(int doRequest) ftsInfo.u8_msScrConfigTuneVer = data[index++]; - logError(1, "%s CFG MS TUNING VERSION = %02X\n", + logError(0, "%s CFG MS TUNING VERSION = %02X\n", tag, ftsInfo.u8_msScrConfigTuneVer); ftsInfo.u8_msScrLpConfigTuneVer = data[index++]; ftsInfo.u8_msScrHwulpConfigTuneVer = data[index++]; ftsInfo.u8_msKeyConfigTuneVer = data[index++]; ftsInfo.u8_ssTchConfigTuneVer = data[index++]; - logError(1, "%s CFG SS TUNING VERSION = %02X\n", + logError(0, "%s CFG SS TUNING VERSION = %02X\n", tag, ftsInfo.u8_ssTchConfigTuneVer); ftsInfo.u8_ssKeyConfigTuneVer = data[index++]; ftsInfo.u8_ssHvrConfigTuneVer = data[index++]; ftsInfo.u8_frcTchConfigTuneVer = data[index++]; ftsInfo.u8_msScrCxmemTuneVer = data[index++]; - logError(1, "%s CX MS TUNING VERSION = %02X\n", + logError(0, "%s CX MS TUNING VERSION = %02X\n", tag, ftsInfo.u8_msScrCxmemTuneVer); ftsInfo.u8_msScrLpCxmemTuneVer = data[index++]; ftsInfo.u8_msScrHwulpCxmemTuneVer = data[index++]; ftsInfo.u8_msKeyCxmemTuneVer = data[index++]; ftsInfo.u8_ssTchCxmemTuneVer = data[index++]; - logError(1, "%s CX SS TUNING VERSION = %02X\n", + logError(0, "%s CX SS TUNING VERSION = %02X\n", tag, ftsInfo.u8_ssTchCxmemTuneVer); ftsInfo.u8_ssKeyCxmemTuneVer = data[index++]; ftsInfo.u8_ssHvrCxmemTuneVer = data[index++]; @@ -675,7 +675,7 @@ int readChipInfo(int doRequest) ((data[index + 1] & 0x000000FF) << 8) + (data[index] & 0x000000FF); index += 4; - logError(1, "%s MP SIGNATURE = %08X\n", tag, ftsInfo.u32_mpPassFlag); + logError(0, "%s MP SIGNATURE = %08X\n", tag, ftsInfo.u32_mpPassFlag); ftsInfo.u32_featEn = ((data[index + 3] & 0x000000FF) << 24) + ((data[index + 2] & 0x000000FF) << 16) + ((data[index + 1] & 0x000000FF) << 8) + @@ -686,31 +686,31 @@ int readChipInfo(int doRequest) ((data[index + 1] & 0x000000FF) << 8) + (data[index] & 0x000000FF); index += 4; - logError(1, "%s FEATURES = %08X\n", tag, ftsInfo.u32_echoEn); + logError(0, "%s FEATURES = %08X\n", tag, ftsInfo.u32_echoEn); 
ftsInfo.u8_sideTchConfigTuneVer = data[index++]; ftsInfo.u8_sideTchCxmemTuneVer = data[index++]; ftsInfo.u8_sideTchForceLen = data[index++]; - logError(1, "%s Side Touch Force Len = %d\n", + logError(0, "%s Side Touch Force Len = %d\n", tag, ftsInfo.u8_sideTchForceLen); ftsInfo.u8_sideTchSenseLen = data[index++]; - logError(1, "%s Side Touch Sense Len = %d\n", + logError(0, "%s Side Touch Sense Len = %d\n", tag, ftsInfo.u8_sideTchSenseLen); for (i = 0; i < 8; i++) ftsInfo.u64_sideTchForceEn[i] = data[index++]; for (i = 0; i < 8; i++) ftsInfo.u64_sideTchSenseEn[i] = data[index++]; ftsInfo.u8_errSign = data[index++]; - logError(1, "%s ERROR SIGNATURE = %02X\n", tag, ftsInfo.u8_errSign); + logError(0, "%s ERROR SIGNATURE = %02X\n", tag, ftsInfo.u8_errSign); if (ftsInfo.u8_errSign == ERROR_SIGN_HEAD) { - logError(1, "%s Correct Error Signature found!\n", tag); + logError(0, "%s Correct Error Signature found!\n", tag); u8ToU16(&data[index], &ftsInfo.u16_errOffset); } else { logError(1, "%s Error Signature NOT FOUND!\n", tag); ftsInfo.u16_errOffset = INVALID_ERROR_OFFS; } - logError(1, "%s ERROR OFFSET = %04X\n", tag, ftsInfo.u16_errOffset); + logError(0, "%s ERROR OFFSET = %04X\n", tag, ftsInfo.u16_errOffset); index += 2; - logError(1, "%s Parsed %d bytes!\n", tag, index); + logError(0, "%s Parsed %d bytes!\n", tag, index); if (index != CHIP_INFO_SIZE + 3) { @@ -720,7 +720,7 @@ int readChipInfo(int doRequest) return ERROR_OP_NOT_ALLOW; } - logError(1, "%s Chip Info Read DONE!\n", tag); + logError(0, "%s Chip Info Read DONE!\n", tag); return OK; FAIL: diff --git a/drivers/input/touchscreen/st/fts_lib/ftsError.c b/drivers/input/touchscreen/st/fts_lib/ftsError.c index bb2f2808c45b96f6a3cdc5fb3ef56a806c5e7345..df40b147328355c3cca9d55c1b821945e7ae6883 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsError.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsError.c @@ -109,7 +109,7 @@ int dumpErrorInfo(void) tag, __func__, ret); return ret; } - logError(1, "%s %s: Error Info 
=\n", tag, __func__); + logError(0, "%s %s: Error Info =\n", tag, __func__); u8ToU32(data, &sign); if (sign != ERROR_SIGNATURE) logError(1, "%s %s:Wrong Signature! Data may be invalid!\n", @@ -147,7 +147,7 @@ int errorHandler(u8 *event, int size) return ERROR_OP_NOT_ALLOW; } - logError(1, "%s %s: Starting handling...\n", tag, __func__); + logError(0, "%s %s: Starting handling...\n", tag, __func__); //TODO: write an error log for undefinied command subtype 0xBA switch (event[1]) { case EVENT_TYPE_ESD_ERROR: //esd @@ -251,8 +251,7 @@ int errorHandler(u8 *event, int size) break; default: - logError(1, - "%s %s: No Action taken!\n", tag, __func__); + logError(0, "%s %s: No Action taken!\n", tag, __func__); break; } logError(1, "%s %s: handling Finished! res = %08X\n", diff --git a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c index e667968b426a28e87610009d8b6bb47bb4aad983..d46598547edc68a04d27102debe80a1d0cf07d75 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsFlash.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsFlash.c @@ -122,7 +122,7 @@ int getFWdata(const char *pathToFile, u8 **data, int *size, int from) struct device *dev = NULL; int res; - logError(1, "%s %s starting...\n", tag, __func__); + logError(0, "%s %s starting...\n", tag, __func__); switch (from) { #ifdef FW_H_FILE case 1: @@ -139,7 +139,7 @@ int getFWdata(const char *pathToFile, u8 **data, int *size, int from) memcpy(*data, (u8 *)FW_ARRAY_NAME, (*size)); break; #endif - default: + case 2: logError(1, "%s Read FW from BIN file!\n", tag); dev = getDev(); @@ -160,7 +160,7 @@ int getFWdata(const char *pathToFile, u8 **data, int *size, int from) memcpy(*data, (u8 *)fw->data, (*size)); release_firmware(fw); } else { - logError(1, "%s %s:No File found! ERROR %08X\n", + logError(0, "%s %s:No File found! 
ERROR %08X\n", tag, __func__, ERROR_FILE_NOT_FOUND); return ERROR_FILE_NOT_FOUND; } @@ -169,8 +169,13 @@ int getFWdata(const char *pathToFile, u8 **data, int *size, int from) tag, __func__, ERROR_OP_NOT_ALLOW); return ERROR_OP_NOT_ALLOW; } + break; + default: + return ERROR_OP_NOT_ALLOW; + /* break; */ } - logError(1, "%s %s:Finshed!\n", tag, __func__); + + logError(0, "%s %s:Finshed!\n", tag, __func__); return OK; } @@ -183,7 +188,7 @@ int readFwFile(const char *path, struct Firmware *fw, int keep_cx) res = getFWdata(path, &orig_data, &orig_size, LOAD_FW_FROM); if (res < OK) { - logError(1, "%s %s:impossible retrieve FW... ERROR %08X\n", + logError(0, "%s %s:impossible retrieve FW... ERROR %08X\n", tag, __func__, ERROR_MEMH_READ); return (res | ERROR_MEMH_READ); @@ -208,7 +213,7 @@ int flashProcedure(const char *path, int force, int keep_cx) logError(0, "%s Reading Fw file...\n", tag); res = readFwFile(path, &fw, keep_cx); if (res < OK) { - logError(1, "%s %s: ERROR %02X\n", + logError(0, "%s %s: ERROR %02X\n", tag, __func__, (res | ERROR_FLASH_PROCEDURE)); kfree(fw.data); return (res | ERROR_FLASH_PROCEDURE); diff --git a/drivers/input/touchscreen/st/fts_lib/ftsIO.c b/drivers/input/touchscreen/st/fts_lib/ftsIO.c index a1f3862d61f4a2036765ec52596202c5a59089a1..475327411bdaeed47080441aa026b0632e38f6c0 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsIO.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsIO.c @@ -77,7 +77,7 @@ int openChannel(struct i2c_client *clt) { client = clt; I2CSAD = clt->addr; - logError(1, "%s %s: SAD: %02X\n", tag, __func__, I2CSAD); + logError(0, "%s %s: SAD: %02X\n", tag, __func__, I2CSAD); return OK; } diff --git a/drivers/input/touchscreen/st/fts_lib/ftsTest.c b/drivers/input/touchscreen/st/fts_lib/ftsTest.c index edcdee2d1dc158a73e6de0fa088597ef5fc6dbc2..c29e5ed598e308fb22cd797118d3b132b7f04d48 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsTest.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsTest.c @@ -818,9 +818,9 @@ int 
production_test_ms_raw(char *path_limits, int stop_on_fail, &trows, &tcolumns); if (ret < 0 || (trows != 1 || tcolumns != 2)) { - logError(1, "%s %s:MS_RAW_MIN_MAX failed...", + logError(0, "%s %s:MS_RAW_MIN_MAX failed...", tag, __func__); - logError(1, "ERROR %02X\n", + logError(0, "ERROR %02X\n", ERROR_PROD_TEST_DATA); ret |= ERROR_PROD_TEST_DATA; goto ERROR_LIMITS; @@ -832,9 +832,9 @@ int production_test_ms_raw(char *path_limits, int stop_on_fail, thresholds[0], thresholds[1]); if (ret != OK) { - logError(1, "%s %s:MS RAW failed...", + logError(0, "%s %s:MS RAW failed...", tag, __func__); - logError(1, "ERROR COUNT = %d\n", ret); + logError(0, "ERROR COUNT = %d\n", ret); logError(0, "%s MS RAW MIN MAX TEST:...", tag); logError(0, "FAIL\n\n", tag); count_fail += 1; @@ -1553,9 +1553,9 @@ int production_test_ms_key_cx(char *path_limits, int stop_on_fail, //read MS compensation data ret = readMutualSenseCompensationData(MS_KEY, &msCompData); if (ret < 0) { - logError(1, "%s production_test_data: ", tag); - logError(1, "readMutualSenseCompensationData failed... "); - logError(1, "ERROR %02X\n", ERROR_PROD_TEST_DATA); + logError(0, "%s production_test_data: ", tag); + logError(0, "readMutualSenseCompensationData failed... "); + logError(0, "ERROR %02X\n", ERROR_PROD_TEST_DATA); return (ret | ERROR_PROD_TEST_DATA); } @@ -1575,10 +1575,10 @@ int production_test_ms_key_cx(char *path_limits, int stop_on_fail, &trows, &tcolumns); if (ret < 0 || (trows != 1 || tcolumns != 2)) { - logError(1, "%s production_test_data: ", tag); - logError(1, "parseProductionTestLimits "); - logError(1, "MS_KEY_CX1_MIN_MAX failed... "); - logError(1, "ERROR %02X\n", ERROR_PROD_TEST_DATA); + logError(0, "%s production_test_data: ", tag); + logError(0, "parseProductionTestLimits "); + logError(0, "MS_KEY_CX1_MIN_MAX failed... 
"); + logError(0, "ERROR %02X\n", ERROR_PROD_TEST_DATA); ret |= ERROR_PROD_TEST_DATA; goto ERROR_LIMITS; } @@ -3628,9 +3628,9 @@ int production_test_data(char *path_limits, int stop_on_fail, int res = OK, ret; if (todo == NULL) { - logError(1, "%s %s: ", tag, __func__); - logError(1, "No TestToDo specified!! "); - logError(1, "ERROR = %02X\n", + logError(0, "%s %s: ", tag, __func__); + logError(0, "No TestToDo specified!! "); + logError(0, "ERROR = %02X\n", (ERROR_OP_NOT_ALLOW | ERROR_PROD_TEST_DATA)); return (ERROR_OP_NOT_ALLOW | ERROR_PROD_TEST_DATA); } @@ -3639,9 +3639,9 @@ int production_test_data(char *path_limits, int stop_on_fail, ret = production_test_ms_raw(path_limits, stop_on_fail, todo); res |= ret; if (ret < 0) { - logError(1, "%s %s: ", tag, __func__); - logError(1, "production_test_ms_raw failed... "); - logError(1, "ERROR = %02X\n", ret); + logError(0, "%s %s: ", tag, __func__); + logError(0, "production_test_ms_raw failed... "); + logError(0, "ERROR = %02X\n", ret); if (stop_on_fail == 1) goto END; } @@ -3649,9 +3649,9 @@ int production_test_data(char *path_limits, int stop_on_fail, ret = production_test_ms_cx(path_limits, stop_on_fail, todo); res |= ret; if (ret < 0) { - logError(1, "%s %s: ", tag, __func__); - logError(1, "production_test_ms_cx failed... "); - logError(1, "ERROR = %02X\n", ret); + logError(0, "%s %s: ", tag, __func__); + logError(0, "production_test_ms_cx failed... "); + logError(0, "ERROR = %02X\n", ret); if (stop_on_fail == 1) goto END; } @@ -3659,9 +3659,9 @@ int production_test_data(char *path_limits, int stop_on_fail, ret = production_test_ss_raw(path_limits, stop_on_fail, todo); res |= ret; if (ret < 0) { - logError(1, "%s %s: ", tag, __func__); - logError(1, "production_test_ss_raw failed... "); - logError(1, "ERROR = %02X\n", ret); + logError(0, "%s %s: ", tag, __func__); + logError(0, "production_test_ss_raw failed... 
"); + logError(0, "ERROR = %02X\n", ret); if (stop_on_fail == 1) goto END; } @@ -3669,9 +3669,9 @@ int production_test_data(char *path_limits, int stop_on_fail, ret = production_test_ss_ix_cx(path_limits, stop_on_fail, todo); res |= ret; if (ret < 0) { - logError(1, "%s %s: ", tag, __func__); - logError(1, "production_test_ss_ix_cx failed... "); - logError(1, "ERROR = %02X\n", ret); + logError(0, "%s %s: ", tag, __func__); + logError(0, "production_test_ss_ix_cx failed... "); + logError(0, "ERROR = %02X\n", ret); if (stop_on_fail == 1) goto END; } @@ -3708,7 +3708,7 @@ int save_mp_flag(u32 signature) if (res < OK) { logError(1, "%s %s: ERROR %08X ...\n", tag, __func__, res); } else { - logError(1, "%s Saving Flag DONE!\n", tag); + logError(0, "%s Saving Flag DONE!\n", tag); res = OK; } return res; @@ -3741,7 +3741,7 @@ int parseProductionTestLimits(char *path, char *label, #endif if (fd != 0) { - logError(1, "%s %s: ERROR %02X\n", + logError(0, "%s %s: ERROR %02X\n", tag, __func__, ERROR_FILE_NOT_FOUND); return ERROR_FILE_NOT_FOUND; } diff --git a/drivers/input/touchscreen/st/fts_lib/ftsTool.c b/drivers/input/touchscreen/st/fts_lib/ftsTool.c index a5b4c01fc3122eae0b5df93aba952b700f18c680..712d102e107bb57a155690bb03a166c983ca4d21 100644 --- a/drivers/input/touchscreen/st/fts_lib/ftsTool.c +++ b/drivers/input/touchscreen/st/fts_lib/ftsTool.c @@ -699,7 +699,7 @@ int pollForEvent(int *event_to_search, int event_bytes, temp = printHex("ERROR EVENT = ", readData, FIFO_EVENT_SIZE); if (temp != NULL) - logError(1, "%s %s", tag, temp); + logError(0, "%s %s", tag, temp); kfree(temp); count_err++; err_handling = errorHandler(readData, FIFO_EVENT_SIZE); @@ -715,13 +715,13 @@ int pollForEvent(int *event_to_search, int event_bytes, temp = printHex("READ EVENT = ", readData, FIFO_EVENT_SIZE); if (temp != NULL) - logError(1, "%s %s", tag, temp); + logError(0, "%s %s", tag, temp); kfree(temp); } if (readData[0] == EVENTID_CONTROL_READY && event_to_search[0] != 
EVENTID_CONTROL_READY) { - logError(1, "Unmanned Controller Ready Event!"); - logError(1, "%s %s:Setting reset flags...\n", + logError(0, "Unmanned Controller Ready Event!"); + logError(0, "%s %s:Setting reset flags...\n", tag, __func__); setSystemResettedUp(1); setSystemResettedDown(1); @@ -742,7 +742,7 @@ int pollForEvent(int *event_to_search, int event_bytes, } stopStopWatch(&clock); if ((retry >= time_to_count) && find != 1) { - logError(1, "%s %s: ERROR %02X\n", + logError(0, "%s %s: ERROR %02X\n", tag, __func__, ERROR_TIMEOUT); return ERROR_TIMEOUT; } @@ -756,7 +756,7 @@ int pollForEvent(int *event_to_search, int event_bytes, logError(0, "Number of errors found = %d\n", count_err); return count_err; } - logError(1, "%s %s: ERROR %02X\n", tag, __func__, ERROR_I2C_R); + logError(0, "%s %s: ERROR %02X\n", tag, __func__, ERROR_I2C_R); return ERROR_I2C_R; } @@ -905,7 +905,7 @@ int attempt_function(int(*code)(void), unsigned long wait_before_retry, void setResetGpio(int gpio) { reset_gpio = gpio; - logError(1, "%s %s: reset_gpio = %d\n", tag, __func__, reset_gpio); + logError(0, "%s %s: reset_gpio = %d\n", tag, __func__, reset_gpio); } int fts_system_reset(void) @@ -939,7 +939,7 @@ int fts_system_reset(void) res = pollForEvent(&event_to_search, 1, readData, GENERAL_TIMEOUT); if (res < OK) { - logError(1, "%s %s: ERROR %02X\n", + logError(0, "%s %s: ERROR %02X\n", tag, __func__, res); } } @@ -1073,7 +1073,7 @@ int checkEcho(u8 *cmd, int size) u8 readData[FIFO_EVENT_SIZE]; if ((ftsInfo.u32_echoEn & 0x00000001) != ECHO_ENABLED) { - logError(1, "%s ECHO Not Enabled!\n", tag); + logError(0, "%s ECHO Not Enabled!\n", tag); return OK; } if (size < 1) { @@ -1157,7 +1157,7 @@ int writeNoiseParameters(u8 *noise) ret = fts_writeCmd(cmd, NOISE_PARAMETERS_SIZE + 2); //not use writeFwCmd because this function should be fast if (ret < OK) { - logError(1, "%s %s:impossible write command... ERROR %02X\n", + logError(0, "%s %s:impossible write command... 
ERROR %02X\n", tag, __func__, ret); ret = (ret | ERROR_NOISE_PARAMETERS); goto ERROR; @@ -1165,7 +1165,7 @@ int writeNoiseParameters(u8 *noise) ret = pollForEvent(event_to_search, 2, readData, GENERAL_TIMEOUT); if (ret < OK) { - logError(1, "%s %s: polling FIFO ERROR %02X\n", + logError(0, "%s %s: polling FIFO ERROR %02X\n", tag, __func__, ret); ret = (ret | ERROR_NOISE_PARAMETERS); goto ERROR; @@ -1209,7 +1209,7 @@ int readNoiseParameters(u8 *noise) cmd[1] = NOISE_PARAMETERS; ret = fts_writeCmd(cmd, 2);//not use writeFwCmd should be fast if (ret < OK) { - logError(1, "%s %s:impossible write command... ERROR %02X\n", + logError(0, "%s %s:impossible write command... ERROR %02X\n", tag, __func__, ret); ret = (ret | ERROR_NOISE_PARAMETERS); goto ERROR; @@ -1217,7 +1217,7 @@ int readNoiseParameters(u8 *noise) ret = pollForEvent(event_to_search, 2, readData, GENERAL_TIMEOUT); if (ret < OK) { - logError(1, "%s %s: polling FIFO ERROR %02X\n", + logError(0, "%s %s: polling FIFO ERROR %02X\n", tag, __func__, ret); ret = (ret | ERROR_NOISE_PARAMETERS); goto ERROR; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 99a2a57b6cfdae65757ac52d77c44920ed244472..10190e361a13db31135f3a7c44cc60a8ea1a6135 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -311,6 +311,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid) if (dev_data == NULL) { dev_data = alloc_dev_data(devid); + if (!dev_data) + return NULL; if (translation_pre_enabled(iommu)) dev_data->defer_attach = true; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e4cd555ffc51d35b1c3f3b88718898d22eaf57ae..d2629d41078a3285d4aa175ced011b8870f23748 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -252,6 +252,7 @@ struct arm_smmu_device { #define ARM_SMMU_OPT_3LVL_TABLES (1 << 4) #define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5) #define ARM_SMMU_OPT_STATIC_CB (1 << 6) +#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 7) u32 options; enum 
arm_smmu_arch_version version; enum arm_smmu_implementation model; @@ -268,7 +269,7 @@ struct arm_smmu_device { struct arm_smmu_smr *smrs; struct arm_smmu_s2cr *s2crs; struct mutex stream_map_mutex; - + struct mutex iommu_group_mutex; unsigned long va_size; unsigned long ipa_size; unsigned long pa_size; @@ -358,6 +359,7 @@ struct arm_smmu_domain { enum arm_smmu_domain_stage stage; struct mutex init_mutex; /* Protects smmu pointer */ spinlock_t cb_lock; /* Serialises ATS1* ops */ + spinlock_t sync_lock; /* Serialises TLB syncs */ struct io_pgtable_cfg pgtbl_cfg; u32 attributes; bool slave_side_secure; @@ -387,6 +389,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" }, { ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" }, { ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"}, + { ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" }, { 0, NULL}, }; @@ -1000,10 +1003,10 @@ static void arm_smmu_tlb_sync_context(void *cookie) void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); unsigned long flags; - spin_lock_irqsave(&smmu_domain->cb_lock, flags); + spin_lock_irqsave(&smmu_domain->sync_lock, flags); __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, base + ARM_SMMU_CB_TLBSTATUS); - spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); + spin_unlock_irqrestore(&smmu_domain->sync_lock, flags); } static void arm_smmu_tlb_sync_vmid(void *cookie) @@ -1999,6 +2002,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) mutex_init(&smmu_domain->init_mutex); spin_lock_init(&smmu_domain->cb_lock); + spin_lock_init(&smmu_domain->sync_lock); INIT_LIST_HEAD(&smmu_domain->pte_info_list); INIT_LIST_HEAD(&smmu_domain->unassign_list); mutex_init(&smmu_domain->assign_lock); @@ -2175,6 +2179,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev) struct iommu_group *group; int i, idx, ret; + mutex_lock(&smmu->iommu_group_mutex); mutex_lock(&smmu->stream_map_mutex); /* Figure out a viable 
stream map entry allocation */ for_each_cfg_sme(fwspec, i, idx) { @@ -2183,12 +2188,12 @@ static int arm_smmu_master_alloc_smes(struct device *dev) if (idx != INVALID_SMENDX) { ret = -EEXIST; - goto out_err; + goto sme_err; } ret = arm_smmu_find_sme(smmu, sid, mask); if (ret < 0) - goto out_err; + goto sme_err; idx = ret; if (smrs && smmu->s2crs[idx].count == 0) { @@ -2199,13 +2204,14 @@ static int arm_smmu_master_alloc_smes(struct device *dev) smmu->s2crs[idx].count++; cfg->smendx[i] = (s16)idx; } + mutex_unlock(&smmu->stream_map_mutex); group = iommu_group_get_for_dev(dev); if (!group) group = ERR_PTR(-ENOMEM); if (IS_ERR(group)) { ret = PTR_ERR(group); - goto out_err; + goto iommu_group_err; } iommu_group_put(group); @@ -2213,15 +2219,19 @@ static int arm_smmu_master_alloc_smes(struct device *dev) for_each_cfg_sme(fwspec, i, idx) smmu->s2crs[idx].group = group; - mutex_unlock(&smmu->stream_map_mutex); + mutex_unlock(&smmu->iommu_group_mutex); return 0; -out_err: +iommu_group_err: + mutex_lock(&smmu->stream_map_mutex); + +sme_err: while (i--) { arm_smmu_free_sme(smmu, cfg->smendx[i]); cfg->smendx[i] = INVALID_SMENDX; } mutex_unlock(&smmu->stream_map_mutex); + mutex_unlock(&smmu->iommu_group_mutex); return ret; } @@ -2541,6 +2551,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t ret; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + unsigned long flags; if (!ops) return 0; @@ -2554,7 +2565,9 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, arm_smmu_secure_domain_lock(smmu_domain); + spin_lock_irqsave(&smmu_domain->cb_lock, flags); ret = ops->unmap(ops, iova, size); + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); arm_smmu_domain_power_off(domain, smmu_domain->smmu); /* @@ -2709,6 +2722,10 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, phys_addr_t ret = 0; unsigned long flags; struct 
arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_device *smmu = smmu_domain->smmu; + + if (smmu->options & ARM_SMMU_OPT_DISABLE_ATOS) + return 0; if (arm_smmu_power_on(smmu_domain->smmu->pwr)) return 0; @@ -4196,6 +4213,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->num_mapping_groups = size; mutex_init(&smmu->stream_map_mutex); + mutex_init(&smmu->iommu_group_mutex); spin_lock_init(&smmu->global_sync_lock); if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { @@ -5259,7 +5277,11 @@ static int qsmmuv500_arch_init(struct arm_smmu_device *smmu) pdev = container_of(dev, struct platform_device, dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcu-base"); - data->tcu_base = devm_ioremap_resource(dev, res); + if (!res) { + dev_err(dev, "Unable to get the tcu-base\n"); + return -EINVAL; + } + data->tcu_base = devm_ioremap(dev, res->start, resource_size(res)); if (IS_ERR(data->tcu_base)) return PTR_ERR(data->tcu_base); diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index c26b8e9604aa781416848fc079b5c6644f85c8db..180b5109ce16dee57e4b9897c098b31445589490 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -432,13 +432,15 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, arm_lpae_iopte *ptep, arm_lpae_iopte curr, - struct io_pgtable_cfg *cfg) + struct io_pgtable_cfg *cfg, + int ref_count) { arm_lpae_iopte old, new; new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; + iopte_tblcnt_set(&new, ref_count); /* * Ensure the table itself is visible before its PTE can be. 
@@ -536,7 +538,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + pte = arm_lpae_install_table(cptep, ptep, 0, cfg, 0); if (pte) __arm_lpae_free_pages(cptep, tblsz, cfg, cookie); @@ -767,6 +769,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); int i, unmap_idx = -1; void *cookie = data->iop.cookie; + int child_cnt = 0; size = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size); @@ -790,9 +793,10 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i], true); + child_cnt++; } - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg, child_cnt); if (pte != blk_pte) { __arm_lpae_free_pages(tablep, tablesz, cfg, cookie); /* diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 16d33ac19db0f77837c30f44de044a3a46b9c558..c30f6270043152bf6304821863dc8e45722c22d6 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -60,7 +60,7 @@ (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data)) #define REG_MMU_IVRP_PADDR 0x114 -#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31)) + #define REG_MMU_VLD_PA_RNG 0x118 #define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA)) @@ -532,8 +532,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) F_INT_PRETETCH_TRANSATION_FIFO_FAULT; writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); - writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), - data->base + REG_MMU_IVRP_PADDR); + if (data->m4u_plat == M4U_MT8173) + regval = (data->protect_base >> 1) | (data->enable_4GB << 31); + else + regval = lower_32_bits(data->protect_base) | + upper_32_bits(data->protect_base); + writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); + if 
(data->enable_4GB && data->m4u_plat != M4U_MT8173) { /* * If 4GB mode is enabled, the validate PA range is from @@ -688,6 +693,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); + reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); clk_disable_unprepare(data->bclk); return 0; } @@ -710,8 +716,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); - writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB), - base + REG_MMU_IVRP_PADDR); + writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); if (data->m4u_dom) writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], base + REG_MMU_PT_BASE_ADDR); diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index b4451a1c7c2f167060edac5dbe5ce53a99b21144..778498b8633fc63d4383ee0975741a8acffb3b5f 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg { u32 ctrl_reg; u32 int_control0; u32 int_main_control; + u32 ivrp_paddr; }; enum mtk_iommu_plat { diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile index f10afec6f063860201b50fbb7c7e4a740e5b8b6c..62582613438d883e3c721804c2fd321746488110 100644 --- a/drivers/irqchip/qcom/Makefile +++ b/drivers/irqchip/qcom/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_QTI_PDC) += pdc.o obj-$(CONFIG_QTI_PDC_SM8150) += pdc-sm8150.o obj-$(CONFIG_QTI_PDC_SM6150) += pdc-sm6150.o obj-$(CONFIG_QTI_MPM) += mpm.o +obj-$(CONFIG_QTI_MPM) += mpm.o mpm-8937.o diff --git a/drivers/irqchip/qcom/mpm-8937.c b/drivers/irqchip/qcom/mpm-8937.c new file mode 100644 index 
0000000000000000000000000000000000000000..d6875ebadb928d79e688f2f9d2895107964316e7 --- /dev/null +++ b/drivers/irqchip/qcom/mpm-8937.c @@ -0,0 +1,74 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "mpm.h" + +const struct mpm_pin mpm_msm8937_gic_chip_data[] = { + {2, 216}, + {49, 172}, + {53, 104}, + {58, 166}, + {62, 222}, + {-1}, +}; + +const struct mpm_pin mpm_msm8937_gpio_chip_data[] = { + {3, 38}, + {4, 1}, + {5, 5}, + {6, 9}, + {8, 37}, + {9, 36}, + {10, 13}, + {11, 35}, + {12, 17}, + {13, 21}, + {14, 54}, + {15, 34}, + {16, 31}, + {17, 58}, + {18, 28}, + {19, 42}, + {20, 25}, + {21, 12}, + {22, 43}, + {23, 44}, + {24, 45}, + {25, 46}, + {26, 48}, + {27, 65}, + {28, 93}, + {29, 97}, + {30, 63}, + {31, 70}, + {32, 71}, + {33, 72}, + {34, 81}, + {35, 126}, + {36, 90}, + {37, 128}, + {38, 91}, + {39, 41}, + {40, 127}, + {41, 86}, + {50, 67}, + {51, 73}, + {52, 74}, + {53, 62}, + {54, 124}, + {55, 61}, + {56, 130}, + {57, 59}, + {59, 50}, + {-1}, +}; diff --git a/drivers/irqchip/qcom/mpm.c b/drivers/irqchip/qcom/mpm.c index 72e853d396d28abccf49b13db8151f5968f1199f..c7b610f29f323ef3dd6517a5a077af82d976b0e7 100644 --- a/drivers/irqchip/qcom/mpm.c +++ b/drivers/irqchip/qcom/mpm.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -59,6 +60,7 @@ struct msm_mpm_device_data { void __iomem *mpm_ipc_reg; irq_hw_number_t ipc_irq; struct irq_domain *gic_chip_domain; + struct irq_domain *gpio_chip_domain; }; static int 
msm_pm_sleep_time_override; @@ -160,13 +162,26 @@ static inline void msm_mpm_enable_irq(struct irq_data *d, bool on) } } -static inline void msm_mpm_set_type(struct irq_data *d, +static void msm_mpm_program_set_type(bool set, unsigned int reg, + unsigned int index, unsigned int mask) +{ + u32 type; + + type = msm_mpm_read(reg, index); + if (set) + type = ENABLE_TYPE(type, mask); + else + type = CLEAR_TYPE(type, mask); + + msm_mpm_write(reg, index, type); +} + +static void msm_mpm_set_type(struct irq_data *d, unsigned int flowtype) { int mpm_pin[MAX_MPM_PIN_PER_IRQ] = {-1, -1}; unsigned long flags; int i = 0; - u32 type; unsigned int index, mask; unsigned int reg = 0; @@ -178,28 +193,44 @@ static inline void msm_mpm_set_type(struct irq_data *d, index = mpm_pin[i]/32; mask = mpm_pin[i]%32; - if (flowtype & IRQ_TYPE_LEVEL_HIGH) - reg = MPM_REG_FALLING_EDGE; - + spin_lock_irqsave(&mpm_lock, flags); + reg = MPM_REG_RISING_EDGE; if (flowtype & IRQ_TYPE_EDGE_RISING) - reg = MPM_REG_RISING_EDGE; + msm_mpm_program_set_type(1, reg, index, mask); + else + msm_mpm_program_set_type(0, reg, index, mask); + reg = MPM_REG_FALLING_EDGE; if (flowtype & IRQ_TYPE_EDGE_FALLING) - reg = MPM_REG_POLARITY; - - spin_lock_irqsave(&mpm_lock, flags); - type = msm_mpm_read(reg, index); - - if (flowtype) - type = ENABLE_TYPE(type, mask); + msm_mpm_program_set_type(1, reg, index, mask); else - type = CLEAR_TYPE(type, mask); + msm_mpm_program_set_type(0, reg, index, mask); - msm_mpm_write(reg, index, type); + reg = MPM_REG_POLARITY; + if (flowtype & IRQ_TYPE_LEVEL_HIGH) + msm_mpm_program_set_type(1, reg, index, mask); + else + msm_mpm_program_set_type(0, reg, index, mask); spin_unlock_irqrestore(&mpm_lock, flags); } } +static void msm_mpm_gpio_chip_mask(struct irq_data *d) +{ + msm_mpm_enable_irq(d, false); +} + +static void msm_mpm_gpio_chip_unmask(struct irq_data *d) +{ + msm_mpm_enable_irq(d, true); +} + +static int msm_mpm_gpio_chip_set_type(struct irq_data *d, unsigned int type) +{ + 
msm_mpm_set_type(d, type); + return 0; +} + static void msm_mpm_gic_chip_mask(struct irq_data *d) { msm_mpm_enable_irq(d, false); @@ -227,9 +258,58 @@ static struct irq_chip msm_mpm_gic_chip = { .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_set_type = msm_mpm_gic_chip_set_type, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, -#ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, -#endif +}; + +static struct irq_chip msm_mpm_gpio_chip = { + .name = "mpm-gpio", + .irq_mask = msm_mpm_gpio_chip_mask, + .irq_disable = msm_mpm_gpio_chip_mask, + .irq_unmask = msm_mpm_gpio_chip_unmask, + .irq_set_type = msm_mpm_gpio_chip_set_type, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, + .irq_retrigger = irq_chip_retrigger_hierarchy, +}; + +static int msm_mpm_gpio_chip_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + return 0; + } + return -EINVAL; +} + +static int msm_mpm_gpio_chip_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, + void *data) +{ + int ret = 0; + struct irq_fwspec *fwspec = data; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + + ret = msm_mpm_gpio_chip_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &msm_mpm_gpio_chip, NULL); + + return 0; +} + +static const struct irq_domain_ops msm_mpm_gpio_chip_domain_ops = { + .translate = msm_mpm_gpio_chip_translate, + .alloc = msm_mpm_gpio_chip_alloc, + .free = irq_domain_free_irqs_common, }; static int msm_mpm_gic_chip_translate(struct irq_domain *d, @@ -240,10 +320,34 @@ static int msm_mpm_gic_chip_translate(struct irq_domain *d, if (is_of_node(fwspec->fwnode)) { if (fwspec->param_count < 3) return -EINVAL; - *hwirq = fwspec->param[1]; + + switch 
(fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + default: + return -EINVAL; + } + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; return 0; } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + return 0; + } + return -EINVAL; } @@ -296,15 +400,20 @@ static void msm_mpm_enter_sleep(struct cpumask *cpumask) irq_set_affinity(msm_mpm_dev_data.ipc_irq, cpumask); } -static int msm_get_mpm_pin_map(unsigned int mpm_irq) +static int msm_get_apps_irq(unsigned int mpm_irq) { - struct mpm_pin *mpm_gic_pin_map = NULL; + struct mpm_pin *mpm_pin = NULL; int apps_irq; - mpm_gic_pin_map = (struct mpm_pin *) + mpm_pin = (struct mpm_pin *) msm_mpm_dev_data.gic_chip_domain->host_data; - apps_irq = msm_get_irq_pin(mpm_irq, mpm_gic_pin_map); - return apps_irq; + apps_irq = msm_get_irq_pin(mpm_irq, mpm_pin); + if (apps_irq >= 0) + return apps_irq; + + mpm_pin = (struct mpm_pin *) + msm_mpm_dev_data.gpio_chip_domain->host_data; + return msm_get_irq_pin(mpm_irq, mpm_pin); } @@ -357,6 +466,8 @@ static int system_pm_update_wakeup(bool from_idle) } msm_mpm_timer_write((uint32_t *)&wakeup); + trace_mpm_wakeup_time(from_idle, wakeup, arch_counter_get_cntvct()); + return 0; } @@ -407,7 +518,7 @@ static irqreturn_t msm_mpm_irq(int irq, void *dev_id) trace_mpm_wakeup_pending_irqs(i, pending); for_each_set_bit(k, &pending, 32) { mpm_irq = 32 * i + k; - apps_irq = msm_get_mpm_pin_map(mpm_irq); + apps_irq = msm_get_apps_irq(mpm_irq); desc = apps_irq ? 
irq_to_desc(apps_irq) : NULL; @@ -416,11 +527,13 @@ static irqreturn_t msm_mpm_irq(int irq, void *dev_id) IRQCHIP_STATE_PENDING, true); } + + msm_mpm_write(MPM_REG_STATUS, i, 0); } return IRQ_HANDLED; } -static int msm_mpm_probe(struct device_node *node) +static int msm_mpm_init(struct device_node *node) { struct msm_mpm_device_data *dev = &msm_mpm_dev_data; int ret = 0; @@ -488,16 +601,30 @@ static int msm_mpm_probe(struct device_node *node) } static const struct of_device_id mpm_gic_chip_data_table[] = { + { + .compatible = "qcom,mpm-gic-msm8937", + .data = mpm_msm8937_gic_chip_data, + }, {} }; MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table); +static const struct of_device_id mpm_gpio_chip_data_table[] = { + { + .compatible = "qcom,mpm-gpio-msm8937", + .data = mpm_msm8937_gpio_chip_data, + }, + {} +}; + +MODULE_DEVICE_TABLE(of, mpm_gpio_chip_data_table); + static int __init mpm_gic_chip_init(struct device_node *node, struct device_node *parent) { struct irq_domain *parent_domain; const struct of_device_id *id; - struct device_node *parent_node; + int ret; if (!parent) { pr_err("%s(): no parent for mpm-gic\n", node->full_name); @@ -514,12 +641,13 @@ static int __init mpm_gic_chip_init(struct device_node *node, mpm_to_irq = kcalloc(num_mpm_irqs, sizeof(*mpm_to_irq), GFP_KERNEL); if (!mpm_to_irq) - return -ENOMEM; + return -ENOMEM; id = of_match_node(mpm_gic_chip_data_table, node); if (!id) { pr_err("can not find mpm_gic_data_table of_node\n"); - return -ENODEV; + ret = -ENODEV; + goto mpm_map_err; } msm_mpm_dev_data.gic_chip_domain = irq_domain_add_hierarchy( @@ -527,13 +655,45 @@ static int __init mpm_gic_chip_init(struct device_node *node, &msm_mpm_gic_chip_domain_ops, (void *)id->data); if (!msm_mpm_dev_data.gic_chip_domain) { pr_err("gic domain add failed\n"); - return -ENOMEM; + ret = -ENOMEM; + goto mpm_map_err; } msm_mpm_dev_data.gic_chip_domain->name = "qcom,mpm-gic"; - parent_node = of_get_parent(node); - return msm_mpm_probe(parent_node); + ret = 
msm_mpm_init(node); + if (!ret) + return ret; + irq_domain_remove(msm_mpm_dev_data.gic_chip_domain); + +mpm_map_err: + kfree(mpm_to_irq); + return ret; } IRQCHIP_DECLARE(mpm_gic_chip, "qcom,mpm-gic", mpm_gic_chip_init); + +static int __init mpm_gpio_chip_init(struct device_node *node, + struct device_node *parent) +{ + const struct of_device_id *id; + + id = of_match_node(mpm_gpio_chip_data_table, node); + if (!id) { + pr_err("match_table not found for mpm-gpio\n"); + return -ENODEV; + } + + msm_mpm_dev_data.gpio_chip_domain = irq_domain_create_linear( + of_node_to_fwnode(node), num_mpm_irqs, + &msm_mpm_gpio_chip_domain_ops, (void *)id->data); + + if (!msm_mpm_dev_data.gpio_chip_domain) + return -ENOMEM; + + msm_mpm_dev_data.gpio_chip_domain->name = "qcom,mpm-gpio"; + + return 0; +} + +IRQCHIP_DECLARE(mpm_gpio_chip, "qcom,mpm-gpio", mpm_gpio_chip_init); diff --git a/drivers/irqchip/qcom/mpm.h b/drivers/irqchip/qcom/mpm.h index 3d43513b7a289b48e87fdf4e70f4fc78e5b7af85..c2d5eae179fe34450c1dea4f1f92c2f9cc1f1b77 100644 --- a/drivers/irqchip/qcom/mpm.h +++ b/drivers/irqchip/qcom/mpm.h @@ -21,4 +21,7 @@ struct mpm_pin { irq_hw_number_t hwirq; }; +extern const struct mpm_pin mpm_msm8937_gic_chip_data[]; +extern const struct mpm_pin mpm_msm8937_gpio_chip_data[]; + #endif /* __QCOM_MPM_H__ */ diff --git a/drivers/irqchip/qcom/pdc-sm6150.c b/drivers/irqchip/qcom/pdc-sm6150.c index 4a471059e3d1b9749801b11c5847252f56e4edb9..b05be12f5c78c1790148d0f385abc94884c4ac87 100644 --- a/drivers/irqchip/qcom/pdc-sm6150.c +++ b/drivers/irqchip/qcom/pdc-sm6150.c @@ -141,6 +141,7 @@ static struct pdc_pin sm6150_data[] = { {123, 670},/*core_bi_px_gpio_51*/ {124, 671},/*core_bi_px_gpio_88*/ {125, 95},/*core_bi_px_gpio_39*/ + {-1}, }; static int __init qcom_pdc_gic_init(struct device_node *node, diff --git a/drivers/irqchip/qcom/pdc-sm8150.c b/drivers/irqchip/qcom/pdc-sm8150.c index fa09c4e13186f1374b292829dfd223829eb5a926..8e7a63a3cc61f775d1a5f02e72d3199996ad67fc 100644 --- 
a/drivers/irqchip/qcom/pdc-sm8150.c +++ b/drivers/irqchip/qcom/pdc-sm8150.c @@ -28,13 +28,13 @@ static struct pdc_pin sm8150_data[] = { {10, 522},/*eud_p1_dmse_int_mx*/ {11, 523},/*eud_p1_dpse_int_mx*/ {12, 524},/*eud_int_mx[1]*/ - {13, 525},/*ssc_xpu_irq_summary*/ + {13, 525},/*xpu_irq_summary*/ {14, 526},/*wd_bite_apps*/ - {15, 527},/*ssc_vmidmt_irq_summary*/ + {15, 527},/*vmidmt_irq_summary*/ {16, 528},/*q6ss_irq_out_apps_ipc[4]*/ {17, 529},/*not-connected*/ {18, 530},/*aoss_pmic_arb_mpu_xpu_summary_irq*/ - {19, 531},/*apps_pdc_irq_in_19*/ + {19, 531},/*rpmh_wake_2*/ {20, 532},/*apps_pdc_irq_in_20*/ {21, 533},/*apps_pdc_irq_in_21*/ {22, 534},/*pdc_apps_epcb_timeout_summary_irq*/ @@ -45,102 +45,102 @@ static struct pdc_pin sm8150_data[] = { {27, 539},/*tsense1_upper_lower_intr*/ {28, 540},/*tsense0_critical_intr*/ {29, 541},/*tsense1_critical_intr*/ - {30, 542},/*core_bi_px_gpio_1*/ - {31, 543},/*core_bi_px_gpio_3*/ - {32, 544},/*core_bi_px_gpio_5*/ - {33, 545},/*core_bi_px_gpio_10*/ - {34, 546},/*core_bi_px_gpio_11*/ - {35, 547},/*core_bi_px_gpio_20*/ - {36, 548},/*core_bi_px_gpio_22*/ - {37, 549},/*core_bi_px_gpio_24*/ - {38, 550},/*core_bi_px_gpio_26*/ - {39, 551},/*core_bi_px_gpio_30*/ - {40, 552},/*gp_irq_hv[10]*/ - {41, 553},/*core_bi_px_gpio_32*/ - {42, 554},/*core_bi_px_gpio_34*/ - {43, 555},/*core_bi_px_gpio_36*/ - {44, 556},/*core_bi_px_gpio_37*/ - {45, 557},/*core_bi_px_gpio_38*/ - {46, 558},/*core_bi_px_gpio_39*/ - {47, 559},/*core_bi_px_gpio_40*/ - {48, 560},/*gp_irq_hv[18]*/ - {49, 561},/*core_bi_px_gpio_43*/ - {50, 562},/*core_bi_px_gpio_44*/ - {51, 563},/*core_bi_px_gpio_46*/ - {52, 564},/*core_bi_px_gpio_48*/ - {53, 565},/*gp_irq_hv[23]*/ - {54, 566},/*core_bi_px_gpio_52*/ - {55, 567},/*core_bi_px_gpio_53*/ - {56, 568},/*core_bi_px_gpio_54*/ - {57, 569},/*core_bi_px_gpio_56*/ - {58, 570},/*core_bi_px_gpio_57*/ - {59, 571},/*core_bi_px_gpio_58*/ - {60, 572},/*core_bi_px_gpio_59*/ - {61, 573},/*core_bi_px_gpio_60*/ - {62, 
574},/*core_bi_px_gpio_61*/ - {63, 575},/*core_bi_px_gpio_62*/ - {64, 576},/*core_bi_px_gpio_63*/ - {65, 577},/*core_bi_px_gpio_64*/ - {66, 578},/*core_bi_px_gpio_66*/ - {67, 579},/*core_bi_px_gpio_68*/ - {68, 580},/*core_bi_px_gpio_71*/ - {69, 581},/*core_bi_px_gpio_73*/ - {70, 582},/*core_bi_px_gpio_77*/ - {71, 583},/*core_bi_px_gpio_78*/ - {72, 584},/*core_bi_px_gpio_79*/ - {73, 585},/*core_bi_px_gpio_80*/ - {74, 586},/*core_bi_px_gpio_84*/ - {75, 587},/*core_bi_px_gpio_85*/ - {76, 588},/*core_bi_px_gpio_86*/ - {77, 589},/*core_bi_px_gpio_88*/ - {78, 590},/*gp_irq_hv[48]*/ - {79, 591},/*core_bi_px_gpio_91*/ - {80, 592},/*core_bi_px_gpio_92*/ - {81, 593},/*core_bi_px_gpio_95*/ - {82, 594},/*core_bi_px_gpio_96*/ - {83, 595},/*core_bi_px_gpio_97*/ - {84, 596},/*core_bi_px_gpio_101*/ - {85, 597},/*core_bi_px_gpio_103*/ - {86, 598},/*core_bi_px_gpio_104*/ - {87, 599},/*core_bi_px_to_mpm[6]*/ - {88, 600},/*core_bi_px_to_mpm[0]*/ - {89, 601},/*core_bi_px_to_mpm[1]*/ - {90, 602},/*core_bi_px_gpio_115*/ - {91, 603},/*core_bi_px_gpio_116*/ - {92, 604},/*core_bi_px_gpio_117*/ - {93, 605},/*core_bi_px_gpio_118*/ - {94, 641},/*core_bi_px_gpio_119*/ - {95, 642},/*core_bi_px_gpio_120*/ - {96, 643},/*core_bi_px_gpio_121*/ - {97, 644},/*core_bi_px_gpio_122*/ - {98, 645},/*core_bi_px_gpio_123*/ - {99, 646},/*core_bi_px_gpio_124*/ - {100, 647},/*core_bi_px_gpio_125*/ - {101, 648},/*core_bi_px_to_mpm[5]*/ - {102, 649},/*core_bi_px_gpio_127*/ - {103, 650},/*core_bi_px_gpio_128*/ - {104, 651},/*core_bi_px_gpio_129*/ - {105, 652},/*core_bi_px_gpio_130*/ - {106, 653},/*core_bi_px_gpio_132*/ - {107, 654},/*core_bi_px_gpio_133*/ - {108, 655},/*core_bi_px_gpio_145*/ - {109, 656},/*gp_irq_hv[79]*/ - {110, 657},/*gp_irq_hv[80]*/ - {111, 658},/*gp_irq_hv[81]*/ - {112, 659},/*gp_irq_hv[82]*/ - {113, 660},/*gp_irq_hv[83]*/ - {114, 661},/*gp_irq_hv[84]*/ - {115, 662},/*core_bi_px_gpio_41*/ - {116, 663},/*core_bi_px_gpio_89*/ - {117, 664},/*core_bi_px_gpio_31*/ - {118, 
665},/*core_bi_px_gpio_49*/ - {119, 666},/*core_bi_px_to_mpm[2]*/ - {120, 667},/*core_bi_px_to_mpm[3]*/ - {121, 668},/*core_bi_px_to_mpm[4]*/ - {122, 669},/*core_bi_px_gpio_41*/ - {123, 670},/*core_bi_px_gpio_89*/ - {124, 671},/*core_bi_px_gpio_31*/ - {125, 95},/*core_bi_px_gpio_49*/ + {30, 542},/*core_bi_px_core_in_mx_gpio_38*/ + {31, 543},/*core_bi_px_core_in_mx_gpio_3*/ + {32, 544},/*core_bi_px_core_in_mx_gpio_5*/ + {33, 545},/*core_bi_px_core_in_mx_gpio_8*/ + {34, 546},/*core_bi_px_core_in_mx_gpio_9*/ + {35, 547},/*gp_irq_hvm[5]*/ + {36, 548},/*core_bi_px_core_in_mx_gpio_134*/ + {37, 549},/*core_bi_px_core_in_mx_gpio_24*/ + {38, 550},/*core_bi_px_core_in_mx_gpio_26*/ + {39, 551},/*core_bi_px_core_in_mx_gpio_30*/ + {40, 552},/*core_bi_px_core_in_mx_gpio_101*/ + {41, 553},/*core_bi_px_core_in_mx_gpio_27*/ + {42, 554},/*core_bi_px_core_in_mx_gpio_28*/ + {43, 555},/*core_bi_px_core_in_mx_gpio_36*/ + {44, 556},/*core_bi_px_core_in_mx_gpio_37*/ + {45, 557},/*gp_irq_hvm[15]*/ + {46, 558},/*gp_irq_hvm[16]*/ + {47, 559},/*core_bi_px_core_in_mx_gpio_41*/ + {48, 560},/*core_bi_px_core_in_mx_gpio_42*/ + {49, 561},/*core_bi_px_core_in_mx_gpio_47*/ + {50, 562},/*core_bi_px_core_in_mx_gpio_46*/ + {51, 563},/*core_bi_px_core_in_mx_gpio_48*/ + {52, 564},/*core_bi_px_core_in_mx_gpio_50*/ + {53, 565},/*core_bi_px_core_in_mx_gpio_49*/ + {54, 566},/*core_bi_px_core_in_mx_gpio_53*/ + {55, 567},/*core_bi_px_core_in_mx_gpio_54*/ + {56, 568},/*core_bi_px_core_in_mx_gpio_55*/ + {57, 569},/*core_bi_px_core_in_mx_gpio_56*/ + {58, 570},/*core_bi_px_core_in_mx_gpio_58*/ + {59, 571},/*gp_irq_hvm[29]*/ + {60, 572},/*core_bi_px_core_in_mx_gpio_60*/ + {61, 573},/*core_bi_px_core_in_mx_gpio_61_from_and_gate_to_mpm*/ + {62, 574},/*core_bi_px_core_in_mx_gpio_68*/ + {63, 575},/*core_bi_px_core_in_mx_gpio_70*/ + {64, 576},/*core_bi_px_core_in_mx_gpio_81*/ + {65, 577},/*core_bi_px_core_in_mx_gpio_83*/ + {66, 578},/*core_bi_px_core_in_mx_gpio_77*/ + {67, 579},/*core_bi_px_core_in_mx_gpio_86*/ + {68, 
580},/*gp_irq_hvm[38]*/ + {69, 581},/*core_bi_px_core_in_mx_gpio_90*/ + {70, 582},/*core_bi_px_core_in_mx_gpio_91*/ + {71, 583},/*core_bi_px_core_in_mx_gpio_76*/ + {72, 584},/*core_bi_px_core_in_mx_gpio_95*/ + {73, 585},/*core_bi_px_core_in_mx_gpio_96_from_and_gate_to_mpm*/ + {74, 586},/*core_bi_px_core_in_mx_gpio_97*/ + {75, 587},/*core_bi_px_core_in_mx_gpio_93*/ + {76, 588},/*gp_irq_hvm[46]*/ + {77, 589},/*core_bi_px_core_in_mx_gpio_103*/ + {78, 590},/*core_bi_px_core_in_mx_gpio_104*/ + {79, 591},/*core_bi_px_core_in_mx_gpio_108_from_and_gate_to_mpm*/ + {80, 592},/*core_bi_px_core_in_mx_gpio_112_from_and_gate_to_mpm*/ + {81, 593},/*core_bi_px_core_in_mx_gpio_113_from_and_gate_to_mpm*/ + {82, 594},/*core_bi_px_core_in_mx_gpio_114*/ + {83, 595},/*core_bi_px_core_in_mx_gpio_133*/ + {84, 596},/*core_bi_px_core_in_mx_gpio_87*/ + {85, 597},/*core_bi_px_core_in_mx_gpio_117*/ + {86, 598},/*gp_irq_hvm[56]*/ + {87, 599},/*core_bi_px_core_in_mx_gpio_119*/ + {88, 600},/*core_bi_px_core_in_mx_gpio_120*/ + {89, 601},/*core_bi_px_core_in_mx_gpio_121*/ + {90, 602},/*core_bi_px_core_in_mx_gpio_122*/ + {91, 603},/*core_bi_px_core_in_mx_gpio_123*/ + {92, 604},/*core_bi_px_core_in_mx_gpio_124*/ + {93, 605},/*core_bi_px_core_in_mx_gpio_125*/ + {94, 641},/*core_bi_px_core_in_mx_gpio_129*/ + {95, 642},/*gp_irq_hvm[65]*/ + {96, 643},/*gp_irq_hvm[66]*/ + {97, 644},/*core_bi_px_core_in_mx_gpio_136*/ + {98, 645},/*gp_irq_hvm[68]*/ + {99, 646},/*gp_irq_hvm[69]*/ + {100, 647},/*core_bi_px_core_in_mx_gpio_10*/ + {101, 648},/*core_bi_px_core_in_mx_gpio_118*/ + {102, 649},/*core_bi_px_core_in_mx_gpio_147*/ + {103, 650},/*core_bi_px_core_in_mx_gpio_142*/ + {104, 651},/*core_bi_px_core_in_mx_gpio_12*/ + {105, 652},/*core_bi_px_core_in_mx_gpio_132*/ + {106, 653},/*gp_irq_hvm[76]*/ + {107, 654},/*core_bi_px_core_in_mx_gpio_150*/ + {108, 655},/*core_bi_px_core_in_mx_gpio_152*/ + {109, 656},/*core_bi_px_core_in_mx_gpio_153*/ + {110, 657},/*gp_irq_hvm[80]*/ + {111, 658},/*gp_irq_hvm[81]*/ + {112, 
659},/*gp_irq_hvm[82]*/ + {113, 660},/*gp_irq_hvm[83]*/ + {114, 661},/*gp_irq_hvm[84]*/ + {115, 662},/*core_bi_px_core_in_mx_gpio_144*/ + {116, 663},/*core_bi_px_core_in_mx_gpio_51*/ + {117, 664},/*core_bi_px_core_in_mx_gpio_88*/ + {118, 665},/*core_bi_px_core_in_mx_gpio_39*/ + {119, 666},/*core_bi_px_core_in_mx_gpio_sdc2_data_1_from_and_to_mpm*/ + {120, 667},/*core_bi_px_core_in_mx_gpio_sdc2_data_3_from_and__to_mpm*/ + {121, 668},/*core_bi_px_core_in_mx_gpio_sdc2_cmd_from_and_gate_to_mpm*/ + {122, 669},/*core_bi_px_core_in_mx_gpio_144*/ + {123, 670},/*core_bi_px_core_in_mx_gpio_51*/ + {124, 671},/*core_bi_px_core_in_mx_gpio_88*/ + {125, 95},/*core_bi_px_core_in_mx_gpio_39*/ {-1}, }; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 910b5b6f96b1439a5843b8b08aa81b86e6ca5e9b..eb65b6e78d57d07a8141825280dd63dfcd5058dd 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause) DBDMA_DO_STOP(rm->dma_regs); return; } - memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1)); - memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2)); + memset(rdma->buf1, 0, sizeof(rdma->buf1)); + memset(rdma->buf2, 0, sizeof(rdma->buf2)); rm->dma_buf_v->mark = 0; diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index f34ad8720756043f0078dd0721874d477eddfe71..5b63afff46d583d1937a5359156e518b6ce8a9e6 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -651,11 +651,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) static void search_free(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); - bio_complete(s); if (s->iop.bio) bio_put(s->iop.bio); + bio_complete(s); closure_debug_destroy(cl); mempool_free(s, s->d->c->search); } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 
f046dedc59ab979f5200cd1509d9d7813e38e902..930b00f6a3a2a4b780d7c6ea166b7b36181caf22 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -421,9 +421,15 @@ static int bch_writeback_thread(void *arg) while (!kthread_should_stop()) { down_write(&dc->writeback_lock); set_current_state(TASK_INTERRUPTIBLE); - if (!atomic_read(&dc->has_dirty) || - (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && - !dc->writeback_running)) { + /* + * If the bache device is detaching, skip here and continue + * to perform writeback. Otherwise, if no dirty data on cache, + * or there is dirty data on cache but writeback is disabled, + * the writeback thread should sleep here and wait for others + * to wake up it. + */ + if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && + (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { up_write(&dc->writeback_lock); if (kthread_should_stop()) { @@ -444,6 +450,14 @@ static int bch_writeback_thread(void *arg) cached_dev_put(dc); SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); bch_write_bdev_super(dc, NULL); + /* + * If bcache device is detaching via sysfs interface, + * writeback thread should stop after there is no dirty + * data on cache. BCACHE_DEV_DETACHING flag is set in + * bch_cached_dev_detach(). + */ + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) + break; } up_write(&dc->writeback_lock); diff --git a/drivers/md/md.c b/drivers/md/md.c index 7671b14756075732b749792f83f5956fb9557446..636103a8fa5017abde238ed7ddc46a71ad0cff6c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -779,6 +779,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio; int ff = 0; + if (!page) + return; + if (test_bit(Faulty, &rdev->flags)) return; @@ -5434,6 +5437,7 @@ int md_run(struct mddev *mddev) * the only valid external interface is through the md * device. 
*/ + mddev->has_superblocks = false; rdev_for_each(rdev, mddev) { if (test_bit(Faulty, &rdev->flags)) continue; @@ -5447,6 +5451,9 @@ int md_run(struct mddev *mddev) set_disk_ro(mddev->gendisk, 1); } + if (rdev->sb_page) + mddev->has_superblocks = true; + /* perform some consistency tests on the device. * We don't want the data to overlap the metadata, * Internal Bitmap issues have been handled elsewhere. @@ -5479,8 +5486,10 @@ int md_run(struct mddev *mddev) } if (mddev->sync_set == NULL) { mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); - if (!mddev->sync_set) - return -ENOMEM; + if (!mddev->sync_set) { + err = -ENOMEM; + goto abort; + } } spin_lock(&pers_lock); @@ -5493,7 +5502,8 @@ int md_run(struct mddev *mddev) else pr_warn("md: personality for level %s is not loaded!\n", mddev->clevel); - return -EINVAL; + err = -EINVAL; + goto abort; } spin_unlock(&pers_lock); if (mddev->level != pers->level) { @@ -5506,7 +5516,8 @@ int md_run(struct mddev *mddev) pers->start_reshape == NULL) { /* This personality cannot handle reshaping... 
*/ module_put(pers->owner); - return -EINVAL; + err = -EINVAL; + goto abort; } if (pers->sync_request) { @@ -5580,7 +5591,7 @@ int md_run(struct mddev *mddev) mddev->private = NULL; module_put(pers->owner); bitmap_destroy(mddev); - return err; + goto abort; } if (mddev->queue) { bool nonrot = true; @@ -5642,6 +5653,18 @@ int md_run(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_action); sysfs_notify(&mddev->kobj, NULL, "degraded"); return 0; + +abort: + if (mddev->bio_set) { + bioset_free(mddev->bio_set); + mddev->bio_set = NULL; + } + if (mddev->sync_set) { + bioset_free(mddev->sync_set); + mddev->sync_set = NULL; + } + + return err; } EXPORT_SYMBOL_GPL(md_run); @@ -8006,6 +8029,7 @@ EXPORT_SYMBOL(md_done_sync); bool md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0; + if (bio_data_dir(bi) != WRITE) return true; @@ -8038,6 +8062,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) rcu_read_unlock(); if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); + if (!mddev->has_superblocks) + return true; wait_event(mddev->sb_wait, !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || mddev->suspended); @@ -8496,6 +8522,19 @@ void md_do_sync(struct md_thread *thread) set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && + mddev->delta_disks > 0 && + mddev->pers->finish_reshape && + mddev->pers->size && + mddev->queue) { + mddev_lock_nointr(mddev); + md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); + mddev_unlock(mddev); + set_capacity(mddev->gendisk, mddev->array_sectors); + revalidate_disk(mddev->gendisk); + } + spin_lock(&mddev->lock); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { /* We completed so min/max setting can be forgotten if used. 
*/ diff --git a/drivers/md/md.h b/drivers/md/md.h index d8287d3cd1bf81b048e90166d91afe76566202b3..9b0a896890eff01e4e7629d2fe91d0c6dfe6be57 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -462,6 +462,8 @@ struct mddev { void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; unsigned int good_device_nr; /* good device num within cluster raid */ + + bool has_superblocks:1; }; enum recovery_flags { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 788fc0800465dd23e962e4e883458c2940831651..e4e01d3bab8195718456676eba774f668577d6b2 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1813,6 +1813,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct md_rdev *repl = conf->mirrors[conf->raid_disks + number].rdev; freeze_array(conf, 0); + if (atomic_read(&repl->nr_pending)) { + /* It means that some queued IO of retry_list + * hold repl. Thus, we cannot set replacement + * as NULL, avoiding rdev NULL pointer + * dereference in sync_request_write and + * handle_write_finished. 
+ */ + err = -EBUSY; + unfreeze_array(conf); + goto abort; + } clear_bit(Replacement, &repl->flags); p->rdev = repl; conf->mirrors[conf->raid_disks + number].rdev = NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0d18d3b952017f8f1518c502d562ad88be31802f..5fb31ef529454c6296057d683854b10e10400247 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2625,7 +2625,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) for (m = 0; m < conf->copies; m++) { int dev = r10_bio->devs[m].devnum; rdev = conf->mirrors[dev].rdev; - if (r10_bio->devs[m].bio == NULL) + if (r10_bio->devs[m].bio == NULL || + r10_bio->devs[m].bio->bi_end_io == NULL) continue; if (!r10_bio->devs[m].bio->bi_status) { rdev_clear_badblocks( @@ -2640,7 +2641,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) md_error(conf->mddev, rdev); } rdev = conf->mirrors[dev].replacement; - if (r10_bio->devs[m].repl_bio == NULL) + if (r10_bio->devs[m].repl_bio == NULL || + r10_bio->devs[m].repl_bio->bi_end_io == NULL) continue; if (!r10_bio->devs[m].repl_bio->bi_status) { @@ -4691,17 +4693,11 @@ static void raid10_finish_reshape(struct mddev *mddev) return; if (mddev->delta_disks > 0) { - sector_t size = raid10_size(mddev, 0, 0); - md_set_array_sectors(mddev, size); if (mddev->recovery_cp > mddev->resync_max_sectors) { mddev->recovery_cp = mddev->resync_max_sectors; set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } - mddev->resync_max_sectors = size; - if (mddev->queue) { - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); - } + mddev->resync_max_sectors = mddev->array_sectors; } else { int d; rcu_read_lock(); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7ec822ced80b31687f59d5ea699b9b6a305f342f..de1ef6264ee7335fd33d51e17ed5736a1710c6a7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2197,15 +2197,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 
static int grow_stripes(struct r5conf *conf, int num) { struct kmem_cache *sc; + size_t namelen = sizeof(conf->cache_name[0]); int devs = max(conf->raid_disks, conf->previous_raid_disks); if (conf->mddev->gendisk) - sprintf(conf->cache_name[0], + snprintf(conf->cache_name[0], namelen, "raid%d-%s", conf->level, mdname(conf->mddev)); else - sprintf(conf->cache_name[0], + snprintf(conf->cache_name[0], namelen, "raid%d-%p", conf->level, conf->mddev); - sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); + snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); conf->active_name = 0; sc = kmem_cache_create(conf->cache_name[conf->active_name], @@ -8000,13 +8001,7 @@ static void raid5_finish_reshape(struct mddev *mddev) if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { - if (mddev->delta_disks > 0) { - md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); - if (mddev->queue) { - set_capacity(mddev->gendisk, mddev->array_sectors); - revalidate_disk(mddev->gendisk); - } - } else { + if (mddev->delta_disks <= 0) { int d; spin_lock_irq(&conf->device_lock); mddev->degraded = raid5_calc_degraded(conf); diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 18e4230865be3498e14e0c134b918fbeff0c47ff..51009b2718a3e0c9ce5f865f1a4bcab5737498ca 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -1055,7 +1055,7 @@ static int dvb_demux_do_ioctl(struct file *file, break; default: - ret = -EINVAL; + ret = -ENOTTY; break; } mutex_unlock(&dmxdev->mutex); diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index c9b1eb38444e8cc199aeffa2c96244b141663b7d..fbb3b2f49d2d22208640c1637b441961b0d1cfda 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -1767,7 +1767,13 @@ static void lgdt3306a_release(struct dvb_frontend *fe) struct lgdt3306a_state *state = fe->demodulator_priv; dbg_info("\n"); - kfree(state); + + 
/* + * If state->muxc is not NULL, then we are an i2c device + * and lgdt3306a_remove will clean up state + */ + if (!state->muxc) + kfree(state); } static const struct dvb_frontend_ops lgdt3306a_ops; @@ -2168,7 +2174,7 @@ static int lgdt3306a_probe(struct i2c_client *client, sizeof(struct lgdt3306a_config)); config->i2c_addr = client->addr; - fe = lgdt3306a_attach(config, client->adapter); + fe = dvb_attach(lgdt3306a_attach, config, client->adapter); if (fe == NULL) { ret = -ENODEV; goto err_fe; diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c index 4da4253553fcf1809ee2c1a4329993d580831c63..10d229a4f08868f74c714e52251627763f1b2a6d 100644 --- a/drivers/media/i2c/adv748x/adv748x-hdmi.c +++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c @@ -105,6 +105,9 @@ static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi, fmt->width = hdmi->timings.bt.width; fmt->height = hdmi->timings.bt.height; + + if (fmt->field == V4L2_FIELD_ALTERNATE) + fmt->height /= 2; } static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings) diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c index d28845f7356f0541684c31234bc2fbf30477f6bf..a31fe18c71d6d7c7fd9278fdde6113c233e08856 100644 --- a/drivers/media/i2c/ov5645.c +++ b/drivers/media/i2c/ov5645.c @@ -1131,13 +1131,14 @@ static int ov5645_probe(struct i2c_client *client, ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &ov5645->ep); + + of_node_put(endpoint); + if (ret < 0) { dev_err(dev, "parsing endpoint node failed\n"); return ret; } - of_node_put(endpoint); - if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) { dev_err(dev, "invalid bus type, must be CSI2\n"); return -EINVAL; diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 7b79a7498751981e424bbe4d515f3463c2953975..698fa764999c7a0dab55e13ef1cf363c19f45a67 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c @@ -506,80 +506,77 @@ static struct 
i2c_vbi_ram_value vbi_ram_default[] = /* FIXME: Current api doesn't handle all VBI types, those not yet supported are placed under #if 0 */ #if 0 - {0x010, /* Teletext, SECAM, WST System A */ + [0] = {0x010, /* Teletext, SECAM, WST System A */ {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } }, #endif - {0x030, /* Teletext, PAL, WST System B */ + [1] = {0x030, /* Teletext, PAL, WST System B */ {V4L2_SLICED_TELETEXT_B,6,22,1}, { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } }, #if 0 - {0x050, /* Teletext, PAL, WST System C */ + [2] = {0x050, /* Teletext, PAL, WST System C */ {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } }, - {0x070, /* Teletext, NTSC, WST System B */ + [3] = {0x070, /* Teletext, NTSC, WST System B */ {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } }, - {0x090, /* Tetetext, NTSC NABTS System C */ + [4] = {0x090, /* Tetetext, NTSC NABTS System C */ {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } }, - {0x0b0, /* Teletext, NTSC-J, NABTS System D */ + [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */ {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } }, - {0x0d0, /* Closed Caption, PAL/SECAM */ + [6] = {0x0d0, /* Closed Caption, PAL/SECAM */ {V4L2_SLICED_CAPTION_625,22,22,1}, { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } }, #endif - {0x0f0, /* Closed Caption, NTSC */ + [7] = {0x0f0, /* Closed Caption, NTSC */ {V4L2_SLICED_CAPTION_525,21,21,1}, { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 0x69, 0x8c, 0x09, 
0x00, 0x00, 0x00, 0x27, 0x00 } }, - {0x110, /* Wide Screen Signal, PAL/SECAM */ + [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */ {V4L2_SLICED_WSS_625,23,23,1}, { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } }, #if 0 - {0x130, /* Wide Screen Signal, NTSC C */ + [9] = {0x130, /* Wide Screen Signal, NTSC C */ {V4L2_SLICED_WSS_525,20,20,1}, { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } }, - {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ + [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ {V4l2_SLICED_VITC_625,6,22,0}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } }, - {0x170, /* Vertical Interval Timecode (VITC), NTSC */ + [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */ {V4l2_SLICED_VITC_525,10,20,0}, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } }, #endif - {0x190, /* Video Program System (VPS), PAL */ + [12] = {0x190, /* Video Program System (VPS), PAL */ {V4L2_SLICED_VPS,16,16,0}, { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } }, /* 0x1d0 User programmable */ - - /* End of struct */ - { (u16)-1 } }; static int tvp5150_write_inittab(struct v4l2_subdev *sd, @@ -592,10 +589,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd, return 0; } -static int tvp5150_vdp_init(struct v4l2_subdev *sd, - const struct i2c_vbi_ram_value *regs) +static int tvp5150_vdp_init(struct v4l2_subdev *sd) { unsigned int i; + int j; /* Disable Full Field */ tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); @@ -605,14 +602,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, tvp5150_write(sd, i, 0xff); /* Load Ram Table */ - while (regs->reg != (u16)-1) { + for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) { + const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j]; + + if 
(!regs->type.vbi_type) + continue; + tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); for (i = 0; i < 16; i++) tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); - - regs++; } return 0; } @@ -621,19 +621,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap) { - const struct i2c_vbi_ram_value *regs = vbi_ram_default; - int line; + int line, i; dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); memset(cap, 0, sizeof *cap); - while (regs->reg != (u16)-1 ) { - for (line=regs->type.ini_line;line<=regs->type.end_line;line++) { + for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { + const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; + + if (!regs->type.vbi_type) + continue; + + for (line = regs->type.ini_line; + line <= regs->type.end_line; + line++) { cap->service_lines[0][line] |= regs->type.vbi_type; } cap->service_set |= regs->type.vbi_type; - - regs++; } return 0; } @@ -652,14 +656,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, * MSB = field2 */ static int tvp5150_set_vbi(struct v4l2_subdev *sd, - const struct i2c_vbi_ram_value *regs, unsigned int type,u8 flags, int line, const int fields) { struct tvp5150 *decoder = to_tvp5150(sd); v4l2_std_id std = decoder->norm; u8 reg; - int pos = 0; + int i, pos = 0; if (std == V4L2_STD_ALL) { dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); @@ -672,19 +675,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, if (line < 6 || line > 27) return 0; - while (regs->reg != (u16)-1) { + for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { + const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; + + if (!regs->type.vbi_type) + continue; + if ((type & regs->type.vbi_type) && (line >= regs->type.ini_line) && (line <= regs->type.end_line)) break; - - regs++; pos++; } - if (regs->reg == (u16)-1) 
- return 0; - type = pos | (flags & 0xf0); reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; @@ -697,8 +700,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, return type; } -static int tvp5150_get_vbi(struct v4l2_subdev *sd, - const struct i2c_vbi_ram_value *regs, int line) +static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line) { struct tvp5150 *decoder = to_tvp5150(sd); v4l2_std_id std = decoder->norm; @@ -727,8 +729,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd, return 0; } pos = ret & 0x0f; - if (pos < 0x0f) - type |= regs[pos].type.vbi_type; + if (pos < ARRAY_SIZE(vbi_ram_default)) + type |= vbi_ram_default[pos].type.vbi_type; } return type; @@ -789,7 +791,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val) tvp5150_write_inittab(sd, tvp5150_init_default); /* Initializes VDP registers */ - tvp5150_vdp_init(sd, vbi_ram_default); + tvp5150_vdp_init(sd); /* Selects decoder input */ tvp5150_selmux(sd); @@ -1122,8 +1124,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f for (i = 0; i <= 23; i++) { svbi->service_lines[1][i] = 0; svbi->service_lines[0][i] = - tvp5150_set_vbi(sd, vbi_ram_default, - svbi->service_lines[0][i], 0xf0, i, 3); + tvp5150_set_vbi(sd, svbi->service_lines[0][i], + 0xf0, i, 3); } /* Enables FIFO */ tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); @@ -1149,7 +1151,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f for (i = 0; i <= 23; i++) { svbi->service_lines[0][i] = - tvp5150_get_vbi(sd, vbi_ram_default, i); + tvp5150_get_vbi(sd, i); mask |= svbi->service_lines[0][i]; } svbi->service_set = mask; diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index 78a8836d03e428d11998cc39de71a3dad735ebdd..6c0fd9438dd90f73e33b920acf9bad50d30d3cf8 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -2286,6 +2286,10 @@ void cx23885_card_setup(struct cx23885_dev 
*dev) &dev->i2c_bus[2].i2c_adap, "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840) { + /* set host data for clk_freq configuration */ + v4l2_set_subdev_hostdata(dev->sd_cx25840, + &dev->clk_freq); + dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE; v4l2_subdev_call(dev->sd_cx25840, core, load_fw); } diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 8f63df1cb418767e1a6627872cb461d477de404f..4612f26fcd6d5c1b183c3ed441e5e2c7e12b7f07 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -873,6 +873,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev) if (cx23885_boards[dev->board].clk_freq > 0) dev->clk_freq = cx23885_boards[dev->board].clk_freq; + if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE && + dev->pci->subsystem_device == 0x7137) { + /* Hauppauge ImpactVCBe device ID 0x7137 is populated + * with an 888, and a 25Mhz crystal, instead of the + * usual third overtone 50Mhz. The default clock rate must + * be overridden so the cx25840 is properly configured + */ + dev->clk_freq = 25000000; + } + dev->pci_bus = dev->pci->bus->number; dev->pci_slot = PCI_SLOT(dev->pci->devfn); cx23885_irq_add(dev, 0x001f00); diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index 04aa4a68a0aefa11132182e56d7231122158b93e..040c6c251d3a34c00e33c1c2ef42e5e28abef0cd 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -867,6 +867,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev) dev->nr = ++cx25821_devcount; sprintf(dev->name, "cx25821[%d]", dev->nr); + if (dev->nr >= ARRAY_SIZE(card)) { + CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card)); + return -ENODEV; + } if (dev->pci->device != 0x8210) { pr_info("%s(): Exiting. 
Incorrect Hardware device = 0x%02x\n", __func__, dev->pci->device); @@ -882,9 +886,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev) dev->channels[i].sram_channels = &cx25821_sram_channels[i]; } - if (dev->nr > 1) - CX25821_INFO("dev->nr > 1!"); - /* board config */ dev->board = 1; /* card[dev->nr]; */ dev->_max_num_decoders = MAX_DECODERS; diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index fc72013c7bc73a8fd5d46704380df5ceca554aca..d38cc87b13d99fd5e9f3c8f9909199d3245b7077 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -185,6 +185,13 @@ struct npu_thermalctrl { uint32_t pwr_level; }; +#define NPU_MAX_IRQ 3 + +struct npu_irq { + char *name; + int irq; +}; + struct npu_device { struct mutex ctx_lock; @@ -205,7 +212,7 @@ struct npu_device { uint32_t regulator_num; struct npu_regulator regulators[NPU_MAX_DT_NAME_LEN]; - uint32_t irq; + struct npu_irq irq[NPU_MAX_IRQ]; struct npu_ion_buf mapped_buffers; @@ -242,6 +249,6 @@ int npu_set_uc_power_level(struct npu_device *npu_dev, uint32_t pwr_level); int fw_init(struct npu_device *npu_dev); -void fw_deinit(struct npu_device *npu_dev); +void fw_deinit(struct npu_device *npu_dev, bool fw_alive); #endif /* _NPU_COMMON_H */ diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c index 4c2aace68eed75974395487745852356d243da96..996ca91ed44d4e8748cf8078d1ea3425fe76e079 100644 --- a/drivers/media/platform/msm/npu/npu_debugfs.c +++ b/drivers/media/platform/msm/npu/npu_debugfs.c @@ -18,6 +18,7 @@ */ #include +#include "npu_hw.h" #include "npu_hw_access.h" #include "npu_common.h" @@ -372,7 +373,14 @@ static ssize_t npu_debug_ctrl_write(struct file *file, pr_info("error in fw_init\n"); } else if (strcmp(buf, "off") == 0) { pr_info("triggering fw_deinit\n"); - fw_deinit(npu_dev); + fw_deinit(npu_dev, true); + } else if (strcmp(buf, "ssr") == 0) { + 
pr_info("trigger error irq\n"); + if (npu_enable_core_power(npu_dev)) + return -EPERM; + + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2); + npu_disable_core_power(npu_dev); } else if (strcmp(buf, "0") == 0) { pr_info("setting power state to 0\n"); npu_dev->pwrctrl.active_pwrlevel = 0; @@ -446,8 +454,8 @@ int npu_debugfs_init(struct npu_device *npu_dev) goto err; } - if (!debugfs_create_bool("fw_state", 0444, - debugfs->root, &(host_ctx->fw_enabled))) { + if (!debugfs_create_u32("fw_state", 0444, + debugfs->root, &(host_ctx->fw_state))) { pr_err("debugfs_creat_bool fail for fw_state\n"); goto err; } diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index bcdbf070bc2dbe741ebf451477dcf7716c3fb6d6..2cc21518989c3bdaea8addd90264904eda0b42cf 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -18,6 +18,7 @@ */ #include #include +#include #include #include #include @@ -155,6 +156,12 @@ static struct npu_reg npu_saved_bw_registers[] = { { BWMON2_ZONE_COUNT_THRESHOLD, 0, false }, }; +static const struct npu_irq npu_irq_info[NPU_MAX_IRQ] = { + {"ipc_irq", 0}, + {"error_irq", 0}, + {"wdg_bite_irq", 0}, +}; + /* ------------------------------------------------------------------------- * Entry Points for Probe * ------------------------------------------------------------------------- @@ -297,7 +304,7 @@ void npu_disable_core_power(struct npu_device *npu_dev) return; pwr->pwr_vote_num--; if (!pwr->pwr_vote_num) { - if (!npu_dev->host_ctx.fw_enabled) + if (npu_dev->host_ctx.fw_state == FW_DISABLED) npu_suspend_devbw(npu_dev); npu_disable_core_clocks(npu_dev); npu_disable_regulators(npu_dev); @@ -372,7 +379,7 @@ static int npu_set_power_level(struct npu_device *npu_dev) npu_dev->core_clks[i].clk_name)) continue; - if (!npu_dev->host_ctx.fw_enabled) { + if (npu_dev->host_ctx.fw_state == FW_DISABLED) { if (npu_is_post_clock( npu_dev->core_clks[i].clk_name)) continue; @@ -381,14 
+388,11 @@ static int npu_set_power_level(struct npu_device *npu_dev) pr_debug("requested rate of clock [%s] to [%ld]\n", npu_dev->core_clks[i].clk_name, pwrlevel->clk_freq[i]); - clk_rate = clk_round_rate(npu_dev->core_clks[i].clk, - pwrlevel->clk_freq[i]); - pr_debug("actual round clk rate [%ld]\n", clk_rate); ret = clk_set_rate(npu_dev->core_clks[i].clk, clk_rate); if (ret) { - pr_err("clk_set_rate %s to %ld failed with %d\n", + pr_debug("clk_set_rate %s to %ld failed with %d\n", npu_dev->core_clks[i].clk_name, clk_rate, ret); break; @@ -571,7 +575,7 @@ static void npu_disable_core_clocks(struct npu_device *npu_dev) for (i = (npu_dev->core_clk_num)-1; i >= 0 ; i--) { if (npu_is_exclude_clock(core_clks[i].clk_name)) continue; - if (!npu_dev->host_ctx.fw_enabled) { + if (npu_dev->host_ctx.fw_state == FW_DISABLED) { if (npu_is_post_clock(npu_dev->core_clks[i].clk_name)) continue; } @@ -679,18 +683,38 @@ static void npu_disable_regulators(struct npu_device *npu_dev) */ int npu_enable_irq(struct npu_device *npu_dev) { + int i; + /* clear pending irq state */ REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); - enable_irq(npu_dev->irq); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), NPU_ERROR_IRQ_MASK); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0), NPU_ERROR_IRQ_MASK); + + for (i = 0; i < NPU_MAX_IRQ; i++) { + if (npu_dev->irq[i].irq != 0) { + enable_irq(npu_dev->irq[i].irq); + pr_debug("enable irq %d\n", npu_dev->irq[i].irq); + } + } return 0; } void npu_disable_irq(struct npu_device *npu_dev) { - disable_irq(npu_dev->irq); + int i; + + for (i = 0; i < NPU_MAX_IRQ; i++) { + if (npu_dev->irq[i].irq != 0) { + disable_irq(npu_dev->irq[i].irq); + pr_debug("disable irq %d\n", npu_dev->irq[i].irq); + } + } + + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0), 0); /* clear pending irq state */ REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), NPU_ERROR_IRQ_MASK); } /* 
------------------------------------------------------------------------- @@ -1089,6 +1113,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev, uint32_t j = 0; uint32_t index; uint32_t clk_array_values[NUM_TOTAL_CLKS]; + uint32_t clk_rate; struct npu_pwrlevel *level; if (of_property_read_u32(child, "reg", &index)) @@ -1116,18 +1141,25 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev, of_property_read_string_index(pdev->dev.of_node, "clock-names", i, &clock_name); + if (npu_is_exclude_rate_clock(clock_name)) + continue; + for (j = 0; j < npu_dev->core_clk_num; j++) { if (!strcmp(npu_clock_order[j], - clock_name)) { - level->clk_freq[j] = - clk_array_values[i]; + clock_name)) break; - } } + if (j == npu_dev->core_clk_num) { pr_err("pwrlevel clock is not in ordered list\n"); return -EINVAL; } + + clk_rate = clk_round_rate(npu_dev->core_clks[j].clk, + clk_array_values[i]); + pr_debug("clk %s rate [%ld]:[%ld]\n", clock_name, + clk_array_values[i], clk_rate); + level->clk_freq[j] = clk_rate; } } @@ -1201,6 +1233,42 @@ static int npu_thermalctrl_init(struct npu_device *npu_dev) return ret; } +static int npu_irq_init(struct npu_device *npu_dev) +{ + unsigned long irq_type; + int ret = 0, i; + + memcpy(npu_dev->irq, npu_irq_info, sizeof(npu_irq_info)); + for (i = 0; i < NPU_MAX_IRQ; i++) { + irq_type = IRQF_TRIGGER_RISING | IRQF_ONESHOT; + npu_dev->irq[i].irq = platform_get_irq_byname( + npu_dev->pdev, npu_dev->irq[i].name); + if (npu_dev->irq[i].irq < 0) { + pr_err("get_irq for %s failed\n\n", + npu_dev->irq[i].name); + ret = -EINVAL; + break; + } + + pr_debug("irq %s: %d\n", npu_dev->irq[i].name, + npu_dev->irq[i].irq); + irq_set_status_flags(npu_dev->irq[i].irq, + IRQ_NOAUTOEN); + ret = devm_request_irq(&npu_dev->pdev->dev, + npu_dev->irq[i].irq, npu_intr_hdler, + irq_type, npu_dev->irq[i].name, + npu_dev); + if (ret) { + pr_err("devm_request_irq(%s:%d) failed\n", + npu_dev->irq[i].name, + npu_dev->irq[i].irq); + break; + } + } + + return 
ret; +} + /* ------------------------------------------------------------------------- * Probe/Remove * ------------------------------------------------------------------------- @@ -1245,6 +1313,10 @@ static int npu_probe(struct platform_device *pdev) if (rc) goto error_get_dev_num; + rc = npu_irq_init(npu_dev); + if (rc) + goto error_get_dev_num; + npu_dev->npu_base = devm_ioremap(&pdev->dev, res->start, npu_dev->reg_size); if (unlikely(!npu_dev->npu_base)) { @@ -1257,21 +1329,6 @@ static int npu_probe(struct platform_device *pdev) pr_debug("hw base phy address=0x%x virt=%pK\n", npu_dev->npu_phys, npu_dev->npu_base); - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - pr_err("unable to get irq\n"); - rc = -ENOMEM; - goto error_get_dev_num; - } - rc = devm_request_irq(&pdev->dev, res->start, - npu_intr_hdler, 0x0, "npu", npu_dev); - if (rc) { - pr_err("devm_request_irq() failed\n"); - goto error_get_dev_num; - } - disable_irq(res->start); - npu_dev->irq = res->start; - pr_debug("irq %d\n", npu_dev->irq); /* character device might be optional */ rc = alloc_chrdev_region(&npu_dev->dev_num, 0, 1, DRIVER_NAME); if (rc < 0) { diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c index 0730e2019edca11eeabbee2ccc86165d70a99e44..36408bacb15f3399f584e1da7ca795b23320fc82 100644 --- a/drivers/media/platform/msm/npu/npu_host_ipc.c +++ b/drivers/media/platform/msm/npu/npu_host_ipc.c @@ -33,6 +33,9 @@ #define QUEUE_TBL_VERSION 0x87654321 +static DEFINE_SPINLOCK(hfi_rd_lock); +static DEFINE_SPINLOCK(hfi_wr_lock); + /* ------------------------------------------------------------------------- * Data Structures * ------------------------------------------------------------------------- @@ -207,6 +210,7 @@ static int ipc_queue_read(struct npu_device *npu_dev, uint32_t packet_size, new_read_idx; size_t read_ptr; size_t offset = 0; + unsigned long flags; offset = (size_t)IPC_ADDR + sizeof(struct 
hfi_queue_tbl_header) + target_que * sizeof(struct hfi_queue_header); @@ -214,6 +218,8 @@ static int ipc_queue_read(struct npu_device *npu_dev, if ((packet == NULL) || (is_tx_req_set == NULL)) return -EINVAL; + spin_lock_irqsave(&hfi_rd_lock, flags); + /* Read the queue */ MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue, HFI_QUEUE_HEADER_SIZE); @@ -288,6 +294,8 @@ static int ipc_queue_read(struct npu_device *npu_dev, (size_t)&(queue.qhdr_read_idx) - (size_t)&queue)), (uint8_t *)&queue.qhdr_read_idx, sizeof(queue.qhdr_read_idx)); + spin_unlock_irqrestore(&hfi_rd_lock, flags); + return status; } @@ -301,6 +309,7 @@ static int ipc_queue_write(struct npu_device *npu_dev, uint32_t empty_space; void *write_ptr; uint32_t read_idx; + unsigned long flags; size_t offset = (size_t)IPC_ADDR + sizeof(struct hfi_queue_tbl_header) + @@ -309,6 +318,8 @@ static int ipc_queue_write(struct npu_device *npu_dev, if ((packet == NULL) || (is_rx_req_set == NULL)) return -EINVAL; + spin_lock_irqsave(&hfi_wr_lock, flags); + MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue, HFI_QUEUE_HEADER_SIZE); packet_size = (*(uint32_t *)packet); @@ -384,6 +395,8 @@ static int ipc_queue_write(struct npu_device *npu_dev, (size_t)&(queue.qhdr_write_idx) - (size_t)&queue))), &queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx)); + spin_unlock_irqrestore(&hfi_wr_lock, flags); + return status; } diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h index 8ab802e41ee6e05018d91cd70081d43575c2063a..3e71c53187c056dfd32aec499b2ff211b2ce5bd9 100644 --- a/drivers/media/platform/msm/npu/npu_hw.h +++ b/drivers/media/platform/msm/npu/npu_hw.h @@ -24,6 +24,18 @@ #define NPU_MASTERn_IPC_IRQ_IN_CTRL(n) (0x00101008+0x1000*(n)) #define NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S 4 #define NPU_GPR0 (0x00100100) +#define NPU_MASTERn_ERROR_IRQ_STATUS(n) (0x00101010+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_INCLUDE(n) (0x00101014+0x1000*(n)) +#define 
NPU_MASTERn_ERROR_IRQ_ENABLE(n) (0x00101018+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_CLEAR(n) (0x0010101C+0x1000*(n)) +#define NPU_MASTERn_ERROR_IRQ_SET(n) (0x00101020+0x1000*(n)) +#define NPU_ERROR_IRQ_MASK 0x000000E3 +#define NPU_MASTERn_WDOG_IRQ_STATUS(n) (0x00101030+0x1000*(n)) +#define NPU_WDOG_BITE_IRQ_STATUS (1 << 1) +#define NPU_MASTERn_WDOG_IRQ_INCLUDE(n) (0x00101034+0x1000*(n)) +#define NPU_WDOG_BITE_IRQ_INCLUDE (1 << 1) + + #define NPU_GPR1 (0x00100104) #define NPU_GPR2 (0x00100108) #define NPU_GPR3 (0x0010010C) diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c index 37b152f9c558dd48ea4394f8c35884f6aed00ce1..94e194af7d9cadb7fb42e538021f58802229fe1e 100644 --- a/drivers/media/platform/msm/npu/npu_hw_access.c +++ b/drivers/media/platform/msm/npu/npu_hw_access.c @@ -111,8 +111,25 @@ void *npu_ipc_addr(void) */ void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num) { + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + uint32_t wdg_irq_sts = 0, error_irq_sts = 0; + /* Clear irq state */ REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); + + wdg_irq_sts = REGR(npu_dev, NPU_MASTERn_WDOG_IRQ_STATUS(0)); + if (wdg_irq_sts != 0) { + pr_err("wdg irq %x\n", wdg_irq_sts); + host_ctx->wdg_irq_sts |= wdg_irq_sts; + } + + error_irq_sts = REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_STATUS(0)); + error_irq_sts &= REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0)); + if (error_irq_sts != 0) { + REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), error_irq_sts); + pr_err("error irq %x\n", error_irq_sts); + host_ctx->err_irq_sts |= error_irq_sts; + } } int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev) diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index 640f60f23be12e6baf166b4db5adb8692c7b934f..0348b3ef9788bf08645e17d2a6352e08fed3d654 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -49,6 +49,7 @@ static 
void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg); static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg); static void host_session_msg_hdlr(struct npu_device *npu_dev); static void host_session_log_hdlr(struct npu_device *npu_dev); +static int host_error_hdlr(struct npu_device *npu_dev); /* ------------------------------------------------------------------------- * Function Definitions - Init / Deinit @@ -57,16 +58,33 @@ static void host_session_log_hdlr(struct npu_device *npu_dev); int fw_init(struct npu_device *npu_dev) { uint32_t reg_val = 0; + unsigned long flags; struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + int ret = 0; - if (host_ctx->fw_enabled) + spin_lock_irqsave(&host_ctx->lock, flags); + if (host_ctx->fw_state == FW_ENABLED) { + host_ctx->fw_ref_cnt++; + spin_unlock_irqrestore(&host_ctx->lock, flags); return 0; + } else if (host_ctx->fw_state == FW_ENABLING || + host_ctx->fw_state == FW_DISABLING) { + spin_unlock_irqrestore(&host_ctx->lock, flags); + return -EAGAIN; + } - if (npu_enable_core_power(npu_dev)) - return -EPERM; + host_ctx->fw_state = FW_ENABLING; + spin_unlock_irqrestore(&host_ctx->lock, flags); + + if (npu_enable_core_power(npu_dev)) { + ret = -EPERM; + goto enable_pw_fail; + } - if (npu_enable_sys_cache(npu_dev)) - return -EPERM; + if (npu_enable_sys_cache(npu_dev)) { + ret = -EPERM; + goto enable_sys_cache_fail; + } /* Boot the NPU subsystem */ host_ctx->subsystem_handle = subsystem_get_local("npu"); @@ -78,8 +96,10 @@ int fw_init(struct npu_device *npu_dev) REGW(npu_dev, REG_FW_TO_HOST_EVENT, 0x0); /* Post PIL clocks */ - if (npu_enable_post_pil_clocks(npu_dev)) - return -EPERM; + if (npu_enable_post_pil_clocks(npu_dev)) { + ret = -EPERM; + goto enable_post_clk_fail; + } /* * Set logging state and clock gating state @@ -99,15 +119,17 @@ int fw_init(struct npu_device *npu_dev) /* Keep reading ctrl status until NPU is ready */ pr_debug("waiting for status ready from fw\n"); - if 
(wait_for_fw_ready(npu_dev)) - return -EPERM; - - host_ctx->fw_enabled = 1; + if (wait_for_fw_ready(npu_dev)) { + ret = -EPERM; + goto wait_fw_ready_fail; + } npu_host_ipc_post_init(npu_dev); - if (npu_enable_irq(npu_dev)) - return -EPERM; + if (npu_enable_irq(npu_dev)) { + ret = -EPERM; + goto wait_fw_ready_fail; + } /* Set logging state */ if (!npu_hw_log_enabled()) { @@ -115,39 +137,67 @@ int fw_init(struct npu_device *npu_dev) turn_off_fw_logging(npu_dev); } + spin_lock_irqsave(&host_ctx->lock, flags); + host_ctx->fw_state = FW_ENABLED; + host_ctx->fw_ref_cnt++; + spin_unlock_irqrestore(&host_ctx->lock, flags); pr_debug("firmware init complete\n"); - return 0; + + return ret; + +wait_fw_ready_fail: + subsystem_put_local(host_ctx->subsystem_handle); +enable_post_clk_fail: + npu_disable_sys_cache(npu_dev); +enable_sys_cache_fail: + npu_disable_core_power(npu_dev); +enable_pw_fail: + host_ctx->fw_state = FW_DISABLED; + return ret; } -void fw_deinit(struct npu_device *npu_dev) +void fw_deinit(struct npu_device *npu_dev, bool fw_alive) { struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; struct ipc_cmd_shutdown_pkt cmd_shutdown_pkt; + unsigned long flags; int ret = 0; - if (!host_ctx->fw_enabled) + spin_lock_irqsave(&host_ctx->lock, flags); + host_ctx->fw_ref_cnt--; + pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt); + if ((host_ctx->fw_state != FW_ENABLED) || + (host_ctx->fw_ref_cnt > 0 && fw_alive)) { + spin_unlock_irqrestore(&host_ctx->lock, flags); return; - - /* Command header */ - cmd_shutdown_pkt.header.cmd_type = NPU_IPC_CMD_SHUTDOWN; - cmd_shutdown_pkt.header.size = sizeof(struct ipc_cmd_shutdown_pkt); - cmd_shutdown_pkt.header.trans_id = 1; - cmd_shutdown_pkt.header.flags = 0xF; - ret = npu_host_ipc_send_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY, - &cmd_shutdown_pkt); - - pr_debug("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret); - - if (ret) - pr_err("npu_host_ipc_send_cmd failed\n"); + } + host_ctx->fw_state = FW_DISABLING; + 
spin_unlock_irqrestore(&host_ctx->lock, flags); + + if (fw_alive) { + /* Command header */ + cmd_shutdown_pkt.header.cmd_type = NPU_IPC_CMD_SHUTDOWN; + cmd_shutdown_pkt.header.size = + sizeof(struct ipc_cmd_shutdown_pkt); + cmd_shutdown_pkt.header.trans_id = 1; + cmd_shutdown_pkt.header.flags = 0xF; + ret = npu_host_ipc_send_cmd(npu_dev, + IPC_QUEUE_CMD_HIGH_PRIORITY, &cmd_shutdown_pkt); + + pr_debug("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret); + + if (ret) + pr_err("npu_host_ipc_send_cmd failed\n"); + } npu_disable_irq(npu_dev); npu_disable_sys_cache(npu_dev); subsystem_put_local(host_ctx->subsystem_handle); - host_ctx->fw_enabled = 0; + host_ctx->fw_state = FW_DISABLED; npu_disable_core_power(npu_dev); pr_debug("firmware deinit complete\n"); + return; } int npu_host_init(struct npu_device *npu_dev) @@ -160,6 +210,9 @@ int npu_host_init(struct npu_device *npu_dev) init_completion(&host_ctx->unload_done); host_ctx->sys_cache_disable = 0; + spin_lock_init(&host_ctx->lock); + host_ctx->fw_state = FW_DISABLED; + host_ctx->fw_ref_cnt = 0; host_ctx->wq = npu_create_wq(host_ctx, "irq_hdl", host_irq_wq, &host_ctx->irq_work); @@ -200,6 +253,41 @@ irqreturn_t npu_intr_hdler(int irq, void *ptr) * Function Definitions - Control * ------------------------------------------------------------------------- */ +static int host_error_hdlr(struct npu_device *npu_dev) +{ + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + struct npu_network *network = NULL; + bool fw_alive = true; + int i; + + if ((host_ctx->wdg_irq_sts == 0) && (host_ctx->err_irq_sts == 0)) + return 0; + + if (host_ctx->wdg_irq_sts) { + pr_info("watchdog irq triggered\n"); + fw_alive = false; + } + + fw_deinit(npu_dev, fw_alive); + fw_init(npu_dev); + + host_ctx->wdg_irq_sts = 0; + host_ctx->err_irq_sts = 0; + + /* flush all pending npu cmds with error */ + for (i = 0; i < MAX_LOADED_NETWORK; i++) { + network = &host_ctx->networks[i]; + if (network->id != 0) + network->cmd_error = true; + } + + 
complete_all(&host_ctx->exec_done); + complete_all(&host_ctx->load_done); + complete_all(&host_ctx->unload_done); + + return 1; +} + static void host_irq_wq(struct work_struct *work) { struct npu_host_ctx *host_ctx; @@ -207,6 +295,10 @@ static void host_irq_wq(struct work_struct *work) host_ctx = container_of(work, struct npu_host_ctx, irq_work); npu_dev = container_of(host_ctx, struct npu_device, host_ctx); + + if (host_error_hdlr(npu_dev)) + return; + host_session_log_hdlr(npu_dev); host_session_msg_hdlr(npu_dev); } @@ -258,7 +350,9 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx) { int32_t i; struct npu_network *network = ctx->networks; + unsigned long flags; + spin_lock_irqsave(&ctx->lock, flags); for (i = 0; i < MAX_LOADED_NETWORK; i++) { if (network->id == 0) { network->id = i + 1; @@ -272,8 +366,11 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx) network++; } if (i >= MAX_LOADED_NETWORK) - return NULL; - ctx->network_num++; + network = NULL; + else + ctx->network_num++; + spin_unlock_irqrestore(&ctx->lock, flags); + return network; } @@ -288,10 +385,13 @@ static struct npu_network *get_network(struct npu_host_ctx *ctx, int64_t id) static void free_network(struct npu_host_ctx *ctx, int64_t id) { struct npu_network *network = get_network(ctx, id); + unsigned long flags; if (network) { + spin_lock_irqsave(&ctx->lock, flags); memset(network, 0, sizeof(struct npu_network)); ctx->network_num--; + spin_unlock_irqrestore(&ctx->lock, flags); } } @@ -489,8 +589,10 @@ int32_t npu_host_load_network(struct npu_device *npu_dev, return ret; network = alloc_network(host_ctx); - if (!network) - return -ENOMEM; + if (!network) { + ret = -ENOMEM; + goto err_deinit_fw; + } network->buf_hdl = load_ioctl->buf_ion_hdl; network->size = load_ioctl->buf_size; @@ -525,8 +627,10 @@ int32_t npu_host_load_network(struct npu_device *npu_dev, pr_debug("NPU_IPC_CMD_LOAD sent status: %d\n", ret); - if (ret) - return -EIO; + if (ret) { + ret = -EIO; + 
goto error_free_network; + } if (!wait_for_completion_interruptible_timeout( &host_ctx->load_done, NW_LOAD_TIMEOUT)) { @@ -535,10 +639,19 @@ int32_t npu_host_load_network(struct npu_device *npu_dev, goto error_free_network; } + if (network->cmd_error) { + network->cmd_error = false; + ret = -EIO; + pr_err("load cmd returns with error\n"); + goto error_free_network; + } + return ret; error_free_network: free_network(host_ctx, network->id); +err_deinit_fw: + fw_deinit(npu_dev, true); return ret; } @@ -567,25 +680,26 @@ int32_t npu_host_unload_network(struct npu_device *npu_dev, ret = npu_host_ipc_send_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &unload_packet); - pr_debug("NPU_IPC_CMD_UNLOAD sent status: %d\n", ret); - - if (ret) + if (ret) { + pr_err("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret); return -EIO; + } if (!wait_for_completion_interruptible_timeout(&host_ctx->unload_done, NW_UNLOAD_TIMEOUT)) { pr_err_ratelimited("npu: NPU_IPC_CMD_UNLOAD time out\n"); ret = -ETIMEDOUT; + } else if (network->cmd_error) { + network->cmd_error = false; + ret = -EIO; + pr_err("unload cmd returns with error\n"); } else { /* * free the network on the kernel if the corresponding ACO * handle is unloaded on the firmware side */ free_network(host_ctx, (int64_t)unload->network_hdl); - if (host_ctx->network_num <= 0) { - fw_deinit(npu_dev); - host_ctx->network_num = 0; - } + fw_deinit(npu_dev, true); } return ret; @@ -651,6 +765,10 @@ int32_t npu_host_exec_network(struct npu_device *npu_dev, /* dump debug stats */ npu_dump_debug_timeout_stats(npu_dev); ret = -ETIMEDOUT; + } else if (network->cmd_error) { + network->cmd_error = false; + ret = -EIO; + pr_err("execute cmd returns with error\n"); } /* Invalidate output buffers */ diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index b5103a6635f4130073f668d7209b34a5f9aea26e..80eaca5285870411b0da53be6f286f8471431496 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ 
b/drivers/media/platform/msm/npu/npu_mgr.h @@ -17,6 +17,7 @@ * Includes * ------------------------------------------------------------------------- */ +#include #include "npu_hw_access.h" /* ------------------------------------------------------------------------- @@ -50,12 +51,22 @@ struct npu_network { uint32_t ipc_trans_id; uint32_t priority; uint32_t perf_mode; + + bool cmd_error; +}; + +enum fw_state { + FW_DISABLED = 0, + FW_ENABLING = 1, + FW_DISABLING = 2, + FW_ENABLED = 3, }; struct npu_host_ctx { + spinlock_t lock; void *subsystem_handle; - bool fw_enabled; - bool power_enabled; + enum fw_state fw_state; + int32_t fw_ref_cnt; int32_t power_vote_num; struct work_struct irq_work; struct workqueue_struct *wq; @@ -65,6 +76,9 @@ struct npu_host_ctx { int32_t network_num; struct npu_network networks[MAX_LOADED_NETWORK]; bool sys_cache_disable; + + uint32_t err_irq_sts; + uint32_t wdg_irq_sts; }; struct npu_device; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c index 86e63c698a656be965781342bbd1d6b410bae253..8200b879f8e4f3f45494890ffe5c2f20e0a319ec 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -587,36 +587,36 @@ static struct sde_mdp_format_params sde_mdp_format_map[] = { FMT_RGB_8888( SDE_PIX_FMT_ABGR_8888, "SDE/ABGR_8888", SDE_MDP_FMT_LINEAR, - 0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, + 0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_XRGB_8888, "SDE/XRGB_8888", SDE_MDP_FMT_LINEAR, - 0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, + 0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_ARGB_8888, "SDE/ARGB_8888", SDE_MDP_FMT_LINEAR, - 0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, + 0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_RGBA_8888, "SDE/RGBA_8888", SDE_MDP_FMT_LINEAR, - 0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, + 0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_RGBX_8888, "SDE/RGBX_8888", SDE_MDP_FMT_LINEAR, - 0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, + 0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_BGRA_8888, "SDE/BGRA_8888", SDE_MDP_FMT_LINEAR, - 0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + 0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_BGRX_8888, "SDE/BGRX_8888", SDE_MDP_FMT_LINEAR, - 0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + 0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE), FMT_RGB_8888( SDE_PIX_FMT_XBGR_8888, "SDE/XBGR_8888", SDE_MDP_FMT_LINEAR, - 0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, + 0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V1, "Y_CRCB_H2V1", @@ -755,35 +755,35 @@ static struct sde_mdp_format_params sde_mdp_format_map[] = { C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA), FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102, "SDE/RGBA_1010102", SDE_MDP_FMT_LINEAR, - 0, 1, C2_R_Cr, C0_G_Y, 
C1_B_Cb, C3_ALPHA, + 0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102, "SDE/RGBX_1010102", SDE_MDP_FMT_LINEAR, - 0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, + 0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102, "SDE/BGRA_1010102", SDE_MDP_FMT_LINEAR, - 0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + 0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102, "SDE/BGRX_1010102", SDE_MDP_FMT_LINEAR, - 0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, + 0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010, "SDE/ARGB_2101010", SDE_MDP_FMT_LINEAR, - 0, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, + 0, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010, "SDE/XRGB_2101010", SDE_MDP_FMT_LINEAR, - 0, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, + 0, 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010, "SDE/ABGR_2101010", SDE_MDP_FMT_LINEAR, - 0, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, + 0, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE), FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010, "SDE/XBGR_2101010", SDE_MDP_FMT_LINEAR, - 0, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, + 0, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE), }; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c index dd026233f3bdeb4640307ddde19357d17617ae99..fe709b55a8f6c156d90af970a44f6e3df83dbc47 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c @@ -485,7 +485,7 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain, SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name); /* generate dump, but no panic */ - 
sde_rot_evtlog_tout_handler(false, __func__, "rot", "vbif_dbg_bus"); + SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus"); /* * return -ENOSYS to allow smmu driver to dump out useful diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index 2e335fdf6552627bdd75ddd1b0f688c50b58fe68..f1ba2f85a4152080e2af7f6ff27f6427c0fb9aa7 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -1365,6 +1365,19 @@ int create_pkt_cmd_session_set_property( memcpy(hfi, (struct hfi_intra_period *) pdata, sizeof(struct hfi_intra_period)); pkt->size += sizeof(struct hfi_intra_period); + + if (hfi->bframes) { + struct hfi_enable *hfi_enable; + u32 *prop_type; + + prop_type = (u32 *)((u8 *)&pkt->rg_property_data[0] + + sizeof(u32) + sizeof(struct hfi_intra_period)); + *prop_type = HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B; + hfi_enable = (struct hfi_enable *)(prop_type + 1); + hfi_enable->enable = true; + pkt->num_properties = 2; + pkt->size += sizeof(struct hfi_enable) + sizeof(u32); + } break; } case HAL_CONFIG_VENC_IDR_PERIOD: @@ -1913,6 +1926,21 @@ int create_pkt_cmd_session_set_property( pkt->size += sizeof(struct hfi_hdr10_pq_sei); break; } + case HAL_CONFIG_VENC_VBV_HRD_BUF_SIZE: + { + struct hfi_vbv_hdr_buf_size *hfi; + struct hal_vbv_hdr_buf_size *prop = + (struct hal_vbv_hdr_buf_size *) pdata; + + pkt->rg_property_data[0] = + HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE; + hfi = (struct hfi_vbv_hdr_buf_size *) + &pkt->rg_property_data[1]; + + hfi->vbv_hdr_buf_size = prop->vbv_hdr_buf_size; + pkt->size += sizeof(struct hfi_vbv_hdr_buf_size); + break; + } /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */ case HAL_CONFIG_BUFFER_REQUIREMENTS: case HAL_CONFIG_PRIORITY: diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index 
8ad0abe7bff67187e6943067c9a65339c244e6b1..2262ae6dc3a14ff307f81ecdb79e906fe26a873c 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -32,6 +32,7 @@ #include "venus_boot.h" #include "vidc_hfi_api.h" #include "msm_v4l2_private.h" +#include "msm_vidc_clocks.h" #define BASE_DEVICE_NUMBER 32 @@ -252,6 +253,14 @@ static int msm_v4l2_queryctrl(struct file *file, void *fh, return msm_vidc_query_ctrl((void *)vidc_inst, ctrl); } +static long msm_v4l2_default(struct file *file, void *fh, + bool valid_prio, unsigned int cmd, void *arg) +{ + struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh); + + return msm_vidc_private((void *)vidc_inst, cmd, arg); +} + static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { .vidioc_querycap = msm_v4l2_querycap, .vidioc_enum_fmt_vid_cap_mplane = msm_v4l2_enum_fmt, @@ -278,6 +287,7 @@ static const struct v4l2_ioctl_ops msm_v4l2_ioctl_ops = { .vidioc_g_parm = msm_v4l2_g_parm, .vidioc_g_crop = msm_v4l2_g_crop, .vidioc_enum_framesizes = msm_v4l2_enum_framesizes, + .vidioc_default = msm_v4l2_default, }; static const struct v4l2_ioctl_ops msm_v4l2_enc_ioctl_ops = { @@ -352,6 +362,7 @@ static int msm_vidc_initialize_core(struct platform_device *pdev, } INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler); + INIT_WORK(&core->ssr_work, msm_vidc_ssr_handler); mutex_lock(&core->lock); core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES, @@ -360,6 +371,7 @@ static int msm_vidc_initialize_core(struct platform_device *pdev, dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__); mutex_unlock(&core->lock); + msm_vidc_init_core_clk_ops(core); return rc; } @@ -488,7 +500,7 @@ static const struct of_device_id msm_vidc_dt_match[] = { {.compatible = "qcom,msm-vidc"}, {.compatible = "qcom,msm-vidc,context-bank"}, {.compatible = "qcom,msm-vidc,bus"}, - {.compatible = "qcom,msm-vidc,mem-adsp"}, + {.compatible = "qcom,msm-vidc,mem-cdsp"}, {} }; @@ -680,9 +692,9 @@ 
static int msm_vidc_probe_vidc_device(struct platform_device *pdev) return rc; } -static int msm_vidc_probe_mem_adsp(struct platform_device *pdev) +static int msm_vidc_probe_mem_cdsp(struct platform_device *pdev) { - return read_mem_adsp_resources_from_dt(pdev); + return read_mem_cdsp_resources_from_dt(pdev); } static int msm_vidc_probe_context_bank(struct platform_device *pdev) @@ -711,8 +723,8 @@ static int msm_vidc_probe(struct platform_device *pdev) "qcom,msm-vidc,context-bank")) { return msm_vidc_probe_context_bank(pdev); } else if (of_device_is_compatible(pdev->dev.of_node, - "qcom,msm-vidc,mem-adsp")) { - return msm_vidc_probe_mem_adsp(pdev); + "qcom,msm-vidc,mem-cdsp")) { + return msm_vidc_probe_mem_cdsp(pdev); } /* How did we end up here? */ diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index 15c91a67ba49c4bdc93d04dd4a1506e7671ce918..405575f8f904f07b1953d600fc5a6abeccc70a92 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -1154,13 +1154,6 @@ static struct msm_vidc_format venc_formats[] = { .get_frame_size = get_frame_size_nv12_ubwc, .type = OUTPUT_PORT, }, - { - .name = "RGBA 8:8:8:8", - .description = "RGBA 8:8:8:8", - .fourcc = V4L2_PIX_FMT_RGB32, - .get_frame_size = get_frame_size_rgba, - .type = OUTPUT_PORT, - }, { .name = "H264", .description = "H264 compressed format", @@ -1514,12 +1507,14 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: { int temp = 0; + enable.enable = false; switch (ctrl->val) { case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB: temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB; break; case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES: temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES; + enable.enable = true; break; case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE: default: @@ -1527,6 +1522,19 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) 
break; } + temp_ctrl = + TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE); + if (!temp_ctrl->val) { + rc = msm_comm_try_set_prop(inst, + HAL_PARAM_VENC_LOW_LATENCY, &enable.enable); + if (rc) + dprintk(VIDC_ERR, + "SliceMode Low Latency enable fail\n"); + else + inst->clk_data.low_latency_mode = + (bool) enable.enable; + } + if (temp) temp_ctrl = TRY_GET_CTRL(temp); diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 3e9fa354b042b1f5b08690d6870bf74f5c44dfe9..3b8e85fe700112c8a880149d811fa5d181cb7d28 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -905,12 +905,77 @@ static inline int msm_vidc_verify_buffer_counts(struct msm_vidc_inst *inst) return rc; } +int msm_vidc_set_internal_config(struct msm_vidc_inst *inst) +{ + int rc = 0; + u32 rc_mode; + bool set_rc = false; + struct hal_vbv_hdr_buf_size hrd_buf_size; + struct hal_enable latency; + struct hfi_device *hdev; + u32 codec; + + if (!inst || !inst->core || !inst->core->device) { + dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__); + return -EINVAL; + } + + if (inst->session_type != MSM_VIDC_ENCODER) + return rc; + + hdev = inst->core->device; + + codec = inst->fmts[CAPTURE_PORT].fourcc; + rc_mode = msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_BITRATE_MODE); + latency.enable = msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE); + + if (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR) { + rc_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_MBR; + set_rc = true; + } else if (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && + latency.enable == V4L2_MPEG_MSM_VIDC_ENABLE && + codec != V4L2_PIX_FMT_VP8) { + rc_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; + set_rc = true; + } + + if (set_rc) { + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HAL_PARAM_VENC_RATE_CONTROL, + (void *)&rc_mode); + } + + if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR || + rc_mode == 
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) && + (codec != V4L2_PIX_FMT_VP8)) { + hrd_buf_size.vbv_hdr_buf_size = 1000; + dprintk(VIDC_DBG, "Enable cbr+ hdr_buf_size %d :\n", + hrd_buf_size.vbv_hdr_buf_size); + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HAL_CONFIG_VENC_VBV_HRD_BUF_SIZE, + (void *)&hrd_buf_size); + + latency.enable = V4L2_MPEG_MSM_VIDC_ENABLE; + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY, + (void *)&latency); + + inst->clk_data.low_latency_mode = latency.enable; + } + + return rc; +} + static inline int start_streaming(struct msm_vidc_inst *inst) { int rc = 0; struct hfi_device *hdev; struct hal_buffer_size_minimum b; + dprintk(VIDC_DBG, "%s: %x : inst %pK\n", __func__, + hash32_ptr(inst->session), inst); hdev = inst->core->device; /* Check if current session is under HW capability */ @@ -928,8 +993,15 @@ static inline int start_streaming(struct msm_vidc_inst *inst) goto fail_start; } + rc = msm_vidc_set_internal_config(inst); + if (rc) { + dprintk(VIDC_ERR, + "Set internal config failed %pK\n", inst); + goto fail_start; + } + /* Decide work route for current session */ - rc = msm_vidc_decide_work_route(inst); + rc = call_core_op(inst->core, decide_work_route, inst); if (rc) { dprintk(VIDC_ERR, "Failed to decide work route for session %pK\n", inst); @@ -937,7 +1009,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst) } /* Decide work mode for current session */ - rc = msm_vidc_decide_work_mode(inst); + rc = call_core_op(inst->core, decide_work_mode, inst); if (rc) { dprintk(VIDC_ERR, "Failed to decide work mode for session %pK\n", inst); @@ -1004,14 +1076,13 @@ static inline int start_streaming(struct msm_vidc_inst *inst) } } - if (is_batching_allowed(inst)) { - dprintk(VIDC_DBG, - "%s: batching enabled for inst %pK (%#x)\n", - __func__, inst, hash32_ptr(inst->session)); + if (is_batching_allowed(inst)) inst->batch.enable = true; - /* this will disable dcvs as 
batching enabled */ - msm_dcvs_try_enable(inst); - } + else + inst->batch.enable = false; + dprintk(VIDC_DBG, "%s: batching %s for inst %pK (%#x)\n", + __func__, inst->batch.enable ? "enabled" : "disabled", + inst, hash32_ptr(inst->session)); /* * For seq_changed_insufficient, driver should set session_continue @@ -1147,6 +1218,9 @@ static inline int stop_streaming(struct msm_vidc_inst *inst) { int rc = 0; + dprintk(VIDC_DBG, "%s: %x : inst %pK\n", __func__, + hash32_ptr(inst->session), inst); + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); if (rc) dprintk(VIDC_ERR, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index 4b30929ea670119781363259319309dcd9d80914..1a258c452820e5676dea6f313d3ef89c2b6067c3 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -22,6 +22,24 @@ #define MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO (1 << 16) #define MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO (5 << 16) +static unsigned long msm_vidc_calc_freq_ar50(struct msm_vidc_inst *inst, + u32 filled_len); +static int msm_vidc_decide_work_mode_ar50(struct msm_vidc_inst *inst); +static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, + u32 filled_len); + +struct msm_vidc_core_ops core_ops_vpu4 = { + .calc_freq = msm_vidc_calc_freq_ar50, + .decide_work_route = NULL, + .decide_work_mode = msm_vidc_decide_work_mode_ar50, +}; + +struct msm_vidc_core_ops core_ops_vpu5 = { + .calc_freq = msm_vidc_calc_freq, + .decide_work_route = msm_vidc_decide_work_route, + .decide_work_mode = msm_vidc_decide_work_mode, +}; + static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs) { dprintk(VIDC_PROF, @@ -373,6 +391,8 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) if (!inst->clk_data.dcvs_mode || inst->batch.enable) { dprintk(VIDC_DBG, "Skip DCVS (dcvs %d, batching %d)\n", inst->clk_data.dcvs_mode, inst->batch.enable); + /* Request 
right clocks (load normal clocks) */ + inst->clk_data.load = inst->clk_data.load_norm; return 0; } @@ -574,8 +594,77 @@ void msm_comm_update_input_cr(struct msm_vidc_inst *inst, mutex_unlock(&inst->input_crs.lock); } +static unsigned long msm_vidc_calc_freq_ar50(struct msm_vidc_inst *inst, + u32 filled_len) +{ + unsigned long freq = 0; + unsigned long vpp_cycles = 0, vsp_cycles = 0; + u32 vpp_cycles_per_mb; + u32 mbs_per_second; + struct msm_vidc_core *core = NULL; + int i = 0; + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + u64 rate = 0; + struct clock_data *dcvs = NULL; + + core = inst->core; + dcvs = &inst->clk_data; + + mbs_per_second = msm_comm_get_inst_load_per_core(inst, + LOAD_CALC_NO_QUIRKS); + + /* + * Calculate vpp, vsp cycles separately for encoder and decoder. + * Even though, most part is common now, in future it may change + * between them. + */ + + if (inst->session_type == MSM_VIDC_ENCODER) { + vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ? + inst->clk_data.entry->low_power_cycles : + inst->clk_data.entry->vpp_cycles; + + vpp_cycles = mbs_per_second * vpp_cycles_per_mb; + + vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; + + /* 10 / 7 is overhead factor */ + vsp_cycles += (inst->clk_data.bitrate * 10) / 7; + } else if (inst->session_type == MSM_VIDC_DECODER) { + vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles; + + vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; + /* 10 / 7 is overhead factor */ + vsp_cycles += ((inst->prop.fps * filled_len * 8) * 10) / 7; + + } else { + dprintk(VIDC_ERR, "Unknown session type = %s\n", __func__); + return msm_vidc_max_freq(inst->core); + } + + freq = max(vpp_cycles, vsp_cycles); + + dprintk(VIDC_DBG, "Update DCVS Load\n"); + allowed_clks_tbl = core->resources.allowed_clks_tbl; + for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) { + rate = allowed_clks_tbl[i].clock_rate; + if (rate >= freq) + break; + } + + dcvs->load_norm = rate; + 
dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ? + allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm; + dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate : + dcvs->load_norm; + + msm_dcvs_print_dcvs_stats(dcvs); + dprintk(VIDC_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n", + __func__, inst, filled_len, freq); + return freq; +} static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, u32 filled_len) @@ -589,6 +678,7 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, struct allowed_clock_rates_table *allowed_clks_tbl = NULL; u64 rate = 0; struct clock_data *dcvs = NULL; + u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7; core = inst->core; dcvs = &inst->clk_data; @@ -614,8 +704,14 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; - /* 10 / 7 is overhead factor */ - vsp_cycles += (inst->clk_data.bitrate * 10) / 7; + /* bitrate is based on fps, scale it using operating rate */ + operating_rate = inst->clk_data.operating_rate >> 16; + if (operating_rate > inst->prop.fps && inst->prop.fps) { + vsp_factor_num *= operating_rate; + vsp_factor_den *= inst->prop.fps; + } + vsp_cycles += ((u64)inst->clk_data.bitrate * vsp_factor_num) / + vsp_factor_den; } else if (inst->session_type == MSM_VIDC_DECODER) { vpp_cycles = mbs_per_second * inst->clk_data.entry->vpp_cycles; /* 21 / 20 is overhead factor */ @@ -820,7 +916,7 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) goto no_clock_change; } - freq = msm_vidc_calc_freq(inst, filled_len); + freq = call_core_op(inst->core, calc_freq, inst, filled_len); msm_vidc_update_freq_entry(inst, freq, device_addr, is_turbo); @@ -1033,7 +1129,6 @@ int msm_vidc_decide_work_route(struct msm_vidc_inst *inst) int rc = 0; struct hfi_device *hdev; struct hal_video_work_route pdata; - u32 yuv_size = 0; if (!inst || !inst->core || !inst->core->device) { dprintk(VIDC_ERR, @@ -1057,7 
+1152,6 @@ int msm_vidc_decide_work_route(struct msm_vidc_inst *inst) break; } } else if (inst->session_type == MSM_VIDC_ENCODER) { - u32 rc_mode = 0; u32 slice_mode = 0; switch (inst->fmts[CAPTURE_PORT].fourcc) { @@ -1067,23 +1161,11 @@ int msm_vidc_decide_work_route(struct msm_vidc_inst *inst) goto decision_done; } - yuv_size = inst->prop.height[CAPTURE_PORT] * - inst->prop.width[CAPTURE_PORT]; - - if ((yuv_size <= 1920 * 1088) && - inst->prop.fps <= 60) { - rc_mode = msm_comm_g_ctrl_for_id(inst, - V4L2_CID_MPEG_VIDEO_BITRATE_MODE); - slice_mode = msm_comm_g_ctrl_for_id(inst, - V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); - - if ((rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) || - (rc_mode == - V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR) || - (slice_mode == - V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES)) { - pdata.video_work_route = 1; - } + slice_mode = msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); + if (slice_mode == + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) { + pdata.video_work_route = 1; } } else { return -EINVAL; @@ -1102,6 +1184,71 @@ int msm_vidc_decide_work_route(struct msm_vidc_inst *inst) return rc; } +static int msm_vidc_decide_work_mode_ar50(struct msm_vidc_inst *inst) +{ + int rc = 0; + struct hfi_device *hdev; + struct hal_video_work_mode pdata; + struct hal_enable latency; + + if (!inst || !inst->core || !inst->core->device) { + dprintk(VIDC_ERR, + "%s Invalid args: Inst = %pK\n", + __func__, inst); + return -EINVAL; + } + + hdev = inst->core->device; + if (inst->clk_data.low_latency_mode) { + pdata.video_work_mode = VIDC_WORK_MODE_1; + goto decision_done; + } + + if (inst->session_type == MSM_VIDC_DECODER) { + pdata.video_work_mode = VIDC_WORK_MODE_2; + switch (inst->fmts[OUTPUT_PORT].fourcc) { + case V4L2_PIX_FMT_MPEG2: + pdata.video_work_mode = VIDC_WORK_MODE_1; + break; + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_HEVC: + if (inst->prop.height[OUTPUT_PORT] * + inst->prop.width[OUTPUT_PORT] <= + 1280 * 720) + pdata.video_work_mode = 
VIDC_WORK_MODE_1; + break; + } + } else if (inst->session_type == MSM_VIDC_ENCODER) + pdata.video_work_mode = VIDC_WORK_MODE_1; + else { + return -EINVAL; + } + +decision_done: + + inst->clk_data.work_mode = pdata.video_work_mode; + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE, + (void *)&pdata); + if (rc) + dprintk(VIDC_WARN, + " Failed to configure Work Mode %pK\n", inst); + + /* For WORK_MODE_1, set Low Latency mode by default to HW. */ + + if (inst->session_type == MSM_VIDC_ENCODER && + inst->clk_data.work_mode == VIDC_WORK_MODE_1) { + latency.enable = 1; + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY, + (void *)&latency); + } + + rc = msm_comm_scale_clocks_and_bus(inst); + + return rc; +} + int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst) { int rc = 0; @@ -1121,7 +1268,6 @@ int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst) if (inst->clk_data.low_latency_mode) { pdata.video_work_mode = VIDC_WORK_MODE_1; - goto decision_done; } @@ -1144,26 +1290,27 @@ int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst) break; } } else if (inst->session_type == MSM_VIDC_ENCODER) { - u32 rc_mode = 0; - u32 slice_mode = 0; + u32 codec = inst->fmts[CAPTURE_PORT].fourcc; + u32 width = inst->prop.width[OUTPUT_PORT]; - pdata.video_work_mode = VIDC_WORK_MODE_1; + pdata.video_work_mode = VIDC_WORK_MODE_2; - switch (inst->fmts[CAPTURE_PORT].fourcc) { + switch (codec) { case V4L2_PIX_FMT_VP8: + { + if (width <= 3840) { + pdata.video_work_mode = VIDC_WORK_MODE_1; + goto decision_done; + } + break; + } case V4L2_PIX_FMT_TME: + { + pdata.video_work_mode = VIDC_WORK_MODE_1; goto decision_done; } + } - slice_mode = msm_comm_g_ctrl_for_id(inst, - V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); - rc_mode = msm_comm_g_ctrl_for_id(inst, - V4L2_CID_MPEG_VIDEO_BITRATE_MODE); - if ((slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE) && - ((rc_mode == 
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) || - (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR) || - (rc_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_MBR_VFR))) - pdata.video_work_mode = VIDC_WORK_MODE_2; } else { return -EINVAL; } @@ -1434,6 +1581,17 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst) return rc; } +void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core) +{ + if (!core) + return; + + if (core->platform_data->vpu_ver == VPU_VERSION_4) + core->core_ops = &core_ops_vpu4; + else + core->core_ops = &core_ops_vpu5; +} + void msm_print_core_status(struct msm_vidc_core *core, u32 core_id) { struct msm_vidc_inst *inst = NULL; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h index 8020bc1c0065489f5e5a4fb338af3f82d9320ee8..73dcc1670f6d2cc36698eeebe4d2bf20733a5eae 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h @@ -44,4 +44,5 @@ void msm_comm_update_input_cr(struct msm_vidc_inst *inst, u32 index, u32 cr); void update_recon_stats(struct msm_vidc_inst *inst, struct recon_stats_type *recon_stats); +void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core); #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index a9bf66952f61462093a62b3a6de2d64fba1f4bdc..79b7c665d4f70d03db3504340543e58e08d09316 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -2757,14 +2757,13 @@ bool is_batching_allowed(struct msm_vidc_inst *inst) * - session resolution <= 1080p * - low latency not enabled * - not a thumbnail session - * - realtime session * - UBWC color format */ if (is_decode_session(inst) && inst->core->resources.decode_batching && (msm_vidc_get_mbs_per_frame(inst) <= MAX_DEC_BATCH_WIDTH * MAX_DEC_BATCH_HEIGHT) && !inst->clk_data.low_latency_mode && - !is_thumbnail_session(inst) && 
is_realtime_session(inst) && + !is_thumbnail_session(inst) && (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_UBWC || inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_TP10_UBWC)) allowed = true; @@ -4168,17 +4167,18 @@ int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, } /* - * Don't batch for initial few buffers to avoid startup latency increase + * Don't defer buffers initially to avoid startup latency increase * due to batching - */ - if (inst->count.fbd < 30) - return msm_comm_qbuf(inst, mbuf); - - count = num_pending_qbufs(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); - if (count < inst->batch.size) { - mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; - print_vidc_buffer(VIDC_DBG, "qbuf_batch deferred", inst, mbuf); - return 0; + */ + if (inst->clk_data.buffer_counter > SKIP_BATCH_WINDOW) { + count = num_pending_qbufs(inst, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (count < inst->batch.size) { + mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; + print_vidc_buffer(VIDC_DBG, + "batch-qbuf deferred", inst, mbuf); + return 0; + } } rc = msm_comm_scale_clocks_and_bus(inst); @@ -4188,22 +4188,24 @@ int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, mutex_lock(&inst->registeredbufs.lock); list_for_each_entry(buf, &inst->registeredbufs.list, list) { /* Don't queue if buffer is not CAPTURE_MPLANE */ - if (!(buf->vvb.vb2_buf.type & - V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) - continue; + if (buf->vvb.vb2_buf.type != + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + goto loop_end; /* Don't queue if buffer is not a deferred buffer */ if (!(buf->flags & MSM_VIDC_FLAG_DEFERRED)) - continue; + goto loop_end; /* Don't queue if RBR event is pending on this buffer */ if (buf->flags & MSM_VIDC_FLAG_RBR_PENDING) - continue; - print_vidc_buffer(VIDC_DBG, "qbuf", inst, buf); + goto loop_end; + + print_vidc_buffer(VIDC_DBG, "batch-qbuf", inst, buf); rc = msm_comm_qbuf_to_hfi(inst, buf); if (rc) { dprintk(VIDC_ERR, "%s: Failed qbuf to hfi: %d\n", __func__, rc); break; } +loop_end: /* Queue 
pending buffers till the current buffer only */ if (buf == mbuf) break; @@ -5144,17 +5146,32 @@ int msm_vidc_noc_error_info(struct msm_vidc_core *core) int msm_vidc_trigger_ssr(struct msm_vidc_core *core, enum hal_ssr_trigger_type type) { - int rc = 0; + if (!core) { + dprintk(VIDC_WARN, "%s: Invalid parameters\n", __func__); + return -EINVAL; + } + core->ssr_type = type; + schedule_work(&core->ssr_work); + return 0; +} + +void msm_vidc_ssr_handler(struct work_struct *work) +{ + int rc; + struct msm_vidc_core *core; struct hfi_device *hdev; + core = container_of(work, struct msm_vidc_core, ssr_work); if (!core || !core->device) { - dprintk(VIDC_WARN, "Invalid parameters: %pK\n", core); - return -EINVAL; + dprintk(VIDC_ERR, "%s: Invalid params\n", __func__); + return; } hdev = core->device; mutex_lock(&core->lock); if (core->state == VIDC_CORE_INIT_DONE) { + dprintk(VIDC_WARN, "%s: ssr type %d\n", __func__, + core->ssr_type); /* * In current implementation user-initiated SSR triggers * a fatal error from hardware. 
However, there is no way @@ -5163,7 +5180,7 @@ int msm_vidc_trigger_ssr(struct msm_vidc_core *core, */ core->trigger_ssr = true; rc = call_hfi_op(hdev, core_trigger_ssr, - hdev->hfi_device_data, type); + hdev->hfi_device_data, core->ssr_type); if (rc) { dprintk(VIDC_ERR, "%s: trigger_ssr failed\n", __func__); @@ -5174,8 +5191,6 @@ int msm_vidc_trigger_ssr(struct msm_vidc_core *core, __func__, core); } mutex_unlock(&core->lock); - - return rc; } static int msm_vidc_load_supported(struct msm_vidc_inst *inst) @@ -6179,6 +6194,8 @@ struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, rc = -EEXIST; } if (rc == -EEXIST) { + print_vidc_buffer(VIDC_DBG, + "existing qbuf", inst, mbuf); /* enable RBR pending */ mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING; } @@ -6189,10 +6206,15 @@ struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, list_add_tail(&mbuf->list, &inst->registeredbufs.list); mutex_unlock(&inst->registeredbufs.lock); - if (rc == -EEXIST) { - print_vidc_buffer(VIDC_DBG, "qbuf upon rbr", inst, mbuf); + + /* + * Return mbuf if decode batching is enabled as this buffer + * may trigger queuing full batch to firmware, also this buffer + * will not be queued to firmware while full batch queuing, + * it will be queued when rbr event arrived from firmware. 
+ */ + if (rc == -EEXIST && !inst->batch.enable) return ERR_PTR(rc); - } return mbuf; @@ -6345,7 +6367,6 @@ void handle_release_buffer_reference(struct msm_vidc_inst *inst, mutex_unlock(&inst->registeredbufs.lock); if (found) { - print_vidc_buffer(VIDC_DBG, "rbr qbuf", inst, mbuf); rc = msm_comm_qbuf_in_rbr(inst, mbuf); if (rc) print_vidc_buffer(VIDC_ERR, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 423cae63259045ef3f2edefa8554d3e225448092..d73da96f89b66879f231660a22268943a762b1a2 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -18,6 +18,7 @@ #define MAX_DEC_BATCH_SIZE 6 #define MAX_DEC_BATCH_WIDTH 1920 #define MAX_DEC_BATCH_HEIGHT 1088 +#define SKIP_BATCH_WINDOW 100 struct vb2_buf_entry { struct list_head list; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index 29b4d821e52d91ed865a854b87d1f56e59efadea..3b3882026bd4d53ddcecc0241ef2fb6dca386d21 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -75,6 +75,12 @@ #define NUM_MBS_PER_FRAME(__height, __width) \ ((ALIGN(__height, 16) / 16) * (ALIGN(__width, 16) / 16)) +#define call_core_op(c, op, args...) \ + (((c) && (c)->core_ops && (c)->core_ops->op) ? 
\ + ((c)->core_ops->op(args)) : 0) + +struct msm_vidc_inst; + enum vidc_ports { OUTPUT_PORT, CAPTURE_PORT, @@ -215,6 +221,17 @@ struct msm_vidc_efuse_data { enum efuse_purpose purpose; }; +enum vpu_version { + VPU_VERSION_4 = 1, + VPU_VERSION_5, +}; + +#define IS_VPU_4(ver) \ + (ver == VPU_VERSION_4) + +#define IS_VPU_5(ver) \ + (ver == VPU_VERSION_5) + struct msm_vidc_platform_data { struct msm_vidc_common_data *common_data; unsigned int common_data_length; @@ -224,6 +241,9 @@ struct msm_vidc_platform_data { struct msm_vidc_efuse_data *efuse_data; unsigned int efuse_data_length; unsigned int sku_version; + phys_addr_t gcc_register_base; + uint32_t gcc_register_size; + uint32_t vpu_ver; }; struct msm_vidc_format { @@ -345,6 +365,12 @@ enum msm_vidc_modes { VIDC_REALTIME = BIT(4), }; +struct msm_vidc_core_ops { + unsigned long (*calc_freq)(struct msm_vidc_inst *inst, u32 filled_len); + int (*decide_work_route)(struct msm_vidc_inst *inst); + int (*decide_work_mode)(struct msm_vidc_inst *inst); +}; + struct msm_vidc_core { struct list_head list; struct mutex lock; @@ -364,11 +390,14 @@ struct msm_vidc_core { u32 codec_count; struct msm_vidc_capability *capabilities; struct delayed_work fw_unload_work; + struct work_struct ssr_work; + enum hal_ssr_trigger_type ssr_type; bool smmu_fault_handled; bool trigger_ssr; unsigned long min_freq; unsigned long curr_freq; struct vidc_bus_vote_data *vote_data; + struct msm_vidc_core_ops *core_ops; }; struct msm_vidc_inst { @@ -486,6 +515,7 @@ int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); struct dma_buf *msm_smem_get_dma_buf(int fd); void msm_smem_put_dma_buf(void *dma_buf); void msm_vidc_fw_unload_handler(struct work_struct *work); +void msm_vidc_ssr_handler(struct work_struct *work); /* * XXX: normally should be in msm_vidc.h, but that's meant for public APIs, * whereas this is private diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c 
b/drivers/media/platform/msm/vidc/msm_vidc_platform.c index 86534d87cb8aea0ecf7166120fee11b2878a048e..8281ea7fec3dea54fedb88844eea633dea7da17d 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c @@ -45,11 +45,27 @@ .purpose = p \ } +#define GCC_VIDEO_AXI_REG_START_ADDR 0x10B024 +#define GCC_VIDEO_AXI_REG_SIZE 0xC + static struct msm_vidc_codec_data default_codec_data[] = { CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320), CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 125, 675, 320), }; +/* Update with SM6150 data */ +static struct msm_vidc_codec_data sm6150_codec_data[] = { + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 125, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_ENCODER, 125, 675, 320), + CODEC_ENTRY(V4L2_PIX_FMT_TME, MSM_VIDC_ENCODER, 0, 540, 540), + CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 50, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 50, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 50, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_DECODER, 50, 200, 200), + CODEC_ENTRY(V4L2_PIX_FMT_VP9, MSM_VIDC_DECODER, 50, 200, 200), +}; + /* Update with 855 data */ static struct msm_vidc_codec_data sm8150_codec_data[] = { CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 10, 675, 320), @@ -114,6 +130,53 @@ static struct msm_vidc_common_data default_common_data[] = { }, }; +static struct msm_vidc_common_data sm6150_common_data[] = { + { + .key = "qcom,never-unload-fw", + .value = 1, + }, + { + .key = "qcom,sw-power-collapse", + .value = 1, + }, + { + .key = "qcom,domain-attr-non-fatal-faults", + .value = 1, + }, + { + .key = "qcom,max-secure-instances", + .value = 5, + }, + { + .key = "qcom,max-hw-load", + .value = 1216800, + }, + { + .key = "qcom,max-hq-mbs-per-frame", + .value = 8160, + }, + { + .key = "qcom,max-hq-frames-per-sec", + .value = 60, + }, + { 
+ .key = "qcom,max-b-frame-size", + .value = 8160, + }, + { + .key = "qcom,max-b-frames-per-sec", + .value = 60, + }, + { + .key = "qcom,power-collapse-delay", + .value = 500, + }, + { + .key = "qcom,hw-resp-timeout", + .value = 250, + }, +}; + static struct msm_vidc_common_data sm8150_common_data[] = { { .key = "qcom,never-unload-fw", @@ -157,7 +220,7 @@ static struct msm_vidc_common_data sm8150_common_data[] = { }, { .key = "qcom,hw-resp-timeout", - .value = 10000, + .value = 1000, }, { .key = "qcom,debug-timeout", @@ -169,7 +232,7 @@ static struct msm_vidc_common_data sm8150_common_data[] = { }, { .key = "qcom,decode-batching", - .value = 0, + .value = 1, }, }; @@ -337,6 +400,25 @@ static struct msm_vidc_platform_data default_data = { .efuse_data = NULL, .efuse_data_length = 0, .sku_version = 0, + .gcc_register_base = 0, + .gcc_register_size = 0, + .vpu_ver = VPU_VERSION_5, +}; + +static struct msm_vidc_platform_data sm6150_data = { + .codec_data = sm6150_codec_data, + .codec_data_length = ARRAY_SIZE(sm6150_codec_data), + .common_data = sm6150_common_data, + .common_data_length = ARRAY_SIZE(sm6150_common_data), + .csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff, + .csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff, + .csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff, + .efuse_data = NULL, + .efuse_data_length = 0, + .sku_version = 0, + .gcc_register_base = 0, + .gcc_register_size = 0, + .vpu_ver = VPU_VERSION_4, }; static struct msm_vidc_platform_data sm8150_data = { @@ -350,6 +432,9 @@ static struct msm_vidc_platform_data sm8150_data = { .efuse_data = NULL, .efuse_data_length = 0, .sku_version = 0, + .gcc_register_base = GCC_VIDEO_AXI_REG_START_ADDR, + .gcc_register_size = GCC_VIDEO_AXI_REG_SIZE, + .vpu_ver = VPU_VERSION_5, }; static struct msm_vidc_platform_data sdm845_data = { @@ -363,6 +448,9 @@ static struct msm_vidc_platform_data sdm845_data = { .efuse_data = NULL, .efuse_data_length = 0, .sku_version = 0, + 
.gcc_register_base = 0, + .gcc_register_size = 0, + .vpu_ver = VPU_VERSION_4, }; static struct msm_vidc_platform_data sdm670_data = { @@ -376,9 +464,16 @@ static struct msm_vidc_platform_data sdm670_data = { .efuse_data = sdm670_efuse_data, .efuse_data_length = ARRAY_SIZE(sdm670_efuse_data), .sku_version = 0, + .gcc_register_base = 0, + .gcc_register_size = 0, + .vpu_ver = VPU_VERSION_4, }; static const struct of_device_id msm_vidc_dt_match[] = { + { + .compatible = "qcom,sm6150-vidc", + .data = &sm6150_data, + }, { .compatible = "qcom,sm8150-vidc", .data = &sm8150_data, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index a1bf73744cb3a18e78741cb2f935a6bfeb025199..8ff33e8a025751c4e2a01153042966fcc261164a 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -29,9 +29,6 @@ enum clock_properties { #define PERF_GOV "performance" -#define GCC_VIDEO_AXI_REG_START_ADDR 0x10B024 -#define GCC_VIDEO_AXI_REG_SIZE 0xC - static inline struct device *msm_iommu_get_ctx(const char *ctx_name) { return NULL; @@ -377,10 +374,10 @@ static int msm_vidc_load_allowed_clocks_table( return 0; } -static int msm_vidc_populate_mem_adsp(struct device *dev, +static int msm_vidc_populate_mem_cdsp(struct device *dev, struct msm_vidc_platform_resources *res) { - res->mem_adsp.dev = dev; + res->mem_cdsp.dev = dev; return 0; } @@ -795,6 +792,10 @@ int read_platform_resources_from_drv_data( res->csc_coeff_data = &platform_data->csc_data; + res->gcc_register_base = platform_data->gcc_register_base; + res->gcc_register_size = platform_data->gcc_register_size; + + res->vpu_ver = platform_data->vpu_ver; return rc; } @@ -825,9 +826,6 @@ int read_platform_resources_from_dt( res->register_base = kres ? kres->start : -1; res->register_size = kres ? 
(kres->end + 1 - kres->start) : -1; - res->gcc_register_base = GCC_VIDEO_AXI_REG_START_ADDR; - res->gcc_register_size = GCC_VIDEO_AXI_REG_SIZE; - kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res->irq = kres ? kres->start : -1; @@ -1288,7 +1286,7 @@ int read_bus_resources_from_dt(struct platform_device *pdev) return msm_vidc_populate_bus(&pdev->dev, &core->resources); } -int read_mem_adsp_resources_from_dt(struct platform_device *pdev) +int read_mem_cdsp_resources_from_dt(struct platform_device *pdev) { struct msm_vidc_core *core; @@ -1308,5 +1306,5 @@ int read_mem_adsp_resources_from_dt(struct platform_device *pdev) return -EINVAL; } - return msm_vidc_populate_mem_adsp(&pdev->dev, &core->resources); + return msm_vidc_populate_mem_cdsp(&pdev->dev, &core->resources); } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h index 0d628f7cb55dfb3602fb2efa0459fc3b598d6011..2d7a1b126068cd0a69ef01ac5371b1f4346a780e 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.h @@ -30,7 +30,7 @@ int read_platform_resources_from_dt( int read_context_bank_resources_from_dt(struct platform_device *pdev); int read_bus_resources_from_dt(struct platform_device *pdev); -int read_mem_adsp_resources_from_dt(struct platform_device *pdev); +int read_mem_cdsp_resources_from_dt(struct platform_device *pdev); int msm_vidc_load_u32_table(struct platform_device *pdev, struct device_node *of_node, char *table_name, int struct_size, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h index a32a94ce2acdfc3e1eeb479964b021af991a6b36..981aafaa8fe6761f563ce63f2c597544309968bf 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h @@ -143,7 +143,7 @@ struct subcache_set { u32 count; }; -struct msm_vidc_mem_adsp { +struct 
msm_vidc_mem_cdsp { struct device *dev; }; @@ -196,7 +196,8 @@ struct msm_vidc_platform_resources { struct msm_vidc_codec_data *codec_data; int codec_data_count; struct msm_vidc_csc_coeff *csc_coeff_data; - struct msm_vidc_mem_adsp mem_adsp; + struct msm_vidc_mem_cdsp mem_cdsp; + uint32_t vpu_ver; }; static inline bool is_iommu_present(struct msm_vidc_platform_resources *res) diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index 277ebf2df272b5e497807ba1612626cba571dd39..1776769e34bbc6a3a5279deefdf508f1138a0076 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -108,6 +108,23 @@ static int __disable_subcaches(struct venus_hfi_device *device); static int __power_collapse(struct venus_hfi_device *device, bool force); static int venus_hfi_noc_error_info(void *dev); +static void interrupt_init_vpu4(struct venus_hfi_device *device); +static void interrupt_init_vpu5(struct venus_hfi_device *device); +static void setup_dsp_uc_memmap_vpu5(struct venus_hfi_device *device); +static void clock_config_on_enable_vpu5(struct venus_hfi_device *device); + +struct venus_hfi_vpu_ops vpu4_ops = { + .interrupt_init = interrupt_init_vpu4, + .setup_dsp_uc_memmap = NULL, + .clock_config_on_enable = NULL, +}; + +struct venus_hfi_vpu_ops vpu5_ops = { + .interrupt_init = interrupt_init_vpu5, + .setup_dsp_uc_memmap = setup_dsp_uc_memmap_vpu5, + .clock_config_on_enable = clock_config_on_enable_vpu5, +}; + /** * Utility function to enforce some of our assumptions. Spam calls to this * in hotspots in code to double check some of the assumptions that we hold. 
@@ -1611,7 +1628,7 @@ static void __interface_dsp_queues_release(struct venus_hfi_device *device) dma_unmap_single_attrs(cb->dev, mem_data->device_addr, mem_data->size, DMA_BIDIRECTIONAL, 0); - dma_free_coherent(device->res->mem_adsp.dev, mem_data->size, + dma_free_coherent(device->res->mem_cdsp.dev, mem_data->size, mem_data->kvaddr, mem_data->dma_handle); for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) { @@ -1643,7 +1660,7 @@ static int __interface_dsp_queues_init(struct venus_hfi_device *dev) mem_data = &dev->dsp_iface_q_table.mem_data; /* Allocate dsp queues from ADSP device memory */ - kvaddr = dma_alloc_coherent(dev->res->mem_adsp.dev, q_size, + kvaddr = dma_alloc_coherent(dev->res->mem_cdsp.dev, q_size, &dma_handle, GFP_KERNEL); if (IS_ERR_OR_NULL(kvaddr)) { dprintk(VIDC_ERR, "%s: failed dma allocation\n", __func__); @@ -1726,7 +1743,7 @@ static int __interface_dsp_queues_init(struct venus_hfi_device *dev) return rc; fail_dma_map: - dma_free_coherent(dev->res->mem_adsp.dev, q_size, kvaddr, dma_handle); + dma_free_coherent(dev->res->mem_cdsp.dev, q_size, kvaddr, dma_handle); fail_dma_alloc: return -ENOMEM; } @@ -1860,20 +1877,7 @@ static void __setup_ucregion_memory_map(struct venus_hfi_device *device) if (device->qdss.align_device_addr) __write_register(device, VIDC_MMAP_ADDR, (u32)device->qdss.align_device_addr); - /* initialize DSP QTBL & UCREGION with CPU queues */ - __write_register(device, HFI_DSP_QTBL_ADDR, - (u32)device->iface_q_table.align_device_addr); - __write_register(device, HFI_DSP_UC_REGION_ADDR, - (u32)device->iface_q_table.align_device_addr); - __write_register(device, HFI_DSP_UC_REGION_SIZE, SHARED_QSIZE); - if (device->res->domain_cvp) { - __write_register(device, HFI_DSP_QTBL_ADDR, - (u32)device->dsp_iface_q_table.align_device_addr); - __write_register(device, HFI_DSP_UC_REGION_ADDR, - (u32)device->dsp_iface_q_table.align_device_addr); - __write_register(device, HFI_DSP_UC_REGION_SIZE, - device->dsp_iface_q_table.mem_data.size); - } + 
call_venus_op(device, setup_dsp_uc_memmap, device); } static int __interface_queues_init(struct venus_hfi_device *dev) @@ -3745,14 +3749,18 @@ static int __init_regs_and_interrupts(struct venus_hfi_device *device, goto error_irq_fail; } - hal->gcc_reg_base = devm_ioremap_nocache(&res->pdev->dev, - res->gcc_register_base, res->gcc_register_size); - hal->gcc_reg_size = res->gcc_register_size; - if (!hal->gcc_reg_base) { - dprintk(VIDC_ERR, - "could not map gcc reg addr %pa of size %d\n", - &res->gcc_register_base, res->gcc_register_size); - goto error_irq_fail; + if (res->gcc_register_base) { + hal->gcc_reg_base = devm_ioremap_nocache(&res->pdev->dev, + res->gcc_register_base, + res->gcc_register_size); + hal->gcc_reg_size = res->gcc_register_size; + if (!hal->gcc_reg_base) { + dprintk(VIDC_ERR, + "could not map gcc reg addr %pa of size %d\n", + &res->gcc_register_base, + res->gcc_register_size); + goto error_irq_fail; + } } device->hal_data = hal; @@ -3965,8 +3973,7 @@ static inline int __prepare_enable_clks(struct venus_hfi_device *device) dprintk(VIDC_DBG, "Clock: %s prepared and enabled\n", cl->name); } - __write_register(device, VIDC_WRAPPER_CPU_CGC_DIS, 0); - __write_register(device, VIDC_WRAPPER_CPU_CLOCK_CONFIG, 0); + call_venus_op(device, clock_config_on_enable, device); return rc; fail_clk_enable: @@ -4540,10 +4547,53 @@ static int __disable_subcaches(struct venus_hfi_device *device) return 0; } +static void interrupt_init_vpu5(struct venus_hfi_device *device) +{ + u32 mask_val = 0; + + /* All interrupts should be disabled initially 0x1F6 : Reset value */ + mask_val = __read_register(device, VIDC_WRAPPER_INTR_MASK); + + /* Write 0 to unmask CPU and WD interrupts */ + mask_val &= ~(VIDC_WRAPPER_INTR_MASK_A2HWD_BMSK | + VIDC_WRAPPER_INTR_MASK_A2HCPU_BMSK); + __write_register(device, VIDC_WRAPPER_INTR_MASK, mask_val); +} + +static void interrupt_init_vpu4(struct venus_hfi_device *device) +{ + __write_register(device, VIDC_WRAPPER_INTR_MASK, + 
VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK); +} + +static void setup_dsp_uc_memmap_vpu5(struct venus_hfi_device *device) +{ + /* initialize DSP QTBL & UCREGION with CPU queues */ + __write_register(device, HFI_DSP_QTBL_ADDR, + (u32)device->iface_q_table.align_device_addr); + __write_register(device, HFI_DSP_UC_REGION_ADDR, + (u32)device->iface_q_table.align_device_addr); + __write_register(device, HFI_DSP_UC_REGION_SIZE, SHARED_QSIZE); + if (device->res->domain_cvp) { + __write_register(device, HFI_DSP_QTBL_ADDR, + (u32)device->dsp_iface_q_table.align_device_addr); + __write_register(device, HFI_DSP_UC_REGION_ADDR, + (u32)device->dsp_iface_q_table.align_device_addr); + __write_register(device, HFI_DSP_UC_REGION_SIZE, + device->dsp_iface_q_table.mem_data.size); + } +} + +static void clock_config_on_enable_vpu5(struct venus_hfi_device *device) +{ + __write_register(device, VIDC_WRAPPER_CPU_CGC_DIS, 0); + __write_register(device, VIDC_WRAPPER_CPU_CLOCK_CONFIG, 0); +} + static int __venus_power_on(struct venus_hfi_device *device) { int rc = 0; - u32 mask_val = 0; + if (device->power_enabled) return 0; @@ -4588,13 +4638,7 @@ static int __venus_power_on(struct venus_hfi_device *device) */ __set_registers(device); - /* All interrupts should be disabled initially 0x1F6 : Reset value */ - mask_val = __read_register(device, VIDC_WRAPPER_INTR_MASK); - - /* Write 0 to unmask CPU and WD interrupts */ - mask_val &= ~(VIDC_WRAPPER_INTR_MASK_A2HWD_BMSK | - VIDC_WRAPPER_INTR_MASK_A2HCPU_BMSK); - __write_register(device, VIDC_WRAPPER_INTR_MASK, mask_val); + call_venus_op(device, interrupt_init, device); device->intr_status = 0; enable_irq(device->hal_data->irq); @@ -4997,6 +5041,14 @@ static int __initialize_packetization(struct venus_hfi_device *device) return rc; } +void __init_venus_ops(struct venus_hfi_device *device) +{ + if (device->res->vpu_ver == VPU_VERSION_4) + device->vpu_ops = &vpu4_ops; + else + device->vpu_ops = &vpu5_ops; +} + static struct venus_hfi_device 
*__add_device(u32 device_id, struct msm_vidc_platform_resources *res, hfi_cmd_response_callback callback) @@ -5040,6 +5092,8 @@ static struct venus_hfi_device *__add_device(u32 device_id, hdevice->device_id = device_id; hdevice->callback = callback; + __init_venus_ops(hdevice); + hdevice->vidc_workq = create_singlethread_workqueue( "msm_vidc_workerq_venus"); if (!hdevice->vidc_workq) { diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h index ef9e2ec593205880110a5f3bdb57f67a5f2774d8..e14d6f3f4ece0eebd0a032a24cca70c18029c415 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.h +++ b/drivers/media/platform/msm/vidc/venus_hfi.h @@ -206,6 +206,10 @@ struct vidc_iface_q_info { #define venus_hfi_for_each_subcache_reverse(__device, __sinfo) \ venus_hfi_for_each_thing_reverse(__device, __sinfo, subcache) +#define call_venus_op(d, op, args...) \ + (((d) && (d)->vpu_ops && (d)->vpu_ops->op) ? \ + ((d)->vpu_ops->op(args)):0) + /* Internal data used in vidc_hal not exposed to msm_vidc*/ struct hal_data { u32 irq; @@ -230,6 +234,14 @@ enum venus_hfi_state { VENUS_STATE_INIT, }; +struct venus_hfi_device; + +struct venus_hfi_vpu_ops { + void (*interrupt_init)(struct venus_hfi_device *ptr); + void (*setup_dsp_uc_memmap)(struct venus_hfi_device *device); + void (*clock_config_on_enable)(struct venus_hfi_device *device); +}; + struct venus_hfi_device { struct list_head list; struct list_head sess_head; @@ -266,6 +278,7 @@ struct venus_hfi_device { struct pm_qos_request qos; unsigned int skip_pc_count; struct msm_vidc_capability *sys_init_capabilities; + struct venus_hfi_vpu_ops *vpu_ops; }; void venus_hfi_delete_device(void *device); diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 5c690843748d7a4289893062e067d4bccc3f2692..65e1545927db6410c6c6846dc5cb55f8025166f7 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ 
b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -215,6 +215,7 @@ enum hal_property { HAL_PARAM_SECURE, HAL_PARAM_VENC_HDR10_PQ_SEI, HAL_PARAM_VIDEO_WORK_ROUTE, + HAL_CONFIG_VENC_VBV_HRD_BUF_SIZE, }; enum hal_domain { @@ -1406,6 +1407,10 @@ struct hal_hdr10_pq_sei { struct msm_vidc_content_light_level_sei_payload cll_sei; }; +struct hal_vbv_hdr_buf_size { + u32 vbv_hdr_buf_size; +}; + #define call_hfi_op(q, op, args...) \ (((q) && (q)->op) ? ((q)->op(args)) : 0) diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index 481c7bb61bce9d7820d4236ca62183aff0d156a8..c16a407a48334d00e5fa0f0bb9bfbab261aea82e 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -350,6 +350,8 @@ struct hfi_buffer_info { (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00A) #define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00B) +#define HFI_PROPERTY_CONFIG_VENC_VBV_HRD_BUF_SIZE \ + (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00D) #define HFI_PROPERTY_CONFIG_VENC_PERF_MODE \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E) #define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID \ @@ -1117,4 +1119,8 @@ struct hfi_hdr10_pq_sei { struct hfi_content_light_level_sei_payload cll_info; }; +struct hfi_vbv_hdr_buf_size { + u32 vbv_hdr_buf_size; +}; + #endif diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h index 767adb29fe0b302ac7953b03d345b36e0b533098..bc3be97f0250ab1d129f8187a07ff82fd73b0a7c 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h @@ -106,6 +106,7 @@ #define VIDC_WRAPPER_INTR_MASK (VIDC_WRAPPER_BASE_OFFS + 0x10) #define VIDC_WRAPPER_INTR_MASK_A2HWD_BMSK 0x10 #define VIDC_WRAPPER_INTR_MASK_A2HWD_SHFT 0x4 +#define VIDC_WRAPPER_INTR_MASK_A2HVCODEC_BMSK 0x8 #define VIDC_WRAPPER_INTR_MASK_A2HCPU_BMSK 0x4 #define 
VIDC_WRAPPER_INTR_MASK_A2HCPU_SHFT 0x2 diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c index 25c7a7d42292fca87ebccda4979e5fcded1c2c9a..0f0324a14d515552939cc697b03ab0f0575ec30a 100644 --- a/drivers/media/platform/s3c-camif/camif-capture.c +++ b/drivers/media/platform/s3c-camif/camif-capture.c @@ -1256,16 +1256,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif, { const struct s3c_camif_variant *variant = camif->variant; const struct vp_pix_limits *pix_lim; - int i = ARRAY_SIZE(camif_mbus_formats); + unsigned int i; /* FIXME: constraints against codec or preview path ? */ pix_lim = &variant->vp_pix_limits[VP_CODEC]; - while (i-- >= 0) + for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++) if (camif_mbus_formats[i] == mf->code) break; - mf->code = camif_mbus_formats[i]; + if (i == ARRAY_SIZE(camif_mbus_formats)) + mf->code = camif_mbus_formats[0]; if (pad == CAMIF_SD_PAD_SINK) { v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH, diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c index 34731f71cc0077f4e66bb344c2e6737833e8b44e..8ca9343b6730216b6b4e76b9f86175d7f37bdb87 100644 --- a/drivers/media/platform/vivid/vivid-ctrls.c +++ b/drivers/media/platform/vivid/vivid-ctrls.c @@ -1191,6 +1191,7 @@ static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl) v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls); v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls); + dev->radio_rx_dev.device_caps = dev->radio_rx_caps; break; case V4L2_CID_RDS_RECEPTION: dev->radio_rx_rds_enabled = ctrl->val; @@ -1265,6 +1266,7 @@ static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl) dev->radio_tx_caps &= ~V4L2_CAP_READWRITE; if (!dev->radio_tx_rds_controls) dev->radio_tx_caps |= V4L2_CAP_READWRITE; + dev->radio_tx_dev.device_caps = dev->radio_tx_caps; break; 
case V4L2_CID_RDS_TX_PTY: if (dev->radio_rx_rds_controls) diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 4dfbeac8f42c90c97cb63308efbe84e352c035ee..d3cd57f6ba529065e5037abd2fb3c038d1a30051 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -504,6 +504,15 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index) struct vsp1_rwpf *rpf = vsp1->rpf[i]; unsigned int j; + /* + * Make sure we don't accept more inputs than the hardware can + * handle. This is a temporary fix to avoid display stall, we + * need to instead allocate the BRU or BRS to display pipelines + * dynamically based on the number of planes they each use. + */ + if (pipe->num_inputs >= pipe->bru->source_pad) + pipe->inputs[i] = NULL; + if (!pipe->inputs[i]) continue; diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 4c57fd7929cb4c71786956650ebd7ea869401219..11a59854a0a62642c08c8cb95fe8cc1f7cb74f04 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -508,8 +508,10 @@ static struct em28xx_reg_seq plex_px_bcud[] = { }; /* - * 2040:0265 Hauppauge WinTV-dualHD DVB - * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM + * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc + * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk + * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc + * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk * reg 0x80/0x84: * GPIO_0: Yellow LED tuner 1, 0=on, 1=off * GPIO_1: Green LED tuner 1, 0=on, 1=off @@ -2392,7 +2394,8 @@ struct em28xx_board em28xx_boards[] = { .has_dvb = 1, }, /* - * 2040:0265 Hauppauge WinTV-dualHD (DVB version). + * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc. + * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk. 
* Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = { @@ -2407,7 +2410,8 @@ struct em28xx_board em28xx_boards[] = { .leds = hauppauge_dualhd_leds, }, /* - * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM). + * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc. + * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk. * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157 */ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = { @@ -2548,8 +2552,12 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 }, { USB_DEVICE(0x2040, 0x0265), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, + { USB_DEVICE(0x2040, 0x8265), + .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB }, { USB_DEVICE(0x2040, 0x026d), .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, + { USB_DEVICE(0x2040, 0x826d), + .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 }, { USB_DEVICE(0x0438, 0xb002), .driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 }, { USB_DEVICE(0x2001, 0xf112), @@ -2610,7 +2618,11 @@ struct usb_device_id em28xx_id_table[] = { .driver_info = EM28178_BOARD_PCTV_461E }, { USB_DEVICE(0x2013, 0x025f), .driver_info = EM28178_BOARD_PCTV_292E }, - { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD */ + { USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */ + .driver_info = EM28178_BOARD_PCTV_292E }, + { USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */ + .driver_info = EM28178_BOARD_PCTV_292E }, + { USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */ .driver_info = EM28178_BOARD_PCTV_292E }, { USB_DEVICE(0x0413, 0x6f07), .driver_info = EM2861_BOARD_LEADTEK_VC100 }, diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index 88084f24f0337036d5313e6ab145a18fea553eb7..094e83b6908d11232628a0a94c4d4cf5863898ea 100644 --- 
a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h @@ -191,7 +191,7 @@ USB 2.0 spec says bulk packet size is always 512 bytes */ #define EM28XX_BULK_PACKET_MULTIPLIER 384 -#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384 +#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94 #define EM28XX_INTERLACED_DEFAULT 1 diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 3a7c80cd1a176d8e7e9eaffff343f56fe48160b6..359fb9804d160426ab037133e2102b979833ecff 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -106,7 +106,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr, if (nums[i-1] + 1 != nums[i]) goto fail_map; buf->vaddr = (__force void *) - ioremap_nocache(nums[0] << PAGE_SHIFT, size); + ioremap_nocache(__pfn_to_phys(nums[0]), size + offset); } else { buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1, PAGE_KERNEL); diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 7b3b413689316bdfdf0bf73f1fe7a861c00e7463..cf6ce9f600ca84a099aa9f698aa86109d8bfe91a 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2698,6 +2698,8 @@ mptctl_hp_targetinfo(unsigned long arg) __FILE__, __LINE__, iocnum); return -ENODEV; } + if (karg.hdr.id >= MPT_MAX_FC_DEVICES) + return -EINVAL; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", ioc->name)); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index b1afeccbb97f0b107b513d3af45b519aa1426bad..c96dcda1111f69241f3de91f378cafc2fdec54ae 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -365,6 +365,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ +/****** CXL_PSL_DEBUG 
*****************************************************/ +#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */ + /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/ #define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */ #define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */ @@ -659,6 +662,7 @@ struct cxl_native { irq_hw_number_t err_hwirq; unsigned int err_virq; u64 ps_off; + bool no_data_cache; /* set if no data cache on the card */ const struct cxl_service_layer_ops *sl_ops; }; diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 4a82c313cf71b4235413f2f041f4256d5bf12acb..9c042b0b8c5504e4566009f7eb4554933e88bf4b 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -352,8 +352,17 @@ int cxl_data_cache_flush(struct cxl *adapter) u64 reg; unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); - pr_devel("Flushing data cache\n"); + /* + * Do a datacache flush only if datacache is available. + * In case of PSL9D datacache absent hence flush operation. + * would timeout. + */ + if (adapter->native->no_data_cache) { + pr_devel("No PSL data cache. 
Ignoring cache flush req.\n"); + return 0; + } + pr_devel("Flushing data cache\n"); reg = cxl_p1_read(adapter, CXL_PSL_Control); reg |= CXL_PSL_Control_Fr; cxl_p1_write(adapter, CXL_PSL_Control, reg); diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 81093f8157a9e755f0cbfaeb963d04cadb8db587..2b3fd0a5170131dfc92a36e5e71b7e57860bbbd8 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -457,6 +457,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, u64 chipid; u32 phb_index; u64 capp_unit_id; + u64 psl_debug; int rc; rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); @@ -507,6 +508,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, if (cxl_is_power9_dd1()) cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); + /* + * Check if PSL has data-cache. We need to flush adapter datacache + * when as its about to be removed. + */ + psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG); + if (psl_debug & CXL_PSL_DEBUG_CDC) { + dev_dbg(&dev->dev, "No data-cache present\n"); + adapter->native->no_data_cache = true; + } + return 0; } @@ -1450,10 +1461,8 @@ int cxl_pci_reset(struct cxl *adapter) /* * The adapter is about to be reset, so ignore errors. - * Not supported on P9 DD1 */ - if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) - cxl_data_cache_flush(adapter); + cxl_data_cache_flush(adapter); /* pcie_warm_reset requests a fundamental pci reset which includes a * PERST assert/deassert. PERST triggers a loading of the image @@ -1898,10 +1907,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) /* * Flush adapter datacache as its about to be removed. - * Not supported on P9 DD1. 
*/ - if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) - cxl_data_cache_flush(adapter); + cxl_data_cache_flush(adapter); cxl_deconfigure_adapter(adapter); diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c index a52dbe95f77205d15e26ae177919da19b22966d6..87f1b338106c55aa6ef5f8c757b2bb45bf07d24e 100644 --- a/drivers/misc/hdcp_qseecom.c +++ b/drivers/misc/hdcp_qseecom.c @@ -137,10 +137,14 @@ ((((maj) & 0xFF) << 16) | (((min) & 0xFF) << 8) | ((patch) & 0xFF)) #define hdcp2_app_init_var(x) \ - struct hdcp_##x##_req *req_buf = \ - (struct hdcp_##x##_req *) handle->qseecom_handle->sbuf; \ - struct hdcp_##x##_rsp *rsp_buf = \ - (struct hdcp_##x##_rsp *) (handle->qseecom_handle->sbuf + \ + struct hdcp_##x##_req *req_buf = NULL; \ + struct hdcp_##x##_rsp *rsp_buf = NULL; \ + if (!handle->qseecom_handle) { \ + pr_err("invalid qseecom_handle while processing %s\n", #x); \ + return -EINVAL; \ + } \ + req_buf = (struct hdcp_##x##_req *) handle->qseecom_handle->sbuf; \ + rsp_buf = (struct hdcp_##x##_rsp *) (handle->qseecom_handle->sbuf + \ QSEECOM_ALIGN(sizeof(struct hdcp_##x##_req))); \ req_buf->commandid = hdcp_cmd_##x @@ -835,6 +839,7 @@ static int hdcp2_app_session_init(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); + rc = -EINVAL; goto exit; } @@ -867,12 +872,14 @@ static int hdcp2_app_session_deinit(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); + rc = -EINVAL; goto exit; } if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { /* unload library here */ pr_err("session not initialized\n"); + rc = -EINVAL; goto exit; } @@ -896,12 +903,14 @@ static int hdcp2_app_tx_deinit(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); + rc = -EINVAL; goto exit; } if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { /* unload library here */ pr_err("txmtr not initialized\n"); + rc = 
-EINVAL; goto exit; } @@ -925,11 +934,13 @@ static int hdcp2_app_start_auth(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { pr_err("session not initialized\n"); + rc = -EINVAL; goto exit; } if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { pr_err("txmtr not initialized\n"); + rc = -EINVAL; goto exit; } @@ -983,14 +994,23 @@ static int hdcp2_app_start(struct hdcp2_handle *handle) return rc; } -static void hdcp2_app_stop(struct hdcp2_handle *handle) +static int hdcp2_app_stop(struct hdcp2_handle *handle) { - hdcp2_app_tx_deinit(handle); + int rc = 0; - if (!handle->legacy_app) - hdcp2_app_session_deinit(handle); + rc = hdcp2_app_tx_deinit(handle); + if (rc) + goto end; + + if (!handle->legacy_app) { + rc = hdcp2_app_session_deinit(handle); + if (rc) + goto end; + } - hdcp2_app_unload(handle); + rc = hdcp2_app_unload(handle); +end: + return rc; } static int hdcp2_app_process_msg(struct hdcp2_handle *handle) @@ -1128,11 +1148,14 @@ int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, rc = hdcp2_app_query_stream(handle); break; case HDCP2_CMD_STOP: - hdcp2_app_stop(handle); + rc = hdcp2_app_stop(handle); default: goto exit; } + if (rc) + goto exit; + handle->app_data.request.data = hdcp2_get_recv_buf(handle); app_data->request.data = handle->app_data.request.data; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index b157b530e03755a1f7ee5a524f20d748cbcb3eff..a34bbb100f278b2abf85d78f569d4e4f49c48216 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -3307,7 +3307,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) * supported by the card. 
*/ pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n", - new_req->rq_disk->disk_name); + old_req->rq_disk->disk_name); mmc_host_clear_sdr104(card->host); mmc_suspend_clk_scaling(card->host); mmc_blk_reset(md, card->host, type); diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index 61666d2697713a7665191b848084846142644457..0cfbdb3ab68a65db660142a0cbe54d314bf17d22 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -33,6 +33,8 @@ struct sdhci_iproc_host { const struct sdhci_iproc_data *data; u32 shadow_cmd; u32 shadow_blk; + bool is_cmd_shadowed; + bool is_blk_shadowed; }; #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) @@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg) static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) { - u32 val = sdhci_iproc_readl(host, (reg & ~3)); - u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host); + u32 val; + u16 word; + + if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) { + /* Get the saved transfer mode */ + val = iproc_host->shadow_cmd; + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && + iproc_host->is_blk_shadowed) { + /* Get the saved block info */ + val = iproc_host->shadow_blk; + } else { + val = sdhci_iproc_readl(host, (reg & ~3)); + } + word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; return word; } @@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) if (reg == SDHCI_COMMAND) { /* Write the block now as we are issuing a command */ - if (iproc_host->shadow_blk != 0) { + if (iproc_host->is_blk_shadowed) { sdhci_iproc_writel(host, iproc_host->shadow_blk, SDHCI_BLOCK_SIZE); - iproc_host->shadow_blk = 0; + iproc_host->is_blk_shadowed = false; } oldval = iproc_host->shadow_cmd; - } else if (reg == 
SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { + iproc_host->is_cmd_shadowed = false; + } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) && + iproc_host->is_blk_shadowed) { /* Block size and count are stored in shadow reg */ oldval = iproc_host->shadow_blk; } else { @@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg) if (reg == SDHCI_TRANSFER_MODE) { /* Save the transfer mode until the command is issued */ iproc_host->shadow_cmd = newval; + iproc_host->is_cmd_shadowed = true; } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { /* Save the block info until the command is issued */ iproc_host->shadow_blk = newval; + iproc_host->is_blk_shadowed = true; } else { /* Command or other regular 32-bit write */ sdhci_iproc_writel(host, newval, reg & ~3); @@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = { static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, - .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, + .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON, .ops = &sdhci_iproc_32only_ops, }; @@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = { .caps1 = SDHCI_DRIVER_TYPE_C | SDHCI_DRIVER_TYPE_D | SDHCI_SUPPORT_DDR50, - .mmc_caps = MMC_CAP_1_8V_DDR, }; static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 6def5445e03e185cdbb92a997bc0a0db95a37b41..9e627e68c4bcd3c1ff28844f7ada1cc1633cf6a6 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -50,6 +50,19 @@ config MTD_MS02NV say M here and read . The module will be called ms02-nv. 
+config MTD_MSM_QPIC_NAND + tristate "MSM QPIC NAND Device Support" + depends on MTD && (ARCH_QCOM || ARCH_MSM) && !MTD_MSM_NAND + select CRC16 + select BITREVERSE + select MTD_NAND_IDS + default n + help + Support for NAND controller in Qualcomm Technologies, Inc. + Parallel Interface controller (QPIC). This new controller + supports BAM mode and BCH error correction mechanism. Based on the + device capabilities either 4 bit or 8 bit BCH ECC will be used. + config MTD_DATAFLASH tristate "Support for AT45xxx DataFlash" depends on SPI_MASTER diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 94895eab3066cd5c19b4366938cfab3f0d6ce0d0..2c897108e81c27782ac297a165a28a41f065eb4f 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o obj-$(CONFIG_MTD_MTDRAM) += mtdram.o obj-$(CONFIG_MTD_LART) += lart.o obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o +obj-$(CONFIG_MTD_MSM_QPIC_NAND) += msm_qpic_nand.o obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o obj-$(CONFIG_MTD_M25P80) += m25p80.o obj-$(CONFIG_MTD_MCHP23K256) += mchp23k256.o diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c new file mode 100644 index 0000000000000000000000000000000000000000..11b6efb55a5d64e6f368d781c9b7454fa747ff30 --- /dev/null +++ b/drivers/mtd/devices/msm_qpic_nand.c @@ -0,0 +1,3609 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "msm_qpic_nand.h" + +#define QPIC_BAM_DEFAULT_IPC_LOGLVL 2 + +/* The driver supports devices upto 4K page */ +#define MAX_CW_PER_PAGE 8 +/* + * Max descriptors needed for erase, read, write operations. + * Usually, this is (2 * MAX_CW_PER_PAGE). + */ +#define MAX_DESC 16 +#define SMEM_AARM_PARTITION_TABLE 9 +#define SMEM_APPS 0 +static bool enable_euclean; + +/* + * Get the DMA memory for requested amount of size. It returns the pointer + * to free memory available from the allocated pool. Returns NULL if there + * is no free memory. + */ +static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size) +{ + uint32_t bitmask, free_bitmask, old_bitmask; + uint32_t need_mask, current_need_mask; + int free_index; + + need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ)) + - 1; + bitmask = atomic_read(&chip->dma_buffer_busy); + free_bitmask = ~bitmask; + if (free_bitmask == 0) + return NULL; + + do { + free_index = __ffs(free_bitmask); + current_need_mask = need_mask << free_index; + + if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >= + MSM_NAND_DMA_BUFFER_SIZE) + return NULL; + + if ((bitmask & current_need_mask) == 0) { + old_bitmask = + atomic_cmpxchg(&chip->dma_buffer_busy, + bitmask, + bitmask | current_need_mask); + if (old_bitmask == bitmask) + return chip->dma_virt_addr + + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ; + free_bitmask = 0;/* force return */ + } + /* current free range was too small, clear all free bits */ + /* below the top busy bit within current_need_mask */ + free_bitmask &= + ~(~0U >> (32 - fls(bitmask & current_need_mask))); + } while (free_bitmask); + + return NULL; +} + +/* + * Releases the DMA memory used to the free pool and also wakes up any user + * thread waiting on wait queue for free memory to be available. 
+ */ +static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip, + void *buffer, size_t size) +{ + int index; + uint32_t used_mask; + + used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ)) + - 1; + index = ((uint8_t *)buffer - chip->dma_virt_addr) / + MSM_NAND_DMA_BUFFER_SLOT_SZ; + atomic_sub(used_mask << index, &chip->dma_buffer_busy); + + wake_up(&chip->dma_wait_queue); +} + +/* + * Calculates page address of the buffer passed, offset of buffer within + * that page and then maps it for DMA by calling dma_map_page(). + */ +static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size, + enum dma_data_direction dir) +{ + struct page *page; + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + + if (virt_addr_valid(addr)) + page = virt_to_page(addr); + else { + if (WARN_ON(size + offset > PAGE_SIZE)) + return ~0; + page = vmalloc_to_page(addr); + } + return dma_map_page(dev, page, offset, size, dir); +} + +#ifdef CONFIG_QCOM_BUS_SCALING +static int msm_nand_bus_set_vote(struct msm_nand_info *info, + unsigned int vote) +{ + int ret = 0; + + ret = msm_bus_scale_client_update_request(info->clk_data.client_handle, + vote); + if (ret) + pr_err("msm_bus_scale_client_update_request() failed, bus_client_handle=0x%x, vote=%d, err=%d\n", + info->clk_data.client_handle, vote, ret); + return ret; +} + +static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info, + bool vote) +{ + int ret = 0; + + if (!info->clk_data.rpmh_clk) { + if (IS_ERR_OR_NULL(info->clk_data.qpic_clk)) { + ret = -EINVAL; + goto out; + } + } + if (atomic_read(&info->clk_data.clk_enabled) == vote) + goto out; + if (!atomic_read(&info->clk_data.clk_enabled) && vote) { + ret = msm_nand_bus_set_vote(info, 1); + if (ret) { + pr_err("Failed to vote for bus with %d\n", ret); + goto out; + } + if (!info->clk_data.rpmh_clk) { + ret = clk_prepare_enable(info->clk_data.qpic_clk); + if (ret) { + pr_err("Failed to enable the bus-clock with error %d\n", + ret); 
+ msm_nand_bus_set_vote(info, 0); + goto out; + } + } + } else if (atomic_read(&info->clk_data.clk_enabled) && !vote) { + if (!info->clk_data.rpmh_clk) + clk_disable_unprepare(info->clk_data.qpic_clk); + msm_nand_bus_set_vote(info, 0); + } + atomic_set(&info->clk_data.clk_enabled, vote); +out: + return ret; +} +#else +static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info, + bool vote) +{ + return 0; +} +#endif + +#ifdef CONFIG_PM +static int msm_nand_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct msm_nand_info *info = dev_get_drvdata(dev); + + ret = msm_nand_setup_clocks_and_bus_bw(info, false); + + return ret; +} + +static int msm_nand_runtime_resume(struct device *dev) +{ + int ret = 0; + struct msm_nand_info *info = dev_get_drvdata(dev); + + ret = msm_nand_setup_clocks_and_bus_bw(info, true); + + return ret; +} + +static void msm_nand_print_rpm_info(struct device *dev) +{ + pr_err("RPM: runtime_status=%d, usage_count=%d, is_suspended=%d, disable_depth=%d, runtime_error=%d, request_pending=%d, request=%d\n", + dev->power.runtime_status, atomic_read(&dev->power.usage_count), + dev->power.is_suspended, dev->power.disable_depth, + dev->power.runtime_error, dev->power.request_pending, + dev->power.request); +} +#else +static int msm_nand_runtime_suspend(struct device *dev) +{ + return 0; +} + +static int msm_nand_runtime_resume(struct device *dev) +{ + return 0; +} + +static void msm_nand_print_rpm_info(struct device *dev) +{ +} +#endif + +#ifdef CONFIG_PM +static int msm_nand_suspend(struct device *dev) +{ + int ret = 0; + + if (!pm_runtime_suspended(dev)) + ret = msm_nand_runtime_suspend(dev); + + return ret; +} + +static int msm_nand_resume(struct device *dev) +{ + int ret = 0; + + if (!pm_runtime_suspended(dev)) + ret = msm_nand_runtime_resume(dev); + + return ret; +} +#else +static int msm_nand_suspend(struct device *dev) +{ + return 0; +} + +static int msm_nand_resume(struct device *dev) +{ + return 0; +} +#endif + +#ifdef 
CONFIG_PM +static int msm_nand_get_device(struct device *dev) +{ + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pr_err("Failed to resume with %d\n", ret); + msm_nand_print_rpm_info(dev); + } else { /* Reset to success */ + ret = 0; + } + return ret; +} + +static int msm_nand_put_device(struct device *dev) +{ + int ret = 0; + + pm_runtime_mark_last_busy(dev); + ret = pm_runtime_put_autosuspend(dev); + if (ret < 0) { + pr_err("Failed to suspend with %d\n", ret); + msm_nand_print_rpm_info(dev); + } else { /* Reset to success */ + ret = 0; + } + return ret; +} +#else +static int msm_nand_get_device(struct device *dev) +{ + return 0; +} + +static int msm_nand_put_device(struct device *dev) +{ + return 0; +} +#endif + +#ifdef CONFIG_QCOM_BUS_SCALING +static int msm_nand_bus_register(struct platform_device *pdev, + struct msm_nand_info *info) +{ + int ret = 0; + + info->clk_data.use_cases = msm_bus_cl_get_pdata(pdev); + if (!info->clk_data.use_cases) { + ret = -EINVAL; + pr_err("msm_bus_cl_get_pdata failed\n"); + goto out; + } + info->clk_data.client_handle = + msm_bus_scale_register_client(info->clk_data.use_cases); + if (!info->clk_data.client_handle) { + ret = -EINVAL; + pr_err("msm_bus_scale_register_client failed\n"); + } +out: + return ret; +} + +static void msm_nand_bus_unregister(struct msm_nand_info *info) +{ + if (info->clk_data.client_handle) + msm_bus_scale_unregister_client(info->clk_data.client_handle); +} +#else +static int msm_nand_bus_register(struct platform_device *pdev, + struct msm_nand_info *info) +{ + pr_info("couldn't register due to missing config option\n"); + return 0; +} + +static void msm_nand_bus_unregister(struct msm_nand_info *info) +{ +} +#endif + +/* + * Wrapper function to prepare a single SPS command element with the data + * that is passed to this function. 
+ */ +static inline void msm_nand_prep_ce(struct sps_command_element *ce, + uint32_t addr, uint32_t command, uint32_t data) +{ + ce->addr = addr; + ce->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND : + (uint32_t) SPS_READ_COMMAND; + ce->data = data; + ce->mask = 0xFFFFFFFF; +} + +static int msm_nand_sps_get_iovec(struct sps_pipe *pipe, uint32_t indx, + unsigned int cnt, struct sps_iovec *iovec) +{ + int ret = 0; + + do { + do { + ret = sps_get_iovec((pipe), (iovec)); + } while (((iovec)->addr == 0x0) && ((iovec)->size == 0x0)); + if (ret) + return ret; + } while (--(cnt)); + return ret; +} + +/* + * Wrapper function to prepare a single command descriptor with a single + * SPS command element with the data that is passed to this function. + * + * Since for any command element it is a must to have this flag + * SPS_IOVEC_FLAG_CMD, this function by default updates this flag for a + * command element that is passed and thus, the caller need not explicilty + * pass this flag. The other flags must be passed based on the need. If a + * command element doesn't have any other flag, then 0 can be passed to flags. + */ +static inline void msm_nand_prep_single_desc(struct msm_nand_sps_cmd *sps_cmd, + uint32_t addr, uint32_t command, + uint32_t data, uint32_t flags) +{ + msm_nand_prep_ce(&sps_cmd->ce, addr, command, data); + sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags; +} +/* + * Read a single NANDc register as mentioned by its parameter addr. The return + * value indicates whether read is successful or not. The register value read + * is stored in val. 
+ */ +static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr, + uint32_t *val) +{ + int ret = 0, submitted_num_desc = 1; + struct msm_nand_sps_cmd *cmd; + struct msm_nand_chip *chip = &info->nand_chip; + struct { + struct msm_nand_sps_cmd cmd; + uint32_t data; + } *dma_buffer; + struct sps_iovec iovec_temp; + + wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + cmd = &dma_buffer->cmd; + msm_nand_prep_single_desc(cmd, addr, READ, msm_virt_to_dma(chip, + &dma_buffer->data), SPS_IOVEC_FLAG_INT); + + mutex_lock(&info->lock); + ret = msm_nand_get_device(chip->dev); + if (ret) + goto out; + ret = sps_transfer_one(info->sps.cmd_pipe.handle, + msm_virt_to_dma(chip, &cmd->ce), + sizeof(struct sps_command_element), NULL, cmd->flags); + if (ret) { + pr_err("failed to submit command %x ret %d\n", addr, ret); + msm_nand_put_device(chip->dev); + goto out; + } + ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle, + info->sps.cmd_pipe.index, submitted_num_desc, + &iovec_temp); + if (ret) { + pr_err("Failed to get iovec for pipe %d: (ret%d)\n", + (info->sps.cmd_pipe.index), ret); + goto out; + } + ret = msm_nand_put_device(chip->dev); + if (ret) + goto out; + *val = dma_buffer->data; +out: + mutex_unlock(&info->lock); + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + return ret; +} + +/* + * Read the Flash ID from the Nand Flash Device. The return value < 0 + * indicates failure. When successful, the Flash ID is stored in parameter + * read_id. 
+ */ +#define READID_CMDS 5 +static int msm_nand_flash_read_id(struct msm_nand_info *info, + bool read_onfi_signature, uint32_t *read_id, + uint32_t *read_id2) +{ + int err = 0, i = 0; + struct msm_nand_sps_cmd *cmd; + struct sps_iovec *iovec; + struct sps_iovec iovec_temp; + struct msm_nand_chip *chip = &info->nand_chip; + /* + * The following 5 commands are required to read id - + * write commands - addr0, flash, exec + * read_commands - read_id, read_id2 + */ + struct { + struct sps_transfer xfer; + struct sps_iovec cmd_iovec[READID_CMDS]; + struct msm_nand_sps_cmd cmd[READID_CMDS]; + uint32_t data[READID_CMDS]; + } *dma_buffer; + + wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer + (chip, sizeof(*dma_buffer)))); + if (read_onfi_signature) + dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS; + else + dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS; + + dma_buffer->data[1] = EXTENDED_FETCH_ID | MSM_NAND_CMD_FETCH_ID; + dma_buffer->data[2] = 1; + dma_buffer->data[3] = 0xeeeeeeee; + dma_buffer->data[4] = 0xeeeeeeee; + + cmd = dma_buffer->cmd; + msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE, + dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE, + dma_buffer->data[1], 0); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE, + dma_buffer->data[2], SPS_IOVEC_FLAG_NWD); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID(info), READ, + msm_virt_to_dma(chip, &dma_buffer->data[3]), 0); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID2(info), READ, + msm_virt_to_dma(chip, &dma_buffer->data[4]), + SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT); + cmd++; + + WARN_ON(cmd - dma_buffer->cmd > READID_CMDS); + dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd); + dma_buffer->xfer.iovec = dma_buffer->cmd_iovec; + dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, + &dma_buffer->cmd_iovec); + iovec = dma_buffer->xfer.iovec; + 
+ for (i = 0; i < dma_buffer->xfer.iovec_count; i++) { + iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce); + iovec->size = sizeof(struct sps_command_element); + iovec->flags = dma_buffer->cmd[i].flags; + iovec++; + } + + mutex_lock(&info->lock); + err = msm_nand_get_device(chip->dev); + if (err) + goto out; + err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer); + if (err) { + pr_err("Failed to submit commands %d\n", err); + msm_nand_put_device(chip->dev); + goto out; + } + err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle, + info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count, + &iovec_temp); + + if (err) { + pr_err("Failed to get iovec for pipe %d: (err:%d)\n", + (info->sps.cmd_pipe.index), err); + goto out; + } + pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]); + if (!read_onfi_signature) + pr_debug("nandid: %x maker %02x device %02x\n", + dma_buffer->data[3], dma_buffer->data[3] & 0xff, + (dma_buffer->data[3] >> 8) & 0xff); + *read_id = dma_buffer->data[3]; + if (read_id2) { + pr_debug("Extended Read ID register value 0x%x\n", + dma_buffer->data[4]); + *read_id2 = dma_buffer->data[4]; + } + err = msm_nand_put_device(chip->dev); +out: + mutex_unlock(&info->lock); + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + return err; +} + +/* + * Contains data for common configuration registers that must be programmed + * for every NANDc operation. + */ +struct msm_nand_common_cfgs { + uint32_t cmd; + uint32_t addr0; + uint32_t addr1; + uint32_t cfg0; + uint32_t cfg1; +}; + +/* + * Function to prepare SPS command elements to write into NANDc configuration + * registers as per the data defined in struct msm_nand_common_cfgs. This is + * required for the following NANDc operations - Erase, Bad Block checking + * and for reading ONFI parameter page. 
+ */ +static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info, + struct msm_nand_common_cfgs data, + struct msm_nand_sps_cmd **curr_cmd) +{ + struct msm_nand_sps_cmd *cmd; + + cmd = *curr_cmd; + msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE, + data.cmd, SPS_IOVEC_FLAG_LOCK); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE, + data.addr0, 0); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR1(info), WRITE, + data.addr1, 0); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, + data.cfg0, 0); + cmd++; + + msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, + data.cfg1, 0); + cmd++; + *curr_cmd = cmd; +} + +/* + * Function to check the CRC integrity check on ONFI parameter page read. + * For ONFI parameter page read, the controller ECC will be disabled. Hence, + * it is mandatory to manually compute CRC and check it against the value + * stored within ONFI page. + */ +static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count) +{ + int i; + uint16_t result; + + for (i = 0; i < count; i++) + buffer[i] = bitrev8(buffer[i]); + + result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count)); + + for (i = 0; i < count; i++) + buffer[i] = bitrev8(buffer[i]); + + return result; +} + +/* + * Structure that contains NANDc register data for commands required + * for reading ONFI parameter page. 
 */
struct msm_nand_flash_onfi_data {
	struct msm_nand_common_cfgs cfg;	/* common NANDc registers */
	uint32_t exec;				/* EXEC_CMD trigger value */
	uint32_t ecc_bch_cfg;			/* DEV0_ECC_CFG value */
};

/* Decoded NANDc and QPIC core revisions, read from hardware. */
struct version {
	uint16_t nand_major;
	uint16_t nand_minor;
	uint16_t qpic_major;
	uint16_t qpic_minor;
};

/*
 * Read the NANDc and QPIC version registers and decode the major/minor
 * fields into *nandc_version. Returns 0 on success or the error from the
 * failed register read.
 */
static int msm_nand_version_check(struct msm_nand_info *info,
			struct version *nandc_version)
{
	uint32_t qpic_ver = 0, nand_ver = 0;
	int err = 0;

	/* Lookup the version to identify supported features */
	err = msm_nand_flash_rd_reg(info, MSM_NAND_VERSION(info),
		&nand_ver);
	if (err) {
		pr_err("Failed to read NAND_VERSION, err=%d\n", err);
		goto out;
	}
	nandc_version->nand_major = (nand_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
		MSM_NAND_VERSION_MAJOR_SHIFT;
	nandc_version->nand_minor = (nand_ver & MSM_NAND_VERSION_MINOR_MASK) >>
		MSM_NAND_VERSION_MINOR_SHIFT;

	err = msm_nand_flash_rd_reg(info, MSM_NAND_QPIC_VERSION(info),
		&qpic_ver);
	if (err) {
		pr_err("Failed to read QPIC_VERSION, err=%d\n", err);
		goto out;
	}
	/* Both version registers share the same major/minor field layout. */
	nandc_version->qpic_major = (qpic_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
			MSM_NAND_VERSION_MAJOR_SHIFT;
	nandc_version->qpic_minor = (qpic_ver & MSM_NAND_VERSION_MINOR_MASK) >>
			MSM_NAND_VERSION_MINOR_SHIFT;
	pr_info("nand_major:%d, nand_minor:%d, qpic_major:%d, qpic_minor:%d\n",
		nandc_version->nand_major, nandc_version->nand_minor,
		nandc_version->qpic_major, nandc_version->qpic_minor);
out:
	return err;
}

/*
 * Function to identify whether the attached NAND flash device is
 * complaint to ONFI spec or not. If yes, then it reads the ONFI parameter
 * page to get the device parameters.
 */
#define ONFI_CMDS 9
static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct flash_identification *flash = &info->flash_dev;
	uint32_t crc_chk_count = 0, page_address = 0;
	int ret = 0, i = 0, submitted_num_desc = 1;

	/* SPS parameters */
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	struct sps_iovec iovec_temp;
	uint32_t rdata;

	/* ONFI Identifier/Parameter Page parameters */
	uint8_t *onfi_param_info_buf = NULL;
	dma_addr_t dma_addr_param_info = 0;
	struct onfi_param_page *onfi_param_page_ptr;
	struct msm_nand_flash_onfi_data data;
	uint32_t onfi_signature = 0;

	/*
	 * The following 9 commands are required to get onfi parameters -
	 * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg,
	 * read_loc_0, exec, flash_status (read cmd).
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[ONFI_CMDS];
		struct msm_nand_sps_cmd cmd[ONFI_CMDS];
		uint32_t flash_status;
	} *dma_buffer;


	/* Lookup the version to identify supported features */
	struct version nandc_version = {0};

	/*
	 * ONFI param-page reads need NANDc >= 1.5 and QPIC >= 1.5.
	 * NOTE(review): if the version read itself fails (ret != 0) the
	 * probe deliberately(?) continues below — confirm this fallthrough
	 * is intended rather than returning the read error.
	 */
	ret = msm_nand_version_check(info, &nandc_version);
	if (!ret && !(nandc_version.nand_major == 1 &&
			nandc_version.nand_minor >= 5 &&
			nandc_version.qpic_major == 1 &&
			nandc_version.qpic_minor >= 5)) {
		ret = -EPERM;
		goto out;
	}
	wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	/* First check the 4-byte ONFI signature via a read-ID cycle. */
	ret = msm_nand_flash_read_id(info, 1, &onfi_signature, NULL);
	if (ret < 0) {
		pr_err("Failed to read ONFI signature\n");
		goto free_dma;
	}
	if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
		pr_info("Found a non ONFI device\n");
		ret = -EIO;
		goto free_dma;
	}

	memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));

	/* Lookup the partition to which apps has access to */
	for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
		if (mtd_part[i].name && !strcmp("boot", mtd_part[i].name)) {
			page_address = mtd_part[i].offset << 6;
			break;
		}
	}
	if (!page_address) {
		pr_info("%s: no apps partition found in smem\n", __func__);
		ret = -EPERM;
		goto free_dma;
	}
	/* Raw (ECC-disabled) read of the ONFI parameter page. */
	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ONFI;
	data.exec = 1;
	data.cfg.addr0 = (page_address << 16) |
				FLASH_READ_ONFI_PARAMETERS_ADDRESS;
	data.cfg.addr1 = (page_address >> 16) & 0xFF;
	data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
	data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	/* Poison flash_status so a stale value is recognizable. */
	dma_buffer->flash_status = 0xeeeeeeee;

	curr_cmd = cmd = dma_buffer->cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);

	cmd = curr_cmd;
	msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
			data.ecc_bch_cfg, 0);
	cmd++;

	/* READ_LOCATION_0: offset 0, full param-info length, last buffer. */
	rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
			rdata, 0);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
		msm_virt_to_dma(chip, &dma_buffer->flash_status),
		SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	WARN_ON(cmd - dma_buffer->cmd > ONFI_CMDS);
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
				&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip,
				&dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->lock);
	ret = msm_nand_get_device(chip->dev);
	if (ret)
		goto unlock_mutex;
	/* Submit data descriptor */
	ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
			ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
	if (ret) {
		pr_err("Failed to submit data descriptors %d\n", ret);
		goto put_dev;
	}
	/* Submit command descriptors */
	ret = sps_transfer(info->sps.cmd_pipe.handle,
			&dma_buffer->xfer);
	if (ret) {
		pr_err("Failed to submit commands %d\n", ret);
		goto put_dev;
	}

	ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
			&iovec_temp);

	if (ret) {
		pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
				(info->sps.cmd_pipe.index), ret);
		goto put_dev;
	}
	ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
			info->sps.data_prod.index, submitted_num_desc,
			&iovec_temp);
	if (ret) {
		pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
				(info->sps.data_prod.index), ret);
		goto put_dev;
	}

	ret = msm_nand_put_device(chip->dev);
	mutex_unlock(&info->lock);
	if (ret)
		goto free_dma;

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
		ret = -EIO;
		goto free_dma;
	}

	/* The page holds several redundant copies; accept the first whose
	 * CRC matches (CRC covers all bytes except the 2-byte CRC field).
	 */
	for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
			/ ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
		onfi_param_page_ptr =
			(struct onfi_param_page *)
			(&(onfi_param_info_buf
			[ONFI_PARAM_PAGE_LENGTH *
			crc_chk_count]));
		if (msm_nand_flash_onfi_crc_check(
			(uint8_t *)onfi_param_page_ptr,
			ONFI_PARAM_PAGE_LENGTH - 2) ==
			onfi_param_page_ptr->integrity_crc) {
			break;
		}
	}
	if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
			/ ONFI_PARAM_PAGE_LENGTH) {
		pr_err("CRC Check failed on param page\n");
		ret = -EIO;
		goto free_dma;
	}
	ret = msm_nand_flash_read_id(info, 0, &flash->flash_id, NULL);
	if (ret < 0) {
		pr_err("Failed to read flash ID\n");
		goto free_dma;
	}
	/* Populate the device geometry from the validated parameter page. */
	flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
	flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
	flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
				flash->pagesize;
	flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
	flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
				* flash->blksize;
	flash->ecc_correctability =
			onfi_param_page_ptr->number_of_bits_ecc_correctability;

	pr_info("Found an ONFI compliant device %s\n",
			onfi_param_page_ptr->device_model);
	/*
	 * Temporary hack for MT29F4G08ABC device.
	 * Since the device is not properly adhering
	 * to ONFi specification it is reporting
	 * as 16 bit device though it is 8 bit device!!!
	 */
	if (!strcmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC"))
		flash->widebus = 0;
	goto free_dma;
put_dev:
	msm_nand_put_device(chip->dev);
unlock_mutex:
	mutex_unlock(&info->lock);
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
			ONFI_PARAM_INFO_LENGTH);
out:
	return ret;
}

/*
 * Structure that contains read/write parameters required for reading/writing
 * from/to a page.
 */
struct msm_nand_rw_params {
	uint32_t page;			/* first page number of the op */
	uint32_t page_count;		/* number of pages to transfer */
	uint32_t sectordatasize;	/* data bytes per codeword */
	uint32_t sectoroobsize;		/* oob bytes per codeword */
	uint32_t cwperpage;		/* codewords per page */
	uint32_t oob_len_cmd;		/* oob bytes left (cmd bookkeeping) */
	uint32_t oob_len_data;		/* oob bytes left (data bookkeeping) */
	uint32_t start_sector;		/* first codeword to transfer */
	uint32_t oob_col;		/* column address for oob access */
	dma_addr_t data_dma_addr;
	dma_addr_t oob_dma_addr;
	dma_addr_t ecc_dma_addr;
	dma_addr_t data_dma_addr_curr;	/* running cursor into data buffer */
	dma_addr_t oob_dma_addr_curr;	/* running cursor into oob buffer */
	dma_addr_t ecc_dma_addr_curr;	/* running cursor into ecc buffer */
	bool read;			/* true for read, false for write */
};

/*
 * Structure that contains NANDc register data required for reading/writing
 * from/to a page.
 */
struct msm_nand_rw_reg_data {
	uint32_t cmd;		/* FLASH_CMD opcode */
	uint32_t addr0;		/* page/column address, low word */
	uint32_t addr1;		/* page address, high bits */
	uint32_t cfg0;		/* DEV0_CFG0 */
	uint32_t cfg1;		/* DEV0_CFG1 */
	uint32_t ecc_bch_cfg;	/* DEV0_ECC_CFG */
	uint32_t exec;		/* EXEC_CMD trigger */
	uint32_t ecc_cfg;	/* EBI2_ECC_BUF_CFG */
	uint32_t clrfstatus;	/* FLASH_STATUS clear value (writes) */
	uint32_t clrrstatus;	/* READ_STATUS clear value (writes) */
};

/*
 * Function that validates page read/write MTD parameters received from upper
 * layers such as MTD/YAFFS2 and returns error for any unsupported operations
 * by the driver. In case of success, it also maps the data and oob buffer
 * received for DMA.
 */
static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
				loff_t offset,
				struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int err = 0;

	pr_debug("========================================================\n");
	pr_debug("offset 0x%llx mode %d\ndatbuf 0x%pK datlen 0x%x\n",
			offset, ops->mode, ops->datbuf, ops->len);
	pr_debug("oobbuf 0x%pK ooblen 0x%x\n", ops->oobbuf, ops->ooblen);

	if (ops->mode == MTD_OPS_PLACE_OOB) {
		pr_err("MTD_OPS_PLACE_OOB is not supported\n");
		err = -EINVAL;
		goto out;
	}

	/* Convert the byte offset into a page number (2K or 4K pages). */
	if (mtd->writesize == PAGE_SIZE_2K)
		args->page = offset >> 11;

	if (mtd->writesize == PAGE_SIZE_4K)
		args->page = offset >> 12;

	args->oob_len_cmd = ops->ooblen;
	args->oob_len_data = ops->ooblen;
	/* 512-byte codewords per page. */
	args->cwperpage = (mtd->writesize >> 9);
	args->read = (read ? true : false);

	/* Only page-aligned offsets are supported. */
	if (offset & (mtd->writesize - 1)) {
		pr_err("unsupported offset 0x%llx\n", offset);
		err = -EINVAL;
		goto out;
	}

	if (!read && !ops->datbuf) {
		pr_err("No data buffer provided for write!!\n");
		err = -EINVAL;
		goto out;
	}

	if (ops->mode == MTD_OPS_RAW) {
		/* RAW transfers move whole (data + oob) pages. */
		if (!ops->datbuf) {
			pr_err("No data buffer provided for RAW mode\n");
			err = -EINVAL;
			goto out;
		} else if ((ops->len % (mtd->writesize +
				mtd->oobsize)) != 0) {
			pr_err("unsupported data len %d for RAW mode\n",
					ops->len);
			err = -EINVAL;
			goto out;
		}
		args->page_count = ops->len / (mtd->writesize + mtd->oobsize);

	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
			/* when ops->datbuf is NULL, ops->len can be ooblen */
			pr_err("unsupported data len %d for AUTO mode\n",
					ops->len);
			err = -EINVAL;
			goto out;
		}
		if (read && ops->oobbuf && !ops->datbuf) {
			/* oob-only read: only the last codeword is needed */
			args->start_sector = args->cwperpage - 1;
			args->page_count = ops->ooblen / mtd->oobavail;
			if ((args->page_count == 0) && (ops->ooblen))
				args->page_count = 1;
		} else if (ops->datbuf) {
			args->page_count = ops->len / mtd->writesize;
		}
	}

	if (ops->datbuf) {
		if (read)
			memset(ops->datbuf, 0xFF, ops->len);
		args->data_dma_addr_curr = args->data_dma_addr =
			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
			pr_err("dma mapping failed for 0x%pK\n", ops->datbuf);
			err = -EIO;
			goto out;
		}
	}
	if (ops->oobbuf) {
		if (read)
			memset(ops->oobbuf, 0xFF, ops->ooblen);
		args->oob_dma_addr_curr = args->oob_dma_addr =
			msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
				(read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
			pr_err("dma mapping failed for 0x%pK\n", ops->oobbuf);
			err = -EIO;
			goto dma_map_oobbuf_failed;
		}
	}
	goto out;
dma_map_oobbuf_failed:
	/* Undo the data-buffer mapping if the oob mapping failed. */
	if (ops->datbuf)
		dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
out:
	return err;
}

/*
 * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
 * required for page read/write.
 */
static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
				struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_rw_reg_data *data)
{
	if (args->read) {
		if (ops->mode != MTD_OPS_RAW) {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
			/*
			 * CW_PER_PAGE holds (codewords - 1); subtract the
			 * codewords skipped before start_sector as well.
			 */
			data->cfg0 =
			(chip->cfg0 & ~(7U << CW_PER_PAGE)) |
			(((args->cwperpage-1) - args->start_sector)
			<< CW_PER_PAGE);
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
			data->cfg0 =
			(chip->cfg0_raw & ~(7U << CW_PER_PAGE)) |
			(((args->cwperpage-1) - args->start_sector)
			<< CW_PER_PAGE);
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = chip->ecc_cfg_raw;
		}

	} else {
		if (ops->mode != MTD_OPS_RAW) {
			data->cmd = MSM_NAND_CMD_PRG_PAGE;
			data->cfg0 = chip->cfg0;
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cmd = MSM_NAND_CMD_PRG_PAGE_ALL;
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = chip->ecc_cfg_raw;
		}
		/* Writes also clear the flash/read status registers. */
		data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
		data->clrrstatus = MSM_NAND_RESET_READ_STS;
	}
	data->exec = 1;
	data->ecc_cfg = chip->ecc_buf_cfg;
}

/*
 * Function to prepare series of SPS command descriptors required for a page
 * read/write operation.
 */
static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_rw_reg_data *data,
				struct msm_nand_info *info,
				uint32_t curr_cw,
				struct msm_nand_rw_cmd_desc *cmd_list,
				uint32_t *cw_desc_cnt,
				uint32_t ecc_parity_bytes)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t rdata;
	/* read_location register parameters */
	uint32_t offset, size, last_read;
	struct sps_command_element *curr_ce, *start_ce;
	uint32_t *flags_ptr, *num_ce_ptr;

	if (curr_cw == args->start_sector) {
		/*
		 * First codeword: emit the full register setup (command,
		 * addresses, configs) into the setup descriptor, with the
		 * pipe LOCK flag.
		 */
		curr_ce = start_ce = &cmd_list->setup_desc.ce[0];
		num_ce_ptr = &cmd_list->setup_desc.num_ce;
		flags_ptr = &cmd_list->setup_desc.flags;
		*flags_ptr = CMD_LCK;
		cmd_list->count = 1;
		msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_CMD(info), WRITE,
				data->cmd);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR0(info), WRITE,
				data->addr0);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR1(info), WRITE,
				data->addr1);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG0(info), WRITE,
				data->cfg0);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG1(info), WRITE,
				data->cfg1);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
				data->ecc_bch_cfg);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_EBI2_ECC_BUF_CFG(info),
				WRITE, data->ecc_cfg);
		curr_ce++;

		if (!args->read) {
			/* Writes: clear stale flash status, then execute. */
			msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
					WRITE, data->clrfstatus);
			curr_ce++;
			goto sub_exec_cmd;
		} else {
			/* Reads: pulse the erased-page detector (clear,
			 * then arm) before executing.
			 */
			msm_nand_prep_ce(curr_ce,
					MSM_NAND_ERASED_CW_DETECT_CFG(info),
					WRITE, CLR_ERASED_PAGE_DET);
			curr_ce++;
			msm_nand_prep_ce(curr_ce,
					MSM_NAND_ERASED_CW_DETECT_CFG(info),
					WRITE, SET_ERASED_PAGE_DET);
			curr_ce++;
		}
	} else {
		/* Subsequent codewords only need a per-CW descriptor. */
		curr_ce = start_ce = &cmd_list->cw_desc[*cw_desc_cnt].ce[0];
		num_ce_ptr = &cmd_list->cw_desc[*cw_desc_cnt].num_ce;
		flags_ptr = &cmd_list->cw_desc[*cw_desc_cnt].flags;
		*cw_desc_cnt += 1;
		*flags_ptr = CMD;
		cmd_list->count++;
	}
	if (!args->read)
		goto sub_exec_cmd;

	if (ops->mode == MTD_OPS_RAW) {
		if (ecc_parity_bytes) {
			/* Read only the ECC parity bytes of the codeword. */
			rdata = (BYTES_517 << 0) | (ecc_parity_bytes << 16)
				| (1 << 31);
			msm_nand_prep_ce(curr_ce,
					MSM_NAND_READ_LOCATION_0(info),
					WRITE, rdata);
			curr_ce++;
		} else {
			/* Read the whole raw codeword. */
			rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
			msm_nand_prep_ce(curr_ce,
					MSM_NAND_READ_LOCATION_0(info),
					WRITE, rdata);
			curr_ce++;
		}
	}
	if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf) {
			/*
			 * 516 data bytes per codeword except the last, which
			 * is shortened by 4 bytes per earlier codeword.
			 */
			offset = 0;
			size = (curr_cw < (args->cwperpage - 1)) ? 516 :
				(512 - ((args->cwperpage - 1) << 2));
			last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
				(ops->oobbuf ? 0 : 1);
			rdata = (offset << 0) | (size << 16) |
				(last_read << 31);

			msm_nand_prep_ce(curr_ce,
					MSM_NAND_READ_LOCATION_0(info),
					WRITE,
					rdata);
			curr_ce++;
		}
		if (curr_cw == (args->cwperpage - 1) && ops->oobbuf) {
			/* OOB lives at the tail of the last codeword. */
			offset = 512 - ((args->cwperpage - 1) << 2);
			size = (args->cwperpage) << 2;
			if (size > args->oob_len_cmd)
				size = args->oob_len_cmd;
			args->oob_len_cmd -= size;
			last_read = 1;
			rdata = (offset << 0) | (size << 16) |
				(last_read << 31);

			/* LOCATION_1 is used only when data was also read. */
			if (!ops->datbuf)
				msm_nand_prep_ce(curr_ce,
						MSM_NAND_READ_LOCATION_0(info),
						WRITE, rdata);
			else
				msm_nand_prep_ce(curr_ce,
						MSM_NAND_READ_LOCATION_1(info),
						WRITE, rdata);
			curr_ce++;
		}
	}
sub_exec_cmd:
	/* EXEC_CMD kicks the operation; NWD forces write-completion order. */
	*flags_ptr |= NWD;
	msm_nand_prep_ce(curr_ce, MSM_NAND_EXEC_CMD(info), WRITE, data->exec);
	curr_ce++;

	*num_ce_ptr = curr_ce - start_ce;
}

/*
 * Function to prepare and submit SPS data descriptors required for a page
 * read/write operation.
 */
static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_info *info,
				uint32_t curr_cw,
				uint32_t ecc_parity_bytes)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct sps_pipe *data_pipe_handle;
	uint32_t sectordatasize, sectoroobsize;
	uint32_t sps_flags = 0;
	int err = 0;

	/* Reads consume from the producer pipe, writes feed the consumer. */
	if (args->read)
		data_pipe_handle = info->sps.data_prod.handle;
	else
		data_pipe_handle = info->sps.data_cons.handle;

	if (ops->mode == MTD_OPS_RAW) {
		if (ecc_parity_bytes && args->read) {
			/* Interrupt only on the final codeword. */
			if (curr_cw == (args->cwperpage - 1))
				sps_flags |= SPS_IOVEC_FLAG_INT;

			/* read only ecc bytes */
			err = sps_transfer_one(data_pipe_handle,
					args->ecc_dma_addr_curr,
					ecc_parity_bytes, NULL,
					sps_flags);
			if (err)
				goto out;
			args->ecc_dma_addr_curr += ecc_parity_bytes;
		} else {
			sectordatasize = chip->cw_size;
			/* EOT marks the end of each write transfer. */
			if (!args->read)
				sps_flags = SPS_IOVEC_FLAG_EOT;
			if (curr_cw == (args->cwperpage - 1))
				sps_flags |= SPS_IOVEC_FLAG_INT;

			err = sps_transfer_one(data_pipe_handle,
					args->data_dma_addr_curr,
					sectordatasize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->data_dma_addr_curr += sectordatasize;
		}
	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf) {
			/* Last codeword carries fewer data bytes (see the
			 * 516/512-4*(cw-1) layout used by the controller).
			 */
			sectordatasize = (curr_cw < (args->cwperpage - 1))
					? 516 :
					(512 - ((args->cwperpage - 1) << 2));

			if (!args->read) {
				sps_flags = SPS_IOVEC_FLAG_EOT;
				/* oob follows in the same transfer, so the
				 * last data chunk is not EOT in that case.
				 */
				if (curr_cw == (args->cwperpage - 1) &&
						ops->oobbuf)
					sps_flags = 0;
			}
			if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
				sps_flags |= SPS_IOVEC_FLAG_INT;

			err = sps_transfer_one(data_pipe_handle,
					args->data_dma_addr_curr,
					sectordatasize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->data_dma_addr_curr += sectordatasize;
		}

		if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
			/* OOB is transferred once, with the last codeword. */
			sectoroobsize = args->cwperpage << 2;
			if (sectoroobsize > args->oob_len_data)
				sectoroobsize = args->oob_len_data;

			if (!args->read)
				sps_flags |= SPS_IOVEC_FLAG_EOT;
			sps_flags |= SPS_IOVEC_FLAG_INT;
			err = sps_transfer_one(data_pipe_handle,
					args->oob_dma_addr_curr,
					sectoroobsize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->oob_dma_addr_curr += sectoroobsize;
			args->oob_len_data -= sectoroobsize;
		}
	}
out:
	return err;
}

/*
 * Read ECC bytes and check whether page is erased or not.
 *
 * The NAND devices manufactured with newer process node technology are
 * susceptible to bit-flips. These bit-flips are easily fixable with the
 * ECC engine and ECC information stored on the NAND device. This device
 * specific information is found in the data sheet for the NAND device
 * and is usually specified as a "number of bit-flips expected per code-
 * word". For example, "a single bit-flip per codeword". Also this means
 * that the number of ECC errors don't increase over period of time as in
 * the past and can't be used to predict a "bad-block about to happen"
 * situation anymore.
 *
 * So what this means to erased pages:
 * Since ECC data for an erased page is all 0xFF's, the ECC engine would
 * not be able to correct any bit-flips that occur in these newer parts.
+ * If the NAND controller is unable to identify the erased page due to + * the bit-flips, then there would be "uncorrectable ECC errors" detected + * and would get reported to file system layer (YAFFS2/UBIFS etc) and would + * result in a good block being marked as a bad block and also lead to + * error scenarios. + + * So to handle this, the following will be done by software until newer + * NAND controller hardware is avialable that can detected erased pages + * with bit-flips successfully. + * + * 1. msm_nand_read_oob() calls this function when "uncorrectable ECC + * errors" occur. + * 2. This function then performs a raw read of the page. + * 3. This read is done to extract ECC bytes and not data from that page. + * 4. For each codeword’s ECC data, the following is done + * a. Count number of zero bits + * b. If that count is greater than , then it is + * not an erased page. + * c. Else repeat for next codeword’s ECC data + * d. If all codewords have less than bits of + * zeros, then it’s considered an erased page. + * + * Since "uncorrectable ECC errors" do not occur except for either an + * erased page or in the case of an actual errror, this solution would + * work. + * + */ +static int msm_nand_is_erased_page(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops, + struct msm_nand_rw_params *rw_params, + bool *erased_page) +{ + struct msm_nand_info *info = mtd->priv; + struct msm_nand_chip *chip = &info->nand_chip; + uint32_t cwperpage = (mtd->writesize >> 9); + int err, submitted_num_desc = 0; + uint32_t n = 0, num_zero_bits = 0, total_ecc_byte_cnt; + struct msm_nand_rw_reg_data data; + struct sps_iovec *iovec; + struct sps_iovec iovec_temp; + struct mtd_oob_ops raw_ops; + + /* + * The following 6 commands will be sent only once for the first + * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1, + * dev0_ecc_cfg, ebi2_ecc_buf_cfg. 
The following 6 commands will + * be sent for every CW - flash, read_location_0, read_location_1, + * exec, flash_status and buffer_status. + */ + struct msm_nand_rw_cmd_desc *cmd_list = NULL; + uint32_t cw_desc_cnt = 0; + struct { + struct sps_transfer xfer; + struct sps_iovec cmd_iovec[MAX_DESC]; + struct { + uint32_t count; + struct msm_nand_cmd_setup_desc setup_desc; + struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1]; + } cmd_list; + struct { + uint32_t flash_status; + uint32_t buffer_status; + uint32_t erased_cw_status; + } result[MAX_CW_PER_PAGE]; + } *dma_buffer; + uint8_t *ecc; + + pr_debug("========================================================\n"); + total_ecc_byte_cnt = (chip->ecc_parity_bytes * cwperpage); + memcpy(&raw_ops, ops, sizeof(struct mtd_oob_ops)); + raw_ops.mode = MTD_OPS_RAW; + ecc = kzalloc(total_ecc_byte_cnt, GFP_KERNEL); + + wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer( + chip, sizeof(*dma_buffer)))); + + memset(&data, 0, sizeof(struct msm_nand_rw_reg_data)); + msm_nand_update_rw_reg_data(chip, &raw_ops, rw_params, &data); + cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list; + + /* map the ecc for dma operations */ + rw_params->ecc_dma_addr_curr = rw_params->ecc_dma_addr = + dma_map_single(chip->dev, ecc, total_ecc_byte_cnt, + DMA_FROM_DEVICE); + + data.addr0 = (rw_params->page << 16) | rw_params->oob_col; + data.addr1 = (rw_params->page >> 16) & 0xff; + for (n = rw_params->start_sector; n < cwperpage; n++) { + struct sps_command_element *curr_ce, *start_ce; + + dma_buffer->result[n].flash_status = 0xeeeeeeee; + dma_buffer->result[n].buffer_status = 0xeeeeeeee; + dma_buffer->result[n].erased_cw_status = 0xeeeeee00; + + msm_nand_prep_rw_cmd_desc(&raw_ops, rw_params, &data, info, + n, cmd_list, &cw_desc_cnt, + chip->ecc_parity_bytes); + + start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0]; + curr_ce = start_ce; + cmd_list->cw_desc[cw_desc_cnt].flags = CMD; + if (n == (cwperpage - 1)) + 
cmd_list->cw_desc[cw_desc_cnt].flags |= + INT_UNLCK; + cmd_list->count++; + + msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info), + READ, msm_virt_to_dma(chip, + &dma_buffer->result[n].flash_status)); + curr_ce++; + + msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info), + READ, msm_virt_to_dma(chip, + &dma_buffer->result[n].buffer_status)); + curr_ce++; + + msm_nand_prep_ce(curr_ce, + MSM_NAND_ERASED_CW_DETECT_STATUS(info), + READ, msm_virt_to_dma(chip, + &dma_buffer->result[n].erased_cw_status)); + curr_ce++; + cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce - + start_ce; + } + + dma_buffer->xfer.iovec_count = cmd_list->count; + dma_buffer->xfer.iovec = dma_buffer->cmd_iovec; + dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, + &dma_buffer->cmd_iovec); + iovec = dma_buffer->xfer.iovec; + + iovec->addr = msm_virt_to_dma(chip, + &cmd_list->setup_desc.ce[0]); + iovec->size = sizeof(struct sps_command_element) * + cmd_list->setup_desc.num_ce; + iovec->flags = cmd_list->setup_desc.flags; + iovec++; + for (n = 0; n < (cmd_list->count - 1); n++) { + iovec->addr = msm_virt_to_dma(chip, + &cmd_list->cw_desc[n].ce[0]); + iovec->size = sizeof(struct sps_command_element) * + cmd_list->cw_desc[n].num_ce; + iovec->flags = cmd_list->cw_desc[n].flags; + iovec++; + } + mutex_lock(&info->lock); + err = msm_nand_get_device(chip->dev); + if (err) + goto unlock_mutex; + /* Submit data descriptors */ + for (n = rw_params->start_sector; n < cwperpage; n++) { + err = msm_nand_submit_rw_data_desc(&raw_ops, + rw_params, info, n, + chip->ecc_parity_bytes); + if (err) { + pr_err("Failed to submit data descs %d\n", err); + panic("error in nand driver\n"); + goto put_dev; + } + } + submitted_num_desc = cwperpage - rw_params->start_sector; + + /* Submit command descriptors */ + err = sps_transfer(info->sps.cmd_pipe.handle, + &dma_buffer->xfer); + if (err) { + pr_err("Failed to submit commands %d\n", err); + goto put_dev; + } + + err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle, 
+ info->sps.cmd_pipe.index, + dma_buffer->xfer.iovec_count, + &iovec_temp); + if (err) { + pr_err("Failed to get iovec for pipe %d: (err:%d)\n", + (info->sps.cmd_pipe.index), err); + goto put_dev; + } + err = msm_nand_sps_get_iovec(info->sps.data_prod.handle, + info->sps.data_prod.index, submitted_num_desc, + &iovec_temp); + if (err) { + pr_err("Failed to get iovec for pipe %d: (err:%d)\n", + (info->sps.data_prod.index), err); + goto put_dev; + } + + err = msm_nand_put_device(chip->dev); + mutex_unlock(&info->lock); + if (err) + goto free_dma; + + pr_debug("addr0: 0x%08x, addr1: 0x%08x\n", data.addr0, data.addr1); + for (n = rw_params->start_sector; n < cwperpage; n++) + pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x\n", + n, dma_buffer->result[n].flash_status, + dma_buffer->result[n].buffer_status, + dma_buffer->result[n].erased_cw_status); + + goto free_dma; +put_dev: + msm_nand_put_device(chip->dev); +unlock_mutex: + mutex_unlock(&info->lock); +free_dma: + msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); + /* umap ecc dma memory */ + dma_unmap_single(chip->dev, rw_params->ecc_dma_addr, + total_ecc_byte_cnt, DMA_FROM_DEVICE); + /* check for bit flips in ecc data */ + for (n = rw_params->start_sector; n < cwperpage; n++) { + uint8_t *ecc_temp = ecc; + int last_pos = 0, next_pos = 0; + int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8); + + do { + last_pos = find_next_zero_bit((void *)ecc_temp, + ecc_bytes_percw_in_bits, next_pos); + + if (last_pos < ecc_bytes_percw_in_bits) + num_zero_bits++; + + if (num_zero_bits > 4) { + *erased_page = false; + goto free_mem; + } + + next_pos = last_pos + 1; + } while (last_pos < ecc_bytes_percw_in_bits); + + num_zero_bits = last_pos = next_pos = 0; + ecc_temp += chip->ecc_parity_bytes; + } + + if ((n == cwperpage) && (num_zero_bits <= 4)) + *erased_page = true; +free_mem: + kfree(ecc); + pr_debug("========================================================\n"); + return err; +} + +/* + 
 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
 * page with main or/and spare data.
 *
 * Per-page flow: build one SPS command descriptor chain (setup descriptor
 * once, then per-codeword flash/buffer/erased-CW status reads), submit the
 * data descriptors, then the command descriptors, and finally poll both
 * pipes for completion before decoding the status words.
 *
 * Returns 0 on success, -EUCLEAN when correctable ECC errors at or above
 * the device ECC capability were seen (only if enable_euclean is set),
 * -EBADMSG for uncorrectable ECC errors on a non-erased page, or a
 * negative errno on setup/transfer failure.  ops->retlen and
 * ops->oobretlen report the amount actually read.
 */
static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
			struct mtd_oob_ops *ops)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	struct flash_identification *flash_dev = &info->flash_dev;
	uint32_t cwperpage = (mtd->writesize >> 9);
	int err, pageerr = 0, rawerr = 0, submitted_num_desc = 0;
	uint32_t n = 0, pages_read = 0;
	uint32_t ecc_errors = 0, total_ecc_errors = 0, ecc_capability;
	struct msm_nand_rw_params rw_params;
	struct msm_nand_rw_reg_data data;
	struct sps_iovec *iovec;
	struct sps_iovec iovec_temp;
	bool erased_page;
	/* One bit per page read this call; set when an erased page with
	 * bit-flips must be forced to all-0xff before returning.
	 */
	uint64_t fix_data_in_pages = 0;

	/*
	 * The following 6 commands will be sent only once for the first
	 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
	 * be sent for every CW - flash, read_location_0, read_location_1,
	 * exec, flash_status and buffer_status.
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[MAX_DESC];
		struct {
			uint32_t count;
			struct msm_nand_cmd_setup_desc setup_desc;
			struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
		} cmd_list;
		struct {
			uint32_t flash_status;
			uint32_t buffer_status;
			uint32_t erased_cw_status;
		} result[MAX_CW_PER_PAGE];
	} *dma_buffer;
	struct msm_nand_rw_cmd_desc *cmd_list = NULL;

	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
	err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
	if (err)
		goto validate_mtd_params_failed;

	/* Blocks until a DMA-coherent buffer of this size is available. */
	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer))));

	rw_params.oob_col = rw_params.start_sector * chip->cw_size;
	if (chip->cfg1 & (1 << WIDE_FLASH))
		rw_params.oob_col >>= 1;

	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
	cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;

	ecc_capability = flash_dev->ecc_capability;

	/* One iteration per page; rw_params.page_count counts down. */
	while (rw_params.page_count-- > 0) {
		uint32_t cw_desc_cnt = 0;

		erased_page = false;
		data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
		data.addr1 = (rw_params.page >> 16) & 0xff;

		for (n = rw_params.start_sector; n < cwperpage; n++) {
			struct sps_command_element *curr_ce, *start_ce;

			/* Poison the status words so a missed DMA write is
			 * visible when they are decoded below.
			 */
			dma_buffer->result[n].flash_status = 0xeeeeeeee;
			dma_buffer->result[n].buffer_status = 0xeeeeeeee;
			dma_buffer->result[n].erased_cw_status = 0xeeeeee00;

			msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
					n, cmd_list, &cw_desc_cnt, 0);

			start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
			curr_ce = start_ce;
			cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
			/* Last codeword: request interrupt + pipe unlock. */
			if (n == (cwperpage - 1))
				cmd_list->cw_desc[cw_desc_cnt].flags |=
					INT_UNLCK;
			cmd_list->count++;

			msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
				READ, msm_virt_to_dma(chip,
				&dma_buffer->result[n].flash_status));
			curr_ce++;

			msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
				READ, msm_virt_to_dma(chip,
				&dma_buffer->result[n].buffer_status));
			curr_ce++;

			msm_nand_prep_ce(curr_ce,
				MSM_NAND_ERASED_CW_DETECT_STATUS(info),
				READ, msm_virt_to_dma(chip,
				&dma_buffer->result[n].erased_cw_status));
			curr_ce++;
			cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
				start_ce;
		}

		dma_buffer->xfer.iovec_count = cmd_list->count;
		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
				&dma_buffer->cmd_iovec);
		iovec = dma_buffer->xfer.iovec;

		/* First iovec covers the one-time setup descriptor; the rest
		 * cover the per-codeword descriptors built above.
		 */
		iovec->addr = msm_virt_to_dma(chip,
				&cmd_list->setup_desc.ce[0]);
		iovec->size = sizeof(struct sps_command_element) *
				cmd_list->setup_desc.num_ce;
		iovec->flags = cmd_list->setup_desc.flags;
		iovec++;
		for (n = 0; n < (cmd_list->count - 1); n++) {
			iovec->addr = msm_virt_to_dma(chip,
					&cmd_list->cw_desc[n].ce[0]);
			iovec->size = sizeof(struct sps_command_element) *
					cmd_list->cw_desc[n].num_ce;
			iovec->flags = cmd_list->cw_desc[n].flags;
			iovec++;
		}
		mutex_lock(&info->lock);
		err = msm_nand_get_device(chip->dev);
		if (err)
			goto unlock_mutex;
		/* Submit data descriptors */
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			err = msm_nand_submit_rw_data_desc(ops,
						&rw_params, info, n, 0);
			if (err) {
				pr_err("Failed to submit data descs %d\n", err);
				/* NOTE(review): panic() never returns, so the
				 * goto below is unreachable dead code —
				 * confirm whether the panic is intentional
				 * debug instrumentation.
				 */
				panic("error in nand driver\n");
				goto put_dev;
			}
		}

		if (ops->mode == MTD_OPS_RAW) {
			submitted_num_desc = cwperpage - rw_params.start_sector;
		} else if (ops->mode == MTD_OPS_AUTO_OOB) {
			if (ops->datbuf)
				submitted_num_desc = cwperpage -
					rw_params.start_sector;
			if (ops->oobbuf)
				submitted_num_desc++;
		}

		/* Submit command descriptors */
		err = sps_transfer(info->sps.cmd_pipe.handle,
				&dma_buffer->xfer);
		if (err) {
			pr_err("Failed to submit commands %d\n", err);
			goto put_dev;
		}

		/* Wait for command-pipe then data-producer-pipe completion. */
		err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
				info->sps.cmd_pipe.index,
				dma_buffer->xfer.iovec_count,
				&iovec_temp);
		if (err) {
			pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
					(info->sps.cmd_pipe.index), err);
			goto put_dev;
		}
		err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
				info->sps.data_prod.index, submitted_num_desc,
				&iovec_temp);
		if (err) {
			pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
					(info->sps.data_prod.index), err);
			goto put_dev;
		}

		err = msm_nand_put_device(chip->dev);
		mutex_unlock(&info->lock);
		if (err)
			goto free_dma;
		/* Check for flash status errors */
		pageerr = rawerr = 0;
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
					FS_MPU_ERR)) {
				rawerr = -EIO;
				/*
				 * Check if ECC error was due to an erased
				 * codeword. If so, ignore the error.
				 *
				 * NOTE: There is a bug in erased page
				 * detection hardware block when reading
				 * only spare data. In order to work around
				 * this issue, instead of using PAGE_ALL_ERASED
				 * bit to check for whether a whole page is
				 * erased or not, we use CODEWORD_ALL_ERASED
				 * and CODEWORD_ERASED bits together and check
				 * each codeword that has FS_OP_ERR bit set is
				 * an erased codeword or not.
				 */
				if ((dma_buffer->result[n].erased_cw_status &
					ERASED_CW) == ERASED_CW) {
					/*
					 * At least one code word is detected
					 * as an erased code word.
					 */
					pr_debug("erased codeword detected - ignore ecc error\n");
					continue;
				}
				pageerr = rawerr;
				break;
			}
		}
		/* check for uncorrectable errors */
		if (pageerr) {
			for (n = rw_params.start_sector; n < cwperpage; n++) {
				if (dma_buffer->result[n].buffer_status &
					BS_UNCORRECTABLE_BIT) {
					/*
					 * Check if page is actually
					 * erased or not.
					 */
					err = msm_nand_is_erased_page(mtd,
							from, ops,
							&rw_params,
							&erased_page);
					if (err)
						goto free_dma;
					if (!erased_page) {
						mtd->ecc_stats.failed++;
						pageerr = -EBADMSG;
						break;
					}
					pageerr = 0;
					pr_debug("Uncorrectable ECC errors detected on an erased page and has been fixed.\n");
					break;
				}
			}
		}

		if (rawerr && !pageerr && erased_page) {
			/*
			 * This means an erased page had bit flips and now
			 * those bit-flips need to be cleared in the data
			 * being sent to upper layers. This will keep track
			 * of those pages and at the end, the data will be
			 * fixed before this function returns.
			 * Note that a whole page worth of data will be fixed
			 * and this will only handle about 64 pages being read
			 * at a time i.e. one erase block worth of pages.
			 *
			 * NOTE(review): page_count counts DOWN across the
			 * outer loop while buffer offsets count up, yet the
			 * fix-up loop at free_dma maps bit 0 to the first
			 * page in the buffer.  Verify the bit<->offset
			 * ordering is as intended.
			 */
			fix_data_in_pages |= BIT(rw_params.page_count);
		}
		/* check for correctable errors */
		if (!rawerr) {
			for (n = rw_params.start_sector; n < cwperpage; n++) {
				ecc_errors =
				    dma_buffer->result[n].buffer_status
				    & BS_CORRECTABLE_ERR_MSK;
				if (ecc_errors) {
					total_ecc_errors += ecc_errors;
					mtd->ecc_stats.corrected += ecc_errors;
					/*
					 * Since the nand device can have the
					 * ecc errors even on the first ever
					 * write. Any reporting of EUCLEAN
					 * when there are less then the ecc
					 * capability of the device is not
					 * useful.
					 *
					 * Also don't report EUCLEAN unless
					 * the enable_euclean is set.
					 */
					if (enable_euclean &&
					    ecc_errors >= ecc_capability)
						pageerr = -EUCLEAN;
				}
			}
		}
		/* Latch the page error into err; -EUCLEAN is only allowed to
		 * replace a clean (0) status, never an earlier hard error.
		 */
		if (pageerr && (pageerr != -EUCLEAN || err == 0))
			err = pageerr;

		if (rawerr && !pageerr) {
			pr_debug("%llx %x %x empty page\n",
			       (loff_t)rw_params.page * mtd->writesize,
			       ops->len, ops->ooblen);
		} else {
			for (n = rw_params.start_sector; n < cwperpage; n++)
				pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x, pageerr: %d, rawerr: %d\n",
					n, dma_buffer->result[n].flash_status,
					dma_buffer->result[n].buffer_status,
					dma_buffer->result[n].erased_cw_status,
					pageerr, rawerr);
		}
		/* Soft ECC statuses do not abort the multi-page loop. */
		if (err && err != -EUCLEAN && err != -EBADMSG)
			goto free_dma;
		pages_read++;
		rw_params.page++;
	}
	goto free_dma;
put_dev:
	msm_nand_put_device(chip->dev);
unlock_mutex:
	mutex_unlock(&info->lock);
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (ops->oobbuf)
		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
				ops->ooblen, DMA_FROM_DEVICE);
	if (ops->datbuf)
		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
				ops->len, DMA_BIDIRECTIONAL);
	/*
	 * If there were any erased pages detected with ECC errors, then
	 * it is most likely that the data is not all 0xff. So memset that
	 * page to all 0xff.
	 */
	while (fix_data_in_pages) {
		int temp_page = 0, oobsize = rw_params.cwperpage << 2;
		int count = 0, offset = 0;

		/* count advances every iteration (including cleared bits) so
		 * it tracks the page index for the current bit position.
		 */
		temp_page = fix_data_in_pages & BIT_MASK(0);
		fix_data_in_pages = fix_data_in_pages >> 1;
		count++;

		if (!temp_page)
			continue;

		offset = (count - 1) * mtd->writesize;
		if (ops->datbuf)
			memset((ops->datbuf + offset), 0xff, mtd->writesize);

		offset = (count - 1) * oobsize;
		if (ops->oobbuf)
			memset(ops->oobbuf + offset, 0xff, oobsize);
	}
validate_mtd_params_failed:
	if (ops->mode != MTD_OPS_RAW)
		ops->retlen = mtd->writesize * pages_read;
	else
		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
	if (err)
		pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
		       from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
		       total_ecc_errors);
	pr_debug("ret %d, retlen %d oobretlen %d\n",
			err, ops->retlen, ops->oobretlen);

	pr_debug("========================================================\n");
	return err;
}

/**
 * msm_nand_read_partial_page() - read partial page
 * @mtd: pointer to mtd info
 * @from: start address of the page
 * @ops: pointer to mtd_oob_ops
 *
 * Reads a page into a bounce buffer and copies the required
 * number of bytes to actual buffer. The pages that are aligned
 * do not use bounce buffer.
 */
static int msm_nand_read_partial_page(struct mtd_info *mtd,
		loff_t from, struct mtd_oob_ops *ops)
{
	int err = 0;
	unsigned char *actual_buf;	/* caller's destination buffer */
	unsigned char *bounce_buf;	/* one-page scratch for unaligned I/O */
	loff_t aligned_from;
	loff_t offset;
	size_t len;
	size_t actual_len, ret_len;
	/* Soft ECC statuses are latched here so that a partial read can
	 * continue; they are re-applied to err only on the way out.
	 */
	int is_euclean = 0;
	int is_ebadmsg = 0;

	actual_len = ops->len;
	ret_len = 0;
	actual_buf = ops->datbuf;

	bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!bounce_buf) {
		err = -ENOMEM;
		goto out;
	}

	/* Get start address of page to read from */
	ops->len = mtd->writesize;
	offset = from & (mtd->writesize - 1);
	aligned_from = from - offset;

	for (;;) {
		bool no_copy = false;

		/* Bytes of this page that belong to the caller's request. */
		len = mtd->writesize - offset;
		if (len > actual_len)
			len = actual_len;

		/* A fully aligned, full-page chunk can be read directly into
		 * the caller's buffer ...
		 */
		if (offset == 0 && len == mtd->writesize)
			no_copy = true;

		/* ... unless the buffer is a vmalloc range spanning more
		 * than one MMU page, which the DMA path cannot take.
		 */
		if (!virt_addr_valid(actual_buf) &&
				!is_buffer_in_page(actual_buf, ops->len))
			no_copy = false;

		ops->datbuf = no_copy ? actual_buf : bounce_buf;
		err = msm_nand_read_oob(mtd, aligned_from, ops);
		if (err == -EUCLEAN) {
			is_euclean = 1;
			err = 0;
		}

		if (err == -EBADMSG) {
			is_ebadmsg = 1;
			err = 0;
		}

		if (err < 0) {
			/* Clear previously set EUCLEAN / EBADMSG */
			is_euclean = 0;
			is_ebadmsg = 0;
			ret_len = ops->retlen;
			break;
		}

		if (!no_copy)
			memcpy(actual_buf, bounce_buf + offset, len);

		actual_len -= len;
		ret_len += len;

		if (actual_len == 0)
			break;

		/* Only the first page can start mid-page; subsequent pages
		 * are read from their beginning.
		 */
		actual_buf += len;
		offset = 0;
		aligned_from += mtd->writesize;
	}

	ops->retlen = ret_len;
	kfree(bounce_buf);
out:
	if (is_euclean == 1)
		err = -EUCLEAN;

	/* Snub EUCLEAN if we also have EBADMSG */
	if (is_ebadmsg == 1)
		err = -EBADMSG;
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
 * page with only main data.
 */
static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
	      size_t *retlen, u_char *buf)
{
	int ret;
	/* Latched soft ECC statuses; folded back into ret on exit. */
	int is_euclean = 0;
	int is_ebadmsg = 0;
	struct mtd_oob_ops ops;
	unsigned char *bounce_buf = NULL;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.oobbuf = NULL;
	*retlen = 0;

	/* Aligned, whole-page requests go straight to the OOB read path;
	 * everything else is delegated to the partial-page helper.
	 */
	if (!(from & (mtd->writesize - 1)) && !(len % mtd->writesize)) {
		/*
		 * Handle reading of large size read buffer in vmalloc
		 * address space that does not fit in an MMU page.
		 */
		if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
			ops.len = mtd->writesize;

			bounce_buf = kmalloc(ops.len, GFP_KERNEL);
			if (!bounce_buf) {
				ret = -ENOMEM;
				goto out;
			}

			/* One page per iteration; bounce only the chunks
			 * that straddle an MMU page boundary.
			 */
			for (;;) {
				bool no_copy = false;

				if (!is_buffer_in_page(buf, ops.len)) {
					/* NOTE(review): copying the caller's
					 * buffer INTO the bounce buffer
					 * before a read looks redundant (the
					 * read overwrites it) — confirm
					 * whether it can be dropped.
					 */
					memcpy(bounce_buf, buf, ops.len);
					ops.datbuf = (uint8_t *) bounce_buf;
				} else {
					ops.datbuf = (uint8_t *) buf;
					no_copy = true;
				}
				ret = msm_nand_read_oob(mtd, from, &ops);
				if (ret == -EUCLEAN) {
					is_euclean = 1;
					ret = 0;
				}
				if (ret == -EBADMSG) {
					is_ebadmsg = 1;
					ret = 0;
				}
				if (ret < 0) {
					/* Clear previously set errors */
					is_euclean = 0;
					is_ebadmsg = 0;
					break;
				}


				if (!no_copy)
					memcpy(buf, bounce_buf, ops.retlen);

				len -= ops.retlen;
				*retlen += ops.retlen;
				if (len == 0)
					break;
				buf += ops.retlen;
				from += ops.retlen;

				/* A sub-page tail is finished off by the
				 * partial-page helper.
				 */
				if (len < mtd->writesize) {
					ops.len = len;
					ops.datbuf = buf;
					ret = msm_nand_read_partial_page(
						mtd, from, &ops);
					*retlen += ops.retlen;
					break;
				}
			}
			kfree(bounce_buf);
		} else {
			ops.len = len;
			ops.datbuf = (uint8_t *)buf;
			ret = msm_nand_read_oob(mtd, from, &ops);
			*retlen = ops.retlen;
		}
	} else {
		ops.len = len;
		ops.datbuf = (uint8_t *)buf;
		ret = msm_nand_read_partial_page(mtd, from, &ops);
		*retlen = ops.retlen;
	}
out:
	if (is_euclean == 1)
		ret = -EUCLEAN;

	/* Snub EUCLEAN if we also have EBADMSG */
	if (is_ebadmsg == 1)
		ret = -EBADMSG;

	return ret;
}

/*
 *
Function that gets called from upper layers such as MTD/YAFFS2 to write a
 * page with both main and spare data.
 *
 * Mirrors msm_nand_read_oob(): builds one command-descriptor chain per
 * page, submits data then command descriptors, waits on both pipes and
 * then validates every codeword's flash status.  Returns 0 or a negative
 * errno; ops->retlen/ops->oobretlen report the amount written.
 */
static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
				struct mtd_oob_ops *ops)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t cwperpage = (mtd->writesize >> 9);
	uint32_t n, flash_sts, pages_written = 0;
	int err = 0, submitted_num_desc = 0;
	struct msm_nand_rw_params rw_params;
	struct msm_nand_rw_reg_data data;
	struct sps_iovec *iovec;
	struct sps_iovec iovec_temp;
	/*
	 * The following 7 commands will be sent only once :
	 * For first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
	 * For last codeword (CW) - read_status(write)
	 *
	 * The following 4 commands will be sent for every CW :
	 * flash, exec, flash_status (read), flash_status (write).
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[MAX_DESC + 1];
		struct {
			uint32_t count;
			struct msm_nand_cmd_setup_desc setup_desc;
			struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC];
		} cmd_list;
		struct {
			uint32_t flash_status;
		} data[MAX_CW_PER_PAGE];
	} *dma_buffer;
	struct msm_nand_rw_cmd_desc *cmd_list = NULL;

	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
	err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
	if (err)
		goto validate_mtd_params_failed;

	/* Blocks until a DMA-coherent buffer of this size is available. */
	wait_event(chip->dma_wait_queue, (dma_buffer =
			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));

	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
	cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;

	/* One iteration per page; rw_params.page_count counts down. */
	while (rw_params.page_count-- > 0) {
		uint32_t cw_desc_cnt = 0;
		struct sps_command_element *curr_ce, *start_ce;

		data.addr0 = (rw_params.page << 16);
		data.addr1 = (rw_params.page >> 16) & 0xff;

		for (n = 0; n < cwperpage ; n++) {
			/* Poisoned so a missed status write is detectable. */
			dma_buffer->data[n].flash_status = 0xeeeeeeee;

			msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
					n, cmd_list, &cw_desc_cnt, 0);

			curr_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
			cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
			cmd_list->count++;

			msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
					READ, msm_virt_to_dma(chip,
					&dma_buffer->data[n].flash_status));
			cmd_list->cw_desc[cw_desc_cnt++].num_ce = 1;
		}

		/* Trailing descriptor clears both status registers and
		 * carries the interrupt + pipe-unlock flags.
		 */
		start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
		curr_ce = start_ce;
		cmd_list->cw_desc[cw_desc_cnt].flags = CMD_INT_UNLCK;
		cmd_list->count++;
		msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
				WRITE, data.clrfstatus);
		curr_ce++;

		msm_nand_prep_ce(curr_ce, MSM_NAND_READ_STATUS(info),
				WRITE, data.clrrstatus);
		curr_ce++;
		cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce - start_ce;

		dma_buffer->xfer.iovec_count = cmd_list->count;
		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
				&dma_buffer->cmd_iovec);
		iovec = dma_buffer->xfer.iovec;

		iovec->addr = msm_virt_to_dma(chip,
				&cmd_list->setup_desc.ce[0]);
		iovec->size = sizeof(struct sps_command_element) *
				cmd_list->setup_desc.num_ce;
		iovec->flags = cmd_list->setup_desc.flags;
		iovec++;
		for (n = 0; n < (cmd_list->count - 1); n++) {
			iovec->addr = msm_virt_to_dma(chip,
					&cmd_list->cw_desc[n].ce[0]);
			iovec->size = sizeof(struct sps_command_element) *
					cmd_list->cw_desc[n].num_ce;
			iovec->flags = cmd_list->cw_desc[n].flags;
			iovec++;
		}
		mutex_lock(&info->lock);
		err = msm_nand_get_device(chip->dev);
		if (err)
			goto unlock_mutex;
		/* Submit data descriptors */
		for (n = 0; n < cwperpage; n++) {
			err = msm_nand_submit_rw_data_desc(ops,
						&rw_params, info, n, 0);
			if (err) {
				pr_err("Failed to submit data descs %d\n", err);
				/* NOTE(review): panic() never returns — the
				 * goto below is unreachable dead code.
				 */
				panic("Error in nand driver\n");
				goto put_dev;
			}
		}

		if (ops->mode == MTD_OPS_RAW) {
			submitted_num_desc = n;
		} else if (ops->mode == MTD_OPS_AUTO_OOB) {
			if (ops->datbuf)
				submitted_num_desc = n;
			if (ops->oobbuf)
				submitted_num_desc++;
		}

		/* Submit command descriptors */
		err = sps_transfer(info->sps.cmd_pipe.handle,
				&dma_buffer->xfer);
		if (err) {
			pr_err("Failed to submit commands %d\n", err);
			goto put_dev;
		}

		/* Wait for command pipe, then the data-consumer pipe. */
		err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
				info->sps.cmd_pipe.index,
				dma_buffer->xfer.iovec_count,
				&iovec_temp);
		if (err) {
			pr_err("Failed to get iovec for pipe %d (err:%d)\n",
					(info->sps.cmd_pipe.index), err);
			goto put_dev;
		}
		err = msm_nand_sps_get_iovec(info->sps.data_cons.handle,
				info->sps.data_cons.index, submitted_num_desc,
				&iovec_temp);
		if (err) {
			pr_err("Failed to get iovec for pipe %d (err:%d)\n",
					(info->sps.data_cons.index), err);
			goto put_dev;
		}

		err = msm_nand_put_device(chip->dev);
		mutex_unlock(&info->lock);
		if (err)
			goto free_dma;

		for (n = 0; n < cwperpage; n++)
			pr_debug("write pg %d: flash_status[%d] = %x\n",
				rw_params.page, n,
				dma_buffer->data[n].flash_status);

		/* Check for flash status errors */
		for (n = 0; n < cwperpage; n++) {
			flash_sts = dma_buffer->data[n].flash_status;
			if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
				pr_err("MPU/OP err (0x%x) set\n", flash_sts);
				err = -EIO;
				goto free_dma;
			}
			/* Device-level status is only valid on the last CW. */
			if (n == (cwperpage - 1)) {
				if (!(flash_sts & FS_DEVICE_WP) ||
					(flash_sts & FS_DEVICE_STS_ERR)) {
					pr_err("Dev sts err 0x%x\n", flash_sts);
					err = -EIO;
					goto free_dma;
				}
			}
		}
		pages_written++;
		rw_params.page++;
	}
	goto free_dma;
put_dev:
	msm_nand_put_device(chip->dev);
unlock_mutex:
	mutex_unlock(&info->lock);
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (ops->oobbuf)
		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
				ops->ooblen, DMA_TO_DEVICE);
	if (ops->datbuf)
		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
				ops->len, DMA_TO_DEVICE);
validate_mtd_params_failed:
	if (ops->mode != MTD_OPS_RAW)
		ops->retlen = mtd->writesize * pages_written;
	else
		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;

	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
	if (err)
		pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
		       to, ops->len, ops->ooblen, err);
	pr_debug("ret %d, retlen %d oobretlen %d\n",
			err, ops->retlen, ops->oobretlen);

	pr_debug("================================================\n");
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
 * page with only main data.  Rejects writes that are not whole-page
 * aligned; bounces vmalloc buffers that span MMU pages one page at a time.
 */
static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf)
{
	int ret;
	struct mtd_oob_ops ops;
	unsigned char *bounce_buf = NULL;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.oobbuf = NULL;

	/* partial page writes are not supported */
	if ((to & (mtd->writesize - 1)) || (len % mtd->writesize)) {
		ret = -EINVAL;
		*retlen = ops.retlen;
		pr_err("%s: partial page writes are not supported\n", __func__);
		goto out;
	}

	/*
	 * Handle writing of large size write buffer in vmalloc
	 * address space that does not fit in an MMU page.
	 */
	if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
		ops.len = mtd->writesize;

		bounce_buf = kmalloc(ops.len, GFP_KERNEL);
		if (!bounce_buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* One page per iteration; only chunks straddling an MMU
		 * page boundary are copied through the bounce buffer.
		 */
		for (;;) {
			if (!is_buffer_in_page(buf, ops.len)) {
				memcpy(bounce_buf, buf, ops.len);
				ops.datbuf = (uint8_t *) bounce_buf;
			} else {
				ops.datbuf = (uint8_t *) buf;
			}
			ret = msm_nand_write_oob(mtd, to, &ops);
			if (ret < 0)
				break;

			len -= mtd->writesize;
			*retlen += mtd->writesize;
			if (len == 0)
				break;

			buf += mtd->writesize;
			to += mtd->writesize;
		}
		kfree(bounce_buf);
	} else {
		ops.len = len;
		ops.datbuf = (uint8_t *)buf;
		ret = msm_nand_write_oob(mtd, to, &ops);
		*retlen = ops.retlen;
	}
out:
	return ret;
}

/*
 * Structure that contains NANDc register data for commands required
 * for Erase operation.
 */
struct msm_nand_erase_reg_data {
	struct msm_nand_common_cfgs cfg;	/* cmd/addr/cfg register set */
	uint32_t exec;				/* EXEC_CMD trigger value */
	uint32_t flash_status;
	uint32_t clrfstatus;			/* FLASH_STATUS reset value */
	uint32_t clrrstatus;			/* READ_STATUS reset value */
};

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
 * block within NAND device.
 */
#define ERASE_CMDS 9
static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int i = 0, err = 0;
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t page = 0;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct msm_nand_erase_reg_data data;
	struct sps_iovec *iovec;
	struct sps_iovec iovec_temp;
	/*
	 * The following 9 commands are required to erase a page -
	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
	 * flash_status(write), read_status.
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[ERASE_CMDS];
		struct msm_nand_sps_cmd cmd[ERASE_CMDS];
		uint32_t flash_status;
	} *dma_buffer;

	/* Page number = byte address / page size (2K or 4K pages only). */
	if (mtd->writesize == PAGE_SIZE_2K)
		page = instr->addr >> 11;

	if (mtd->writesize == PAGE_SIZE_4K)
		page = instr->addr >> 12;

	if (instr->addr & (mtd->erasesize - 1)) {
		pr_err("unsupported erase address, 0x%llx\n", instr->addr);
		err = -EINVAL;
		goto out;
	}
	if (instr->len != mtd->erasesize) {
		pr_err("unsupported erase len, %lld\n", instr->len);
		err = -EINVAL;
		goto out;
	}

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
			    chip, sizeof(*dma_buffer))));
	cmd = dma_buffer->cmd;

	memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
	data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
	data.cfg.addr0 = page;
	data.cfg.addr1 = 0;
	data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
	data.cfg.cfg1 = chip->cfg1;
	data.exec = 1;
	/* Poisoned so a missed status DMA write is detectable below. */
	dma_buffer->flash_status = 0xeeeeeeee;
	data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
	data.clrrstatus = MSM_NAND_RESET_READ_STS;

	curr_cmd = cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);

	cmd = curr_cmd;
	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
		msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
			data.clrfstatus, 0);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_STATUS(info), WRITE,
			data.clrrstatus,
			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	WARN_ON((cmd - dma_buffer->cmd) > ERASE_CMDS);
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->lock);
	err = msm_nand_get_device(chip->dev);
	if (err)
		goto unlock_mutex;

	err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (err) {
		pr_err("Failed to submit commands %d\n", err);
		goto put_dev;
	}
	err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
			&iovec_temp);
	if (err) {
		pr_err("Failed to get iovec for pipe %d (err: %d)\n",
				(info->sps.cmd_pipe.index), err);
		goto put_dev;
	}
	err = msm_nand_put_device(chip->dev);
	if (err)
		goto unlock_mutex;

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR |
			FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
		pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
		err = -EIO;
	}
	/* FS_DEVICE_WP set means the device is writable. */
	if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
		pr_err("Device is write protected\n");
		err = -EIO;
	}
	if (err) {
		pr_err("Erase failed, 0x%llx\n", instr->addr);
		instr->fail_addr = instr->addr;
		instr->state = MTD_ERASE_FAILED;
	} else {
		instr->state = MTD_ERASE_DONE;
		instr->fail_addr = 0xffffffff;
		mtd_erase_callback(instr);
	}
	goto unlock_mutex;
put_dev:
	msm_nand_put_device(chip->dev);
unlock_mutex:
	mutex_unlock(&info->lock);
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
out:
	return err;
}

/*
 * Structure that contains NANDc register data for commands required
 * for checking if a block is bad.
 */
struct msm_nand_blk_isbad_data {
	struct msm_nand_common_cfgs cfg;	/* cmd/addr/cfg register set */
	uint32_t ecc_bch_cfg;			/* ECC disable configuration */
	uint32_t exec;				/* EXEC_CMD trigger value */
	uint32_t read_offset;			/* offset of BBM in last CW */
};

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to check if
 * a block is bad.
This is done by reading the first page within a block and
 * checking whether the bad block byte location contains 0xFF or not. If it
 * doesn't contain 0xFF, then it is considered as bad block.
 *
 * Returns 1 for a bad block, 0 for a good block, or a negative errno if
 * the check itself failed.
 */
#define ISBAD_CMDS 9
static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int i = 0, ret = 0, bad_block = 0, submitted_num_desc = 1;
	uint8_t *buf;		/* 4-byte landing area for the BBM read */
	uint32_t page = 0, rdata, cwperpage;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct msm_nand_blk_isbad_data data;
	struct sps_iovec *iovec;
	struct sps_iovec iovec_temp;
	/*
	 * The following 9 commands are required to check bad block -
	 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
	 * exec, flash_status(read).
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[ISBAD_CMDS];
		struct msm_nand_sps_cmd cmd[ISBAD_CMDS];
		uint32_t flash_status;
	} *dma_buffer;

	/* Page number = byte address / page size (2K or 4K pages only). */
	if (mtd->writesize == PAGE_SIZE_2K)
		page = ofs >> 11;

	if (mtd->writesize == PAGE_SIZE_4K)
		page = ofs >> 12;

	cwperpage = (mtd->writesize >> 9);

	/* NOTE(review): boundary test uses '>' — an offset equal to
	 * mtd->size passes validation; confirm whether '>=' was intended.
	 */
	if (ofs > mtd->size) {
		pr_err("Invalid offset 0x%llx\n", ofs);
		bad_block = -EINVAL;
		goto out;
	}
	if (ofs & (mtd->erasesize - 1)) {
		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
		bad_block = -EINVAL;
		goto out;
	}

	/* Extra 4 bytes after the descriptor area receive the marker. */
	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer) + 4)));
	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);

	cmd = dma_buffer->cmd;
	memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
	data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
	data.cfg.cfg1 = chip->cfg1_raw;

	/* addr0 points into the last codeword, where the bad-block marker
	 * lives; column is halved on 16-bit-wide flash.
	 */
	if (chip->cfg1 & (1 << WIDE_FLASH))
		data.cfg.addr0 = (page << 16) |
				((chip->cw_size * (cwperpage-1)) >> 1);
	else
		data.cfg.addr0 = (page << 16) |
				(chip->cw_size * (cwperpage-1));

	data.cfg.addr1 = (page >> 16) & 0xff;
	/* ECC is disabled for this raw read of the marker bytes. */
	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	data.exec = 1;
	data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
	/* Poisoned so a missed status DMA write is detectable below. */
	dma_buffer->flash_status = 0xeeeeeeee;

	curr_cmd = cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);

	cmd = curr_cmd;
	msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
			data.ecc_bch_cfg, 0);
	cmd++;

	/* READ_LOCATION_0: offset | size (4 bytes) | last-request flag. */
	rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
	msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
			rdata, 0);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
		msm_virt_to_dma(chip, &dma_buffer->flash_status),
		SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
	cmd++;

	WARN_ON(cmd - dma_buffer->cmd > ISBAD_CMDS);
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->lock);
	ret = msm_nand_get_device(chip->dev);
	if (ret) {
		mutex_unlock(&info->lock);
		goto free_dma;
	}
	/* Submit data descriptor */
	ret = sps_transfer_one(info->sps.data_prod.handle,
			msm_virt_to_dma(chip, buf),
			4, NULL, SPS_IOVEC_FLAG_INT);

	if (ret) {
		pr_err("Failed to submit data desc %d\n", ret);
		goto put_dev;
	}
	/* Submit command descriptor */
	ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (ret) {
		pr_err("Failed to submit commands %d\n", ret);
		goto put_dev;
	}

	ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
			info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
			&iovec_temp);
	if (ret) {
		pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
				(info->sps.cmd_pipe.index), ret);
		goto put_dev;
	}
	ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
			info->sps.data_prod.index, submitted_num_desc,
			&iovec_temp);
	if (ret) {
		pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
				(info->sps.data_prod.index), ret);
		goto put_dev;
	}

	ret = msm_nand_put_device(chip->dev);
	mutex_unlock(&info->lock);
	if (ret)
		goto free_dma;

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
		bad_block = -EIO;
		goto free_dma;
	}

	/* Check for bad block marker byte */
	if (chip->cfg1 & (1 << WIDE_FLASH)) {
		if (buf[0] != 0xFF || buf[1] != 0xFF)
			bad_block = 1;
	} else {
		if (buf[0] != 0xFF)
			bad_block = 1;
	}
	goto free_dma;
put_dev:
	msm_nand_put_device(chip->dev);
	mutex_unlock(&info->lock);
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
out:
	return ret ? ret : bad_block;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
 * block as bad. This is done by writing the first page within a block with 0,
 * thus setting the bad block byte location as well to 0.
+ */ +static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ + struct mtd_oob_ops ops; + int ret; + uint8_t *buf; + size_t len; + + if (ofs > mtd->size) { + pr_err("Invalid offset 0x%llx\n", ofs); + ret = -EINVAL; + goto out; + } + if (ofs & (mtd->erasesize - 1)) { + pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs); + ret = -EINVAL; + goto out; + } + len = mtd->writesize + mtd->oobsize; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) { + pr_err("unable to allocate memory for 0x%x size\n", len); + ret = -ENOMEM; + goto out; + } + ops.mode = MTD_OPS_RAW; + ops.len = len; + ops.retlen = 0; + ops.ooblen = 0; + ops.datbuf = buf; + ops.oobbuf = NULL; + ret = msm_nand_write_oob(mtd, ofs, &ops); + kfree(buf); +out: + return ret; +} + +/* + * Function that scans for the attached NAND device. This fills out all + * the uninitialized function pointers with the defaults. The flash ID is + * read and the mtd/chip structures are filled with the appropriate values. + */ +static int msm_nand_scan(struct mtd_info *mtd) +{ + struct msm_nand_info *info = mtd->priv; + struct msm_nand_chip *chip = &info->nand_chip; + struct flash_identification *supported_flash = &info->flash_dev; + int err = 0; + uint32_t i, j, mtd_writesize; + uint8_t dev_found = 0, wide_bus; + uint32_t manid, devid, devcfg; + uint32_t flash_id = 0, flash_id2 = 0; + uint8_t id_byte[NAND_MAX_ID_LEN]; + uint32_t bad_block_byte, spare_bytes; + struct nand_flash_dev *flashdev = NULL; + const struct nand_manufacturer *flashman = NULL; + + /* Probe the Flash device for ONFI compliance */ + if (!msm_nand_flash_onfi_probe(info)) { + dev_found = 1; + } else { + err = msm_nand_flash_read_id(info, 0, &flash_id, &flash_id2); + if (err < 0) { + pr_err("Failed to read Flash ID\n"); + err = -EINVAL; + goto out; + } + manid = id_byte[0] = flash_id & 0xFF; + devid = id_byte[1] = (flash_id >> 8) & 0xFF; + devcfg = id_byte[3] = (flash_id >> 24) & 0xFF; + id_byte[2] = (flash_id >> 16) & 0xFF; + id_byte[4] = 
flash_id2 & 0xFF; + id_byte[5] = (flash_id2 >> 8) & 0xFF; + id_byte[6] = (flash_id2 >> 16) & 0xFF; + id_byte[7] = (flash_id2 >> 24) & 0xFF; + + flashman = nand_get_manufacturer(manid); + + for (i = 0; !flashdev; ++i) { + /* + * If id_len is specified for an entry in the nand ids + * array, then at least 4 bytes of the nand id is + * present in the nand ids array - use that to identify + * the nand device first. If that is not present, only + * then fall back to searching the legacy or extended + * ids in the nand ids array. + * The id_len number of bytes in the nand id read from + * the device are checked against those in the nand id + * table for exact match. + */ + if (nand_flash_ids[i].id_len) { + for (j = 0; j < nand_flash_ids[i].id_len; j++) { + if (nand_flash_ids[i].id[j] == + id_byte[j]) + continue; + else + break; + } + if (j == nand_flash_ids[i].id_len) + flashdev = &nand_flash_ids[i]; + } else if (!nand_flash_ids[i].id_len && + nand_flash_ids[i].dev_id == devid) + flashdev = &nand_flash_ids[i]; + } + if (!flashdev || !flashman) { + pr_err("unknown nand flashid=%x manuf=%x devid=%x\n", + flash_id, manid, devid); + err = -ENOENT; + goto out; + } + dev_found = 1; + if (!flashdev->pagesize) { + pr_err("missing page size info - extract from NAND ID\n"); + supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0; + supported_flash->pagesize = 1024 << (devcfg & 0x3); + supported_flash->blksize = (64 * 1024) << + ((devcfg >> 4) & 0x3); + supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) * + (supported_flash->pagesize >> 9); + } else { + supported_flash->widebus = flashdev->options & + NAND_BUSWIDTH_16 ? 
1 : 0; + supported_flash->pagesize = flashdev->pagesize; + supported_flash->blksize = flashdev->erasesize; + supported_flash->oobsize = flashdev->oobsize; + supported_flash->ecc_correctability = + flashdev->ecc.strength_ds; + if (!flashdev->ecc.strength_ds) + pr_err("num ecc correctable bit not specified and defaults to 4 bit BCH\n"); + } + supported_flash->flash_id = flash_id; + supported_flash->density = ((uint64_t)flashdev->chipsize) << 20; + } + + if (dev_found) { + wide_bus = supported_flash->widebus; + mtd->size = supported_flash->density; + mtd->writesize = supported_flash->pagesize; + mtd->oobsize = supported_flash->oobsize; + mtd->erasesize = supported_flash->blksize; + mtd->writebufsize = mtd->writesize; + mtd_writesize = mtd->writesize; + + /* Check whether NAND device support 8bit ECC*/ + if (supported_flash->ecc_correctability >= 8) { + chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH; + supported_flash->ecc_capability = 8; + } else { + chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH; + supported_flash->ecc_capability = 4; + } + + pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n", + supported_flash->flash_id, (wide_bus) ? 16 : 8, + (mtd->size >> 20)); + pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n", + mtd->writesize, mtd->erasesize, mtd->oobsize); + pr_info("BCH ECC: %d Bit\n", supported_flash->ecc_capability); + } + + chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528; + chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE) + | (516 << UD_SIZE_BYTES) + | (0 << DISABLE_STATUS_AFTER_WRITE) + | (5 << NUM_ADDR_CYCLES); + + bad_block_byte = (mtd_writesize - (chip->cw_size * ( + (mtd_writesize >> 9) - 1)) + 1); + chip->cfg1 = (7 << NAND_RECOVERY_CYCLES) + | (0 << CS_ACTIVE_BSY) + | (bad_block_byte << BAD_BLOCK_BYTE_NUM) + | (0 << BAD_BLOCK_IN_SPARE_AREA) + | (2 << WR_RD_BSY_GAP) + | ((wide_bus ? 
1 : 0) << WIDE_FLASH) + | (1 << ENABLE_BCH_ECC); + + /* + * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O) + * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O). + */ + chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? + (wide_bus ? 14 : 13) : (wide_bus ? 8 : 7); + + spare_bytes = chip->cw_size - (BYTES_512 + chip->ecc_parity_bytes); + chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE) + | (5 << NUM_ADDR_CYCLES) + | (spare_bytes << SPARE_SIZE_BYTES) + | (BYTES_512 << UD_SIZE_BYTES); + + chip->cfg1_raw = (2 << WR_RD_BSY_GAP) + | (1 << BAD_BLOCK_IN_SPARE_AREA) + | (21 << BAD_BLOCK_BYTE_NUM) + | (0 << CS_ACTIVE_BSY) + | (7 << NAND_RECOVERY_CYCLES) + | ((wide_bus ? 1 : 0) << WIDE_FLASH) + | (1 << DEV0_CFG1_ECC_DISABLE); + + chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE) + | (0 << ECC_SW_RESET) + | (516 << ECC_NUM_DATA_BYTES) + | (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES) + | (1 << ECC_FORCE_CLK_OPEN); + + chip->ecc_cfg_raw = (1 << ECC_FORCE_CLK_OPEN) + | (BYTES_512 << ECC_NUM_DATA_BYTES) + | (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES) + | (0 << ECC_SW_RESET) + | (1 << ECC_CFG_ECC_DISABLE); + + if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) { + chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES : + 2 << SPARE_SIZE_BYTES); + chip->ecc_bch_cfg |= (1 << ECC_MODE); + chip->ecc_cfg_raw |= (1 << ECC_MODE); + } else { + chip->cfg0 |= (wide_bus ? 
2 << SPARE_SIZE_BYTES : + 4 << SPARE_SIZE_BYTES); + chip->ecc_bch_cfg |= (0 << ECC_MODE); + chip->ecc_cfg_raw |= (0 << ECC_MODE); + } + + chip->ecc_buf_cfg = 0x203; /* No of bytes covered by ECC - 516 bytes */ + + pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n" + " RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n" + " ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n" + " RAWECCCFG: 0x%08x, BAD BLOCK BYTE: 0x%08x\n", + chip->cfg0, chip->cfg1, chip->cfg0_raw, chip->cfg1_raw, + chip->ecc_buf_cfg, chip->ecc_bch_cfg, + chip->ecc_cfg_raw, bad_block_byte); + + if (mtd->writesize == 2048) + mtd->oobavail = 16; + else if (mtd->writesize == 4096) + mtd->oobavail = 32; + else { + pr_err("Unsupported NAND pagesize: 0x%x\n", mtd->writesize); + err = -ENODEV; + goto out; + } + + /* Fill in remaining MTD driver data */ + mtd->type = MTD_NANDFLASH; + mtd->flags = MTD_CAP_NANDFLASH; + mtd->_erase = msm_nand_erase; + mtd->_block_isbad = msm_nand_block_isbad; + mtd->_block_markbad = msm_nand_block_markbad; + mtd->_read = msm_nand_read; + mtd->_write = msm_nand_write; + mtd->_read_oob = msm_nand_read_oob; + mtd->_write_oob = msm_nand_write_oob; + mtd->owner = THIS_MODULE; +out: + return err; +} + +#define BAM_APPS_PIPE_LOCK_GRP0 0 +#define BAM_APPS_PIPE_LOCK_GRP1 1 +/* + * This function allocates, configures, connects an end point and + * also registers event notification for an end point. It also allocates + * DMA memory for descriptor FIFO of a pipe. 
+ */ +static int msm_nand_init_endpoint(struct msm_nand_info *info, + struct msm_nand_sps_endpt *end_point, + uint32_t pipe_index) +{ + int rc = 0; + struct sps_pipe *pipe_handle; + struct sps_connect *sps_config = &end_point->config; + struct sps_register_event *sps_event = &end_point->event; + + pipe_handle = sps_alloc_endpoint(); + if (!pipe_handle) { + pr_err("sps_alloc_endpoint() failed\n"); + rc = -ENOMEM; + goto out; + } + + rc = sps_get_config(pipe_handle, sps_config); + if (rc) { + pr_err("sps_get_config() failed %d\n", rc); + goto free_endpoint; + } + + if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) { + /* READ CASE: source - BAM; destination - system memory */ + sps_config->source = info->sps.bam_handle; + sps_config->destination = SPS_DEV_HANDLE_MEM; + sps_config->mode = SPS_MODE_SRC; + sps_config->src_pipe_index = pipe_index; + } else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX || + pipe_index == SPS_CMD_CONS_PIPE_INDEX) { + /* WRITE CASE: source - system memory; destination - BAM */ + sps_config->source = SPS_DEV_HANDLE_MEM; + sps_config->destination = info->sps.bam_handle; + sps_config->mode = SPS_MODE_DEST; + sps_config->dest_pipe_index = pipe_index; + } + + sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_POLL | + SPS_O_ACK_TRANSFERS; + + if (pipe_index == SPS_DATA_PROD_PIPE_INDEX || + pipe_index == SPS_DATA_CONS_PIPE_INDEX) + sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP0; + else if (pipe_index == SPS_CMD_CONS_PIPE_INDEX) + sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP1; + + /* + * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors + * are allowed to be submitted before we get any ack for any of them, + * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) * + * sizeof(struct sps_iovec). 
+ */ + sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) * + sizeof(struct sps_iovec); + sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev, + sps_config->desc.size, + &sps_config->desc.phys_base, + GFP_KERNEL); + if (!sps_config->desc.base) { + pr_err("dmam_alloc_coherent() failed for size %x\n", + sps_config->desc.size); + rc = -ENOMEM; + goto free_endpoint; + } + memset(sps_config->desc.base, 0x00, sps_config->desc.size); + + rc = sps_connect(pipe_handle, sps_config); + if (rc) { + pr_err("sps_connect() failed %d\n", rc); + goto free_endpoint; + } + + sps_event->options = SPS_O_EOT; + sps_event->mode = SPS_TRIGGER_WAIT; + sps_event->user = (void *)info; + + rc = sps_register_event(pipe_handle, sps_event); + if (rc) { + pr_err("sps_register_event() failed %d\n", rc); + goto sps_disconnect; + } + end_point->index = pipe_index; + end_point->handle = pipe_handle; + pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle, + pipe_index); + goto out; +sps_disconnect: + sps_disconnect(pipe_handle); +free_endpoint: + sps_free_endpoint(pipe_handle); +out: + return rc; +} + +/* This function disconnects and frees an end point */ +static void msm_nand_deinit_endpoint(struct msm_nand_info *info, + struct msm_nand_sps_endpt *end_point) +{ + sps_disconnect(end_point->handle); + sps_free_endpoint(end_point->handle); +} + +/* + * This function registers BAM device and initializes its end points for + * the following pipes - + * system consumer pipe for data (pipe#0), + * system producer pipe for data (pipe#1), + * system consumer pipe for commands (pipe#2). + */ +static int msm_nand_bam_init(struct msm_nand_info *nand_info) +{ + struct sps_bam_props bam = {0}; + int rc = 0; + + bam.phys_addr = nand_info->bam_phys; + bam.virt_addr = nand_info->bam_base; + bam.irq = nand_info->bam_irq; + /* + * NAND device is accessible from both Apps and Modem processor and + * thus, NANDc and BAM are shared between both the processors. 
But BAM + * must be enabled and instantiated only once during boot up by + * Trustzone before Modem/Apps is brought out from reset. + * + * This is indicated to SPS driver on Apps by marking flag + * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global + * initializations that will be done by Trustzone - Execution + * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and + * Descriptor summing threshold. + * + * NANDc BAM device supports 2 execution environments - Modem and Apps + * and thus the flag SPS_BAM_MGR_MULTI_EE is set. + */ + bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE; + bam.ipc_loglevel = QPIC_BAM_DEFAULT_IPC_LOGLVL; + + rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle); + if (!rc) + goto init_sps_ep; + rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle); + if (rc) { + pr_err("%s: sps_register_bam_device() failed with %d\n", + __func__, rc); + goto out; + } + pr_info("%s: BAM device registered: bam_handle 0x%lx\n", + __func__, nand_info->sps.bam_handle); +init_sps_ep: + rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod, + SPS_DATA_PROD_PIPE_INDEX); + if (rc) + goto out; + rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons, + SPS_DATA_CONS_PIPE_INDEX); + if (rc) + goto deinit_data_prod; + + rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe, + SPS_CMD_CONS_PIPE_INDEX); + if (rc) + goto deinit_data_cons; + goto out; +deinit_data_cons: + msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons); +deinit_data_prod: + msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod); +out: + return rc; +} + +/* + * This function disconnects and frees its end points for all the pipes. + * Since the BAM is shared resource, it is not deregistered as its handle + * might be in use with LCDC. 
+ */ +static void msm_nand_bam_free(struct msm_nand_info *nand_info) +{ + msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod); + msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons); + msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe); +} + +/* This function enables DMA support for the NANDc in BAM mode. */ +static int msm_nand_enable_dma(struct msm_nand_info *info) +{ + struct msm_nand_sps_cmd *sps_cmd; + struct msm_nand_chip *chip = &info->nand_chip; + int ret, submitted_num_desc = 1; + struct sps_iovec iovec_temp; + + wait_event(chip->dma_wait_queue, + (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd)))); + + msm_nand_prep_single_desc(sps_cmd, MSM_NAND_CTRL(info), WRITE, + (1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT); + + mutex_lock(&info->lock); + ret = msm_nand_get_device(chip->dev); + if (ret) { + mutex_unlock(&info->lock); + goto out; + } + ret = sps_transfer_one(info->sps.cmd_pipe.handle, + msm_virt_to_dma(chip, &sps_cmd->ce), + sizeof(struct sps_command_element), NULL, + sps_cmd->flags); + if (ret) { + pr_err("Failed to submit command: %d\n", ret); + goto put_dev; + } + ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle, + info->sps.cmd_pipe.index, submitted_num_desc, + &iovec_temp); + if (ret) { + pr_err("Failed to get iovec for pipe %d (ret: %d)\n", + (info->sps.cmd_pipe.index), ret); + goto put_dev; + } +put_dev: + ret = msm_nand_put_device(chip->dev); +out: + mutex_unlock(&info->lock); + msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd)); + return ret; + +} + +static int msm_nand_parse_smem_ptable(int *nr_parts) +{ + + uint32_t i, j; + size_t len = FLASH_PTABLE_HDR_LEN; + struct flash_partition_entry *pentry; + char *delimiter = ":"; + void *temp_ptable = NULL; + char *name = NULL; + + pr_info("Parsing partition table info from SMEM\n"); + temp_ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len); + + + if (IS_ERR_OR_NULL(temp_ptable)) { + pr_err("Error reading partition table header\n"); 
+ goto out; + } + + /* Read only the header portion of ptable */ + ptable = *(struct flash_partition_table *)temp_ptable; + + /* Verify ptable magic */ + if (ptable.magic1 != FLASH_PART_MAGIC1 || + ptable.magic2 != FLASH_PART_MAGIC2) { + pr_err("Partition table magic verification failed\n"); + goto out; + } + /* Ensure that # of partitions is less than the max we have allocated */ + if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) { + pr_err("Partition numbers exceed the max limit\n"); + goto out; + } + /* Find out length of partition data based on table version. */ + if (ptable.version <= FLASH_PTABLE_V3) { + len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 * + sizeof(struct flash_partition_entry); + } else if (ptable.version == FLASH_PTABLE_V4) { + len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 * + sizeof(struct flash_partition_entry); + } else { + pr_err("Unknown ptable version (%d)", ptable.version); + goto out; + } + + *nr_parts = ptable.numparts; + + /* + * Now that the partition table header has been parsed, verified + * and the length of the partition table calculated, read the + * complete partition table. 
+ */ + temp_ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len); + if (IS_ERR_OR_NULL(temp_ptable)) { + pr_err("Error reading partition table\n"); + goto out; + } + + /* Read only the header portion of ptable */ + ptable = *(struct flash_partition_table *)temp_ptable; + + for (i = 0; i < ptable.numparts; i++) { + pentry = &ptable.part_entry[i]; + if (pentry->name[0] == '\0') + continue; + /* Convert name to lower case and discard the initial chars */ + name = pentry->name; + strsep(&name, delimiter); + mtd_part[i].name = name; + if (!mtd_part[i].name) + mtd_part[i].name = pentry->name; + for (j = 0; j < strlen(mtd_part[i].name); j++) + *((char *)mtd_part[i].name + j) = + tolower(*((char *)mtd_part[i].name + j)); + mtd_part[i].offset = pentry->offset; + mtd_part[i].mask_flags = pentry->attr; + mtd_part[i].size = pentry->length; + pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n", + i, pentry->name, pentry->offset, pentry->length, + pentry->attr); + } + pr_info("SMEM partition table found: ver: %d len: %d\n", + ptable.version, ptable.numparts); + return 0; +out: + return -EINVAL; +} + +#define BOOT_DEV_MASK 0x1E +#define BOOT_DEV_NAND 0x4 + +/* + * This function gets called when its device named msm-nand is added to + * device tree .dts file with all its resources such as physical addresses + * for NANDc and BAM, BAM IRQ. + * + * It also expects the NAND flash partition information to be passed in .dts + * file so that it can parse the partitions by calling MTD function + * mtd_device_parse_register(). 
 *
 */
static int msm_nand_probe(struct platform_device *pdev)
{
	struct msm_nand_info *info;
	struct resource *res;
	int i, err, nr_parts;
	struct device *dev;
	u32 adjustment_offset;
	void __iomem *boot_cfg_base;
	u32 boot_dev;

	/*
	 * Optional "boot_cfg" region: if present, verify this SoC actually
	 * booted from NAND before binding; bail out with -ENODEV otherwise.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "boot_cfg");
	if (res && res->start) {
		boot_cfg_base = devm_ioremap(&pdev->dev, res->start,
						resource_size(res));
		if (!boot_cfg_base) {
			/*
			 * NOTE(review): res->start is resource_size_t and may
			 * be 64-bit — "0x%x" can truncate here; consider %pa.
			 */
			pr_err("ioremap() failed for addr 0x%x size 0x%x\n",
				res->start, resource_size(res));
			return -ENOMEM;
		}
		boot_dev = (readl_relaxed(boot_cfg_base) & BOOT_DEV_MASK) >> 1;
		if (boot_dev != BOOT_DEV_NAND) {
			pr_err("disabling nand as boot device (%x) is not NAND\n",
					boot_dev);
			return -ENODEV;
		}
	}
	/*
	 * The partition information can also be passed from kernel command
	 * line. Also, the MTD core layer supports adding the whole device as
	 * one MTD device when no partition information is available at all.
	 */
	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
				GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "nand_phys");
	if (!res || !res->start) {
		pr_err("NAND phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->nand_phys = res->start;

	err = of_property_read_u32(pdev->dev.of_node,
				   "qcom,reg-adjustment-offset",
				   &adjustment_offset);
	if (err) {
		pr_err("adjustment_offset not found, err = %d\n", err);
		WARN_ON(1);
		return err;
	}

	/* Some register blocks sit at an SoC-specific offset from the base. */
	info->nand_phys_adjusted = info->nand_phys + adjustment_offset;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					 "bam_phys");
	if (!res || !res->start) {
		pr_err("BAM phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->bam_phys = res->start;
	info->bam_base = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!info->bam_base) {
		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
			res->start, resource_size(res));
		err = -ENOMEM;
		goto out;
	}

	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
	if (info->bam_irq < 0) {
		pr_err("BAM IRQ is not provided\n");
		err = -ENODEV;
		goto out;
	}

	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = info;
	info->mtd.owner = THIS_MODULE;
	info->nand_chip.dev = &pdev->dev;
	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
	mutex_init(&info->lock);

	dev = &pdev->dev;
	/* NANDc DMA is 32-bit; set the coherent mask only if supported. */
	if (dma_supported(dev, DMA_BIT_MASK(32))) {
		info->dma_mask = DMA_BIT_MASK(32);
		dev->coherent_dma_mask = info->dma_mask;
	}

	/* One shared DMA pool used by all NAND clients (see header comment). */
	info->nand_chip.dma_virt_addr =
		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
	if (!info->nand_chip.dma_virt_addr) {
		pr_err("No memory for DMA buffer size %x\n",
				MSM_NAND_DMA_BUFFER_SIZE);
		err = -ENOMEM;
		goto out;
	}
	err = msm_nand_bus_register(pdev, info);
	if (err)
		goto out;

	/* RPMh-managed clock SoCs don't expose a "core_clk" to this driver. */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,qpic-clk-rpmh"))
		info->clk_data.rpmh_clk = true;

	if (!info->clk_data.rpmh_clk) {
		info->clk_data.qpic_clk = devm_clk_get(&pdev->dev, "core_clk");
		if (!IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
			err = clk_set_rate(info->clk_data.qpic_clk,
				MSM_NAND_BUS_VOTE_MAX_RATE);
		} else {
			err = PTR_ERR(info->clk_data.qpic_clk);
			pr_err("Failed to get clock handle, err=%d\n", err);
		}
		if (err)
			goto bus_unregister;
	}

	err = msm_nand_setup_clocks_and_bus_bw(info, true);
	if (err)
		goto bus_unregister;
	dev_set_drvdata(&pdev->dev, info);
	err = pm_runtime_set_active(&pdev->dev);
	if (err)
		pr_err("pm_runtime_set_active() failed with error %d", err);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_NAND_IDLE_TIMEOUT);

	err = msm_nand_bam_init(info);
	if (err) {
		pr_err("msm_nand_bam_init() failed %d\n", err);
		goto clk_rpm_disable;
	}
	err = msm_nand_enable_dma(info);
	if (err) {
		pr_err("Failed to enable DMA in NANDc\n");
		goto free_bam;
	}
	err = msm_nand_parse_smem_ptable(&nr_parts);
	if (err < 0) {
		pr_err("Failed to parse partition table in SMEM\n");
		goto free_bam;
	}
	if (msm_nand_scan(&info->mtd)) {
		pr_err("No nand device found\n");
		err = -ENXIO;
		goto free_bam;
	}
	/* SMEM table stores offset/size in erase-block units; scale to bytes. */
	for (i = 0; i < nr_parts; i++) {
		mtd_part[i].offset *= info->mtd.erasesize;
		mtd_part[i].size *= info->mtd.erasesize;
	}
	err = mtd_device_parse_register(&info->mtd, NULL, NULL,
		&mtd_part[0], nr_parts);
	if (err < 0) {
		pr_err("Unable to register MTD partitions %d\n", err);
		goto free_bam;
	}

	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
		info->nand_phys, info->bam_phys, info->bam_irq);
	pr_info("Allocated DMA buffer at virt_addr 0x%pK, phys_addr 0x%x\n",
		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
	goto out;
free_bam:
	msm_nand_bam_free(info);
clk_rpm_disable:
	msm_nand_setup_clocks_and_bus_bw(info, false);
	pm_runtime_disable(&(pdev)->dev);
	pm_runtime_set_suspended(&(pdev)->dev);
bus_unregister:
	msm_nand_bus_unregister(info);
out:
	return err;
}

/*
 * Remove functionality that gets called when driver/device msm-nand
 * is removed.
 */
static int msm_nand_remove(struct platform_device *pdev)
{
	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);

	/* Resume first so teardown below runs with the device powered. */
	if (pm_runtime_suspended(&(pdev)->dev))
		pm_runtime_resume(&(pdev)->dev);

	pm_runtime_disable(&(pdev)->dev);
	pm_runtime_set_suspended(&(pdev)->dev);

	dev_set_drvdata(&pdev->dev, NULL);

	if (info) {
		msm_nand_setup_clocks_and_bus_bw(info, false);
		if (info->clk_data.client_handle)
			msm_nand_bus_unregister(info);
		mtd_device_unregister(&info->mtd);
		msm_nand_bam_free(info);
	}
	return 0;
}

#define DRIVER_NAME "msm_qpic_nand"
static const struct of_device_id msm_nand_match_table[] = {
	{ .compatible = "qcom,msm-nand", },
	{},
};

static const struct dev_pm_ops msm_nand_pm_ops = {
	.suspend		= msm_nand_suspend,
	.resume			= msm_nand_resume,
	.runtime_suspend	= msm_nand_runtime_suspend,
	.runtime_resume		= msm_nand_runtime_resume,
};

static struct platform_driver msm_nand_driver = {
	.probe		= msm_nand_probe,
	.remove		= msm_nand_remove,
	.driver = {
		.name		= DRIVER_NAME,
		.of_match_table = msm_nand_match_table,
		.pm		= &msm_nand_pm_ops,
	},
};

module_param(enable_euclean, bool, 0644);
MODULE_PARM_DESC(enable_euclean, "Set this parameter to enable reporting EUCLEAN to upper layer when the correctable bitflips are equal to the max correctable limit.");

module_platform_driver(msm_nand_driver);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
diff --git a/drivers/mtd/devices/msm_qpic_nand.h b/drivers/mtd/devices/msm_qpic_nand.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4bc53b8c8b355d0363f7666ed0e60cab57d5a54
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.h
@@ -0,0 +1,403 @@
/*
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef __QPIC_NAND_H
#define __QPIC_NAND_H

#define pr_fmt(fmt) "%s: " fmt, __func__

/*
 * NOTE(review): the targets of the #include directives below were lost when
 * this copy was extracted (the <...> header names are missing) — restore
 * them from the upstream msm_qpic_nand.h before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define PAGE_SIZE_2K 2048
#define PAGE_SIZE_4K 4096

#undef WRITE /* To avoid redefinition in above header files */
#undef READ /* To avoid redefinition in above header files */
#define WRITE 1
#define READ 0

#define MSM_NAND_IDLE_TIMEOUT 200 /* msecs */
#define MSM_NAND_BUS_VOTE_MAX_RATE 100000000 /* Hz */

/*
 * The maximum no of descriptors per transfer (page read/write) won't be more
 * than 64. For more details on what those commands are, please refer to the
 * page read and page write functions in the driver.
 */
#define SPS_MAX_DESC_NUM 64
#define SPS_DATA_CONS_PIPE_INDEX 0
#define SPS_DATA_PROD_PIPE_INDEX 1
#define SPS_CMD_CONS_PIPE_INDEX 2

/* Translate an address inside the shared DMA pool to its bus address. */
#define msm_virt_to_dma(chip, vaddr) \
	((chip)->dma_phys_addr + \
	((uint8_t *)(vaddr) - (chip)->dma_virt_addr))

/*
 * A single page read/write request would typically need DMA memory of about
 * 1K memory approximately. So for a single request this memory is more than
 * enough.
 *
 * But to accommodate multiple clients we allocate 8K of memory. Though only
 * one client request can be submitted to NANDc at any time, other clients can
 * still prepare the descriptors while waiting for current client request to
 * be done. Thus for a total memory of 8K, the driver can currently support
 * maximum clients up to 7 or 8 at a time. The client for which there is no
 * free DMA memory shall wait on the wait queue until other clients free up
 * the required memory.
 */
#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
/*
 * This defines the granularity at which the buffer management is done. The
 * total number of slots is based on the size of the atomic_t variable
 * dma_buffer_busy(number of bits) within the structure msm_nand_chip.
 */
#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))

/* ONFI(Open NAND Flash Interface) parameters */
#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
#define ONFI_PARAM_INFO_LENGTH 0x0200
#define ONFI_PARAM_PAGE_LENGTH 0x0100
#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
#define FLASH_READ_DEVICE_ID_ADDRESS 0x00

#define MSM_NAND_RESET_FLASH_STS 0x00000020
#define MSM_NAND_RESET_READ_STS 0x000000C0

/* QPIC NANDc (NAND Controller) Register Set */
#define MSM_NAND_REG(info, off) (info->nand_phys + off)
#define MSM_NAND_REG_ADJUSTED(info, off) (info->nand_phys_adjusted + off)
#define MSM_NAND_QPIC_VERSION(info) MSM_NAND_REG_ADJUSTED(info, 0x20100)
#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
/* FLASH_STATUS bit fields */
#define FS_OP_ERR (1 << 4)
#define FS_MPU_ERR (1 << 8)
#define FS_DEVICE_STS_ERR (1 << 16)
#define FS_DEVICE_WP (1 << 23)

#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
#define BS_UNCORRECTABLE_BIT (1 << 8)
#define BS_CORRECTABLE_ERR_MSK 0x1F

/* DEV0_CFG0 bit positions */
#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27

/* DEV0_CFG1 bit positions */
#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

#define BYTES_512 512
#define BYTES_516 516
#define BYTES_517 517

/* DEV0_ECC_CFG bit positions */
#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30

#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
#define MSM_NAND_READ_ID2(info) MSM_NAND_REG(info, 0x30048)
#define EXTENDED_FETCH_ID BIT(19)
#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)

#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)

#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)
#define PAGE_ALL_ERASED 7
#define CODEWORD_ALL_ERASED 6
#define PAGE_ERASED 5
#define CODEWORD_ERASED 4
#define ERASED_PAGE ((1 << PAGE_ALL_ERASED) | (1 << PAGE_ERASED))
#define ERASED_CW ((1 << CODEWORD_ALL_ERASED) | (1 << CODEWORD_ERASED))

#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
#define BAM_MODE_EN 0
#define MSM_NAND_VERSION(info) MSM_NAND_REG_ADJUSTED(info, 0x30F08)
#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)

/* device commands */
#define MSM_NAND_CMD_PAGE_READ 0x32
#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
#define MSM_NAND_CMD_PAGE_READ_ONFI 0x35
#define MSM_NAND_CMD_PRG_PAGE 0x36
#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
#define MSM_NAND_CMD_FETCH_ID 0x0B

/* Version Mask */
#define MSM_NAND_VERSION_MAJOR_MASK 0xF0000000
#define MSM_NAND_VERSION_MAJOR_SHIFT 28
#define MSM_NAND_VERSION_MINOR_MASK 0x0FFF0000
#define MSM_NAND_VERSION_MINOR_SHIFT 16

/* Shorthand for commonly combined SPS iovec flags */
#define CMD SPS_IOVEC_FLAG_CMD
#define CMD_LCK (CMD | SPS_IOVEC_FLAG_LOCK)
#define INT SPS_IOVEC_FLAG_INT
#define INT_UNLCK (INT | SPS_IOVEC_FLAG_UNLOCK)
#define CMD_INT_UNLCK (CMD | INT_UNLCK)
#define NWD SPS_IOVEC_FLAG_NWD

/* Structure that defines a NAND SPS command element */
struct msm_nand_sps_cmd {
	struct sps_command_element ce;
	uint32_t flags;
};

struct msm_nand_cmd_setup_desc {
	struct sps_command_element ce[11];
	uint32_t flags;
	uint32_t num_ce;
};

struct msm_nand_cmd_cw_desc {
	struct sps_command_element ce[3];
	uint32_t flags;
	uint32_t num_ce;
};

struct msm_nand_rw_cmd_desc {
	uint32_t count;
	struct msm_nand_cmd_setup_desc setup_desc;
	struct msm_nand_cmd_cw_desc cw_desc[];
};

/*
 * Structure that defines the NAND controller properties as per the
 * NAND flash device/chip that is attached.
 */
struct msm_nand_chip {
	struct device *dev;
	/*
	 * DMA memory will be allocated only once during probe and this memory
	 * will be used by all NAND clients. This wait queue is needed to
	 * make the applications wait for DMA memory to be free'd when the
	 * complete memory is exhausted.
	 */
	wait_queue_head_t dma_wait_queue;
	atomic_t dma_buffer_busy;
	uint8_t *dma_virt_addr;
	dma_addr_t dma_phys_addr;
	uint32_t ecc_parity_bytes;
	uint32_t bch_caps; /* Controller BCH ECC capabilities */
#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
	uint32_t cw_size;
	/* NANDc register configurations */
	uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
	uint32_t ecc_buf_cfg;
	uint32_t ecc_bch_cfg;
	uint32_t ecc_cfg_raw;
};

/* Structure that defines an SPS end point for a NANDc BAM pipe. */
struct msm_nand_sps_endpt {
	struct sps_pipe *handle;
	struct sps_connect config;
	struct sps_register_event event;
	struct completion completion;
	uint32_t index;
};

/*
 * Structure that defines NANDc SPS data - BAM handle and an end point
 * for each BAM pipe.
 */
struct msm_nand_sps_info {
	unsigned long bam_handle;
	struct msm_nand_sps_endpt data_prod;
	struct msm_nand_sps_endpt data_cons;
	struct msm_nand_sps_endpt cmd_pipe;
};

/*
 * Structure that contains flash device information. This gets updated after
 * the NAND flash device detection.
 */
struct flash_identification {
	uint32_t flash_id;
	uint64_t density;
	uint32_t widebus;
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
	uint32_t ecc_correctability;
	uint32_t ecc_capability; /* Set based on the ECC capability selected. */
};

struct msm_nand_clk_data {
	struct clk *qpic_clk;
	struct msm_bus_scale_pdata *use_cases;
	uint32_t client_handle;
	atomic_t clk_enabled;
	atomic_t curr_vote;
	bool rpmh_clk;
};

/* Structure that defines NANDc private data.
*/ +struct msm_nand_info { + struct mtd_info mtd; + struct msm_nand_chip nand_chip; + struct msm_nand_sps_info sps; + unsigned long bam_phys; + unsigned long nand_phys; + unsigned long nand_phys_adjusted; + void __iomem *bam_base; + int bam_irq; + /* + * This lock must be acquired before submitting any command or data + * descriptors to BAM pipes and must be held until all the submitted + * descriptors are processed. + * + * This is required to ensure that both command and data descriptors are + * submitted atomically without interruption from other clients, + * when there are requests from more than one client at any time. + * Otherwise, data and command descriptors can be submitted out of + * order for a request which can cause data corruption. + */ + struct mutex lock; + struct flash_identification flash_dev; + struct msm_nand_clk_data clk_data; + u64 dma_mask; +}; + +/* Structure that defines an ONFI parameter page (512B) */ +struct onfi_param_page { + uint32_t parameter_page_signature; + uint16_t revision_number; + uint16_t features_supported; + uint16_t optional_commands_supported; + uint8_t reserved0[22]; + uint8_t device_manufacturer[12]; + uint8_t device_model[20]; + uint8_t jedec_manufacturer_id; + uint16_t date_code; + uint8_t reserved1[13]; + uint32_t number_of_data_bytes_per_page; + uint16_t number_of_spare_bytes_per_page; + uint32_t number_of_data_bytes_per_partial_page; + uint16_t number_of_spare_bytes_per_partial_page; + uint32_t number_of_pages_per_block; + uint32_t number_of_blocks_per_logical_unit; + uint8_t number_of_logical_units; + uint8_t number_of_address_cycles; + uint8_t number_of_bits_per_cell; + uint16_t maximum_bad_blocks_per_logical_unit; + uint16_t block_endurance; + uint8_t guaranteed_valid_begin_blocks; + uint16_t guaranteed_valid_begin_blocks_endurance; + uint8_t number_of_programs_per_page; + uint8_t partial_program_attributes; + uint8_t number_of_bits_ecc_correctability; + uint8_t number_of_interleaved_address_bits; + uint8_t 
interleaved_operation_attributes; + uint8_t reserved2[13]; + uint8_t io_pin_capacitance; + uint16_t timing_mode_support; + uint16_t program_cache_timing_mode_support; + uint16_t maximum_page_programming_time; + uint16_t maximum_block_erase_time; + uint16_t maximum_page_read_time; + uint16_t maximum_change_column_setup_time; + uint8_t reserved3[23]; + uint16_t vendor_specific_revision_number; + uint8_t vendor_specific[88]; + uint16_t integrity_crc; +} __attribute__((__packed__)); + +#define FLASH_PART_MAGIC1 0x55EE73AA +#define FLASH_PART_MAGIC2 0xE35EBDDB +#define FLASH_PTABLE_V3 3 +#define FLASH_PTABLE_V4 4 +#define FLASH_PTABLE_MAX_PARTS_V3 16 +#define FLASH_PTABLE_MAX_PARTS_V4 32 +#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t)) +#define FLASH_PTABLE_ENTRY_NAME_SIZE 16 + +struct flash_partition_entry { + char name[FLASH_PTABLE_ENTRY_NAME_SIZE]; + u32 offset; /* Offset in blocks from beginning of device */ + u32 length; /* Length of the partition in blocks */ + u8 attr; /* Flags for this partition */ +}; + +struct flash_partition_table { + u32 magic1; + u32 magic2; + u32 version; + u32 numparts; + struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4]; +}; + +static struct flash_partition_table ptable; + +static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4]; + +static inline bool is_buffer_in_page(const void *buf, size_t len) +{ + return !(((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE); +} +#endif /* __QPIC_NAND_H */ diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c02cc817a490995498b9764594a632b7581a700c..60666db318867912c086d5ae06ab6a275d780c42 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info) { int i; - if (!client_info->slave) + if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst)) return; for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { @@ -943,6 +943,10 @@ static 
void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; + netdev_dbg(slave->bond->dev, + "Send learning packet: dev %s mac %pM vlan %d\n", + slave->dev->name, mac_addr, vid); + if (vid) __vlan_hwaccel_put_tag(skb, vlan_proto, vid); @@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data) u8 *mac_addr = data->mac_addr; struct bond_vlan_tag *tags; - if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { - if (strict_match && - ether_addr_equal_64bits(mac_addr, - upper->dev_addr)) { + if (is_vlan_dev(upper) && + bond->nest_level == vlan_get_encap_level(upper) - 1) { + if (upper->addr_assign_type == NET_ADDR_STOLEN) { alb_send_lp_vid(slave, mac_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); - } else if (!strict_match) { + } else { alb_send_lp_vid(slave, upper->dev_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bf3be2e6d4a801c06fe4d3e6075f5ec4439e4fe5..00245b73c224cbd1eb8343b664f2fc1fd6fc81cb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1528,7 +1528,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (res) { netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", slave_dev->name); - goto err_close; + goto err_hwaddr_unsync; } prev_slave = bond_last_slave(bond); @@ -1734,6 +1734,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_mode_uses_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); + bond->nest_level = dev_get_nest_level(bond_dev); + netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", slave_dev->name, bond_is_active_slave(new_slave) ? 
"an active" : "a backup", @@ -1767,6 +1769,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) synchronize_rcu(); slave_disable_netpoll(new_slave); +err_hwaddr_unsync: + if (!bond_uses_primary(bond)) + bond_hw_addr_flush(bond_dev, slave_dev); + err_close: slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index f4947a74b65f3d4eb12bfc42bd927c2c1d506207..5d4e61741476660b925e80a81ca1d41c17587f3b 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -25,6 +25,7 @@ #include #include #include +#include /* napi related */ #define M_CAN_NAPI_WEIGHT 64 @@ -246,7 +247,7 @@ enum m_can_mram_cfg { /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ #define RXFC_FWM_SHIFT 24 -#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) +#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) #define RXFC_FS_SHIFT 16 #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) @@ -1682,6 +1683,8 @@ static __maybe_unused int m_can_suspend(struct device *dev) m_can_clk_stop(priv); } + pinctrl_pm_select_sleep_state(dev); + priv->can.state = CAN_STATE_SLEEPING; return 0; @@ -1692,6 +1695,8 @@ static __maybe_unused int m_can_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct m_can_priv *priv = netdev_priv(ndev); + pinctrl_pm_select_default_state(dev); + m_can_init_ram(priv); priv->can.state = CAN_STATE_ERROR_ACTIVE; diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig index 8f2e0dd7b7565769c768543e1097caff53d555ff..d9f4e9460a60f16e36747d996590d94395f677cf 100644 --- a/drivers/net/can/spi/Kconfig +++ b/drivers/net/can/spi/Kconfig @@ -13,4 +13,10 @@ config CAN_MCP251X ---help--- Driver for the Microchip MCP251x SPI CAN controllers. + +config QTI_CAN + tristate "Unified driver for QTI CAN controllers" + depends on SPI + ---help--- + Unified driver for QTI CAN controllers. 
endmenu diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile index f59fa37310736531f6927ff3b75e8503f481de68..86895c16cb2033f6ce0f9a3ae52c5b09de458a7d 100644 --- a/drivers/net/can/spi/Makefile +++ b/drivers/net/can/spi/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_CAN_HI311X) += hi311x.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o +obj-$(CONFIG_QTI_CAN) += qti-can.o diff --git a/drivers/net/can/spi/qti-can.c b/drivers/net/can/spi/qti-can.c new file mode 100644 index 0000000000000000000000000000000000000000..378f932da027249b98b85c010e7b2b92351e72b1 --- /dev/null +++ b/drivers/net/can/spi/qti-can.c @@ -0,0 +1,1469 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_QTI_CAN 0 +#if DEBUG_QTI_CAN == 1 +#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__) +#define LOGNI(...) netdev_info(netdev, __VA_ARGS__) +#else +#define LOGDI(...) dev_dbg(&priv_data->spidev->dev, __VA_ARGS__) +#define LOGNI(...) netdev_dbg(netdev, __VA_ARGS__) +#endif +#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__) +#define LOGNE(...) 
netdev_err(netdev, __VA_ARGS__) + +#define MAX_TX_BUFFERS 1 +#define XFER_BUFFER_SIZE 64 +#define RX_ASSEMBLY_BUFFER_SIZE 128 +#define QTI_CAN_FW_QUERY_RETRY_COUNT 3 +#define DRIVER_MODE_RAW_FRAMES 0 +#define DRIVER_MODE_PROPERTIES 1 +#define DRIVER_MODE_AMB 2 +#define QUERY_FIRMWARE_TIMEOUT_MS 100 + +struct qti_can { + struct net_device **netdev; + struct spi_device *spidev; + struct mutex spi_lock; /* SPI device lock */ + struct workqueue_struct *tx_wq; + char *tx_buf, *rx_buf; + int xfer_length; + atomic_t msg_seq; + char *assembly_buffer; + u8 assembly_buffer_size; + atomic_t netif_queue_stop; + struct completion response_completion; + int wait_cmd; + int cmd_result; + int driver_mode; + int clk_freq_mhz; + int max_can_channels; + int bits_per_word; + int reset_delay_msec; + int reset; + bool support_can_fd; + bool can_fw_cmd_timeout_req; + u32 rem_all_buffering_timeout_ms; + u32 can_fw_cmd_timeout_ms; +}; + +struct qti_can_netdev_privdata { + struct can_priv can; + struct qti_can *qti_can; + u8 netdev_index; +}; + +struct qti_can_tx_work { + struct work_struct work; + struct sk_buff *skb; + struct net_device *netdev; +}; + +/* Message definitions */ +struct spi_mosi { /* TLV for MOSI line */ + u8 cmd; + u8 len; + u16 seq; + u8 data[]; +} __packed; + +struct spi_miso { /* TLV for MISO line */ + u8 cmd; + u8 len; + u16 seq; /* should match seq field from request, or 0 for unsols */ + u8 data[]; +} __packed; + +#define CMD_GET_FW_VERSION 0x81 +#define CMD_CAN_SEND_FRAME 0x82 +#define CMD_CAN_ADD_FILTER 0x83 +#define CMD_CAN_REMOVE_FILTER 0x84 +#define CMD_CAN_RECEIVE_FRAME 0x85 +#define CMD_CAN_CONFIG_BIT_TIMING 0x86 +#define CMD_CAN_DATA_BUFF_ADD 0x87 +#define CMD_CAN_DATA_BUFF_REMOVE 0X88 +#define CMD_CAN_RELEASE_BUFFER 0x89 +#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A +#define CMD_PROPERTY_WRITE 0x8B +#define CMD_PROPERTY_READ 0x8C +#define CMD_GET_FW_BR_VERSION 0x95 +#define CMD_BEGIN_FIRMWARE_UPGRADE 0x96 +#define CMD_FIRMWARE_UPGRADE_DATA 0x97 +#define 
CMD_END_FIRMWARE_UPGRADE 0x98 +#define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99 +#define CMD_BOOT_ROM_UPGRADE_DATA 0x9A +#define CMD_END_BOOT_ROM_UPGRADE 0x9B +#define CMD_END_FW_UPDATE_FILE 0x9C + +#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0) +#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1) +#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2) +#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3) +#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5) +#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6) +#define IOCTL_GET_FW_BR_VERSION (SIOCDEVPRIVATE + 7) +#define IOCTL_BEGIN_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 8) +#define IOCTL_FIRMWARE_UPGRADE_DATA (SIOCDEVPRIVATE + 9) +#define IOCTL_END_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 10) +#define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11) +#define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12) +#define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13) +#define IOCTL_END_FW_UPDATE_FILE (SIOCDEVPRIVATE + 14) + +#define IFR_DATA_OFFSET 0x100 +struct can_fw_resp { + u8 maj; + u8 min; + u8 ver[48]; +} __packed; + +struct can_write_req { + u8 can_if; + u32 mid; + u8 dlc; + u8 data[8]; +} __packed; + +struct can_write_resp { + u8 err; +} __packed; + +struct can_filter_req { + u8 can_if; + u32 mid; + u32 mask; +} __packed; + +struct can_add_filter_resp { + u8 err; +} __packed; + +struct can_receive_frame { + u8 can_if; + u32 ts; + u32 mid; + u8 dlc; + u8 data[8]; +} __packed; + +struct can_config_bit_timing { + u8 can_if; + u32 prop_seg; + u32 phase_seg1; + u32 phase_seg2; + u32 sjw; + u32 brp; +} __packed; + +static struct can_bittiming_const rh850_bittiming_const = { + .name = "qti_can", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 70, + .brp_inc = 1, +}; + +static struct can_bittiming_const flexcan_bittiming_const = { + .name = "qti_can", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 
1, + .brp_max = 256, + .brp_inc = 1, +}; + +static struct can_bittiming_const qti_can_bittiming_const; + +static struct can_bittiming_const qti_can_data_bittiming_const = { + .name = "qti_can", + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 70, + .brp_inc = 1, +}; + +struct vehicle_property { + int id; + u64 ts; + int zone; + int val_type; + u32 data_len; + union { + u8 bval; + int val; + int val_arr[4]; + float f_value; + float float_arr[4]; + u8 str[36]; + }; +} __packed; + +struct qti_can_release_can_buffer { + u8 enable; +} __packed; + +struct qti_can_buffer { + u8 can_if; + u32 mid; + u32 mask; +} __packed; + +struct can_fw_br_resp { + u8 maj; + u8 min; + u8 ver[32]; + u8 br_maj; + u8 br_min; + u8 curr_exec_mode; +} __packed; + +struct qti_can_ioctl_req { + u8 len; + u8 data[64]; +} __packed; + +static int qti_can_rx_message(struct qti_can *priv_data); + +static irqreturn_t qti_can_irq(int irq, void *priv) +{ + struct qti_can *priv_data = priv; + + LOGDI("%s\n", __func__); + qti_can_rx_message(priv_data); + return IRQ_HANDLED; +} + +static void qti_can_receive_frame(struct qti_can *priv_data, + struct can_receive_frame *frame) +{ + struct can_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *skt; + ktime_t nsec; + struct net_device *netdev; + int i; + struct device *dev; + + dev = &priv_data->spidev->dev; + if (frame->can_if >= priv_data->max_can_channels) { + LOGDE("qti_can rcv error. Channel is %d\n", frame->can_if); + return; + } + + netdev = priv_data->netdev[frame->can_if]; + skb = alloc_can_skb(netdev, &cf); + if (!skb) { + LOGDE("skb alloc failed. 
frame->can_if %d\n", frame->can_if); + return; + } + + LOGDI("rcv frame %d %d %x %d %x %x %x %x %x %x %x %x\n", + frame->can_if, frame->ts, frame->mid, frame->dlc, + frame->data[0], frame->data[1], frame->data[2], frame->data[3], + frame->data[4], frame->data[5], frame->data[6], frame->data[7]); + cf->can_id = le32_to_cpu(frame->mid); + cf->can_dlc = get_can_dlc(frame->dlc); + + for (i = 0; i < cf->can_dlc; i++) + cf->data[i] = frame->data[i]; + + nsec = ms_to_ktime(le32_to_cpu(frame->ts)); + skt = skb_hwtstamps(skb); + skt->hwtstamp = nsec; + LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp)); + skb->tstamp = nsec; + netif_rx(skb); + netdev->stats.rx_packets++; +} + +static void qti_can_receive_property(struct qti_can *priv_data, + struct vehicle_property *property) +{ + struct canfd_frame *cfd; + u8 *p; + struct sk_buff *skb; + struct skb_shared_hwtstamps *skt; + ktime_t nsec; + struct net_device *netdev; + struct device *dev; + int i; + + /* can0 as the channel with properties */ + dev = &priv_data->spidev->dev; + netdev = priv_data->netdev[0]; + skb = alloc_canfd_skb(netdev, &cfd); + if (!skb) { + LOGDE("skb alloc failed. 
frame->can_if %d\n", 0); + return; + } + + LOGDI("rcv property:0x%x data:%2x %2x %2x %2x", property->id, + property->str[0], property->str[1], + property->str[2], property->str[3]); + cfd->can_id = 0x00; + cfd->len = sizeof(struct vehicle_property); + + p = (u8 *)property; + for (i = 0; i < cfd->len; i++) + cfd->data[i] = p[i]; + + nsec = ns_to_ktime(le64_to_cpu(property->ts)); + skt = skb_hwtstamps(skb); + skt->hwtstamp = nsec; + LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp)); + skb->tstamp = nsec; + netif_rx(skb); + netdev->stats.rx_packets++; +} + +static int qti_can_process_response(struct qti_can *priv_data, + struct spi_miso *resp, int length) +{ + int ret = 0; + + LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq); + if (resp->cmd == CMD_CAN_RECEIVE_FRAME) { + struct can_receive_frame *frame = + (struct can_receive_frame *)&resp->data; + if (resp->len > length) { + /* Error. This should never happen */ + LOGDE("%s error: Saving %d bytes\n", __func__, length); + memcpy(priv_data->assembly_buffer, (char *)resp, + length); + priv_data->assembly_buffer_size = length; + } else { + qti_can_receive_frame(priv_data, frame); + } + } else if (resp->cmd == CMD_PROPERTY_READ) { + struct vehicle_property *property = + (struct vehicle_property *)&resp->data; + if (resp->len > length) { + /* Error. 
This should never happen */ + LOGDE("%s error: Saving %d bytes\n", __func__, length); + memcpy(priv_data->assembly_buffer, (char *)resp, + length); + priv_data->assembly_buffer_size = length; + } else { + qti_can_receive_property(priv_data, property); + } + } else if (resp->cmd == CMD_GET_FW_VERSION) { + struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data; + + dev_info(&priv_data->spidev->dev, "fw %d.%d", + fw_resp->maj, fw_resp->min); + dev_info(&priv_data->spidev->dev, "fw string %s", + fw_resp->ver); + } else if (resp->cmd == CMD_GET_FW_BR_VERSION) { + struct can_fw_br_resp *fw_resp = + (struct can_fw_br_resp *)resp->data; + dev_info(&priv_data->spidev->dev, "fw_can %d.%d", + fw_resp->maj, fw_resp->min); + dev_info(&priv_data->spidev->dev, "fw string %s", + fw_resp->ver); + dev_info(&priv_data->spidev->dev, "fw_br %d.%d exec_mode %d", + fw_resp->br_maj, fw_resp->br_min, + fw_resp->curr_exec_mode); + ret = fw_resp->curr_exec_mode << 28; + ret |= (fw_resp->br_maj & 0xF) << 24; + ret |= (fw_resp->br_min & 0xFF) << 16; + ret |= (fw_resp->maj & 0xF) << 8; + ret |= (fw_resp->min & 0xFF); + } + + if (resp->cmd == priv_data->wait_cmd) { + priv_data->cmd_result = ret; + complete(&priv_data->response_completion); + } + return ret; +} + +static int qti_can_process_rx(struct qti_can *priv_data, char *rx_buf) +{ + struct spi_miso *resp; + struct device *dev; + int length_processed = 0, actual_length = priv_data->xfer_length; + int ret = 0; + + dev = &priv_data->spidev->dev; + while (length_processed < actual_length) { + int length_left = actual_length - length_processed; + int length = 0; /* length of consumed chunk */ + void *data; + + if (priv_data->assembly_buffer_size > 0) { + LOGDI("callback: Reassembling %d bytes\n", + priv_data->assembly_buffer_size); + /* should copy just 1 byte instead, since cmd should */ + /* already been copied as being first byte */ + memcpy(priv_data->assembly_buffer + + priv_data->assembly_buffer_size, + rx_buf, 2); + data = 
priv_data->assembly_buffer; + resp = (struct spi_miso *)data; + length = resp->len + sizeof(*resp) + - priv_data->assembly_buffer_size; + if (length > 0) + memcpy(priv_data->assembly_buffer + + priv_data->assembly_buffer_size, + rx_buf, length); + length_left += priv_data->assembly_buffer_size; + priv_data->assembly_buffer_size = 0; + } else { + data = rx_buf + length_processed; + resp = (struct spi_miso *)data; + if (resp->cmd == 0x00 || resp->cmd == 0xFF) { + /* special case. ignore cmd==0x00, 0xFF */ + length_processed += 1; + continue; + } + length = resp->len + sizeof(struct spi_miso); + } + LOGDI("processing. p %d -> l %d (t %d)\n", + length_processed, length_left, priv_data->xfer_length); + length_processed += length; + if (length_left >= sizeof(*resp) && + resp->len + sizeof(*resp) <= length_left) { + struct spi_miso *resp = + (struct spi_miso *)data; + ret = qti_can_process_response(priv_data, resp, + length_left); + } else if (length_left > 0) { + /* Not full message. Store however much we have for */ + /* later assembly */ + LOGDI("callback: Storing %d bytes of response\n", + length_left); + memcpy(priv_data->assembly_buffer, data, length_left); + priv_data->assembly_buffer_size = length_left; + break; + } + } + return ret; +} + +static int qti_can_do_spi_transaction(struct qti_can *priv_data) +{ + struct spi_device *spi; + struct spi_transfer *xfer; + struct spi_message *msg; + struct device *dev; + int ret; + + spi = priv_data->spidev; + dev = &spi->dev; + msg = devm_kzalloc(&spi->dev, sizeof(*msg), GFP_KERNEL); + xfer = devm_kzalloc(&spi->dev, sizeof(*xfer), GFP_KERNEL); + if (!xfer || !msg) + return -ENOMEM; + LOGDI(">%x %2d [%d]\n", priv_data->tx_buf[0], + priv_data->tx_buf[1], priv_data->tx_buf[2]); + spi_message_init(msg); + spi_message_add_tail(xfer, msg); + xfer->tx_buf = priv_data->tx_buf; + xfer->rx_buf = priv_data->rx_buf; + xfer->len = priv_data->xfer_length; + xfer->bits_per_word = priv_data->bits_per_word; + ret = spi_sync(spi, msg); + 
LOGDI("spi_sync ret %d data %x %x %x %x %x %x %x %x\n", ret, + priv_data->rx_buf[0], priv_data->rx_buf[1], + priv_data->rx_buf[2], priv_data->rx_buf[3], + priv_data->rx_buf[4], priv_data->rx_buf[5], + priv_data->rx_buf[6], priv_data->rx_buf[7]); + + if (ret == 0) + qti_can_process_rx(priv_data, priv_data->rx_buf); + devm_kfree(&spi->dev, msg); + devm_kfree(&spi->dev, xfer); + return ret; +} + +static int qti_can_rx_message(struct qti_can *priv_data) +{ + char *tx_buf, *rx_buf; + int ret; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + ret = qti_can_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + return ret; +} + +static int qti_can_query_firmware_version(struct qti_can *priv_data) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_GET_FW_VERSION; + req->len = 0; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + priv_data->wait_cmd = CMD_GET_FW_VERSION; + priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + + ret = qti_can_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0) { + LOGDI("waiting for completion with timeout of %d jiffies", + msecs_to_jiffies(QUERY_FIRMWARE_TIMEOUT_MS)); + wait_for_completion_interruptible_timeout( + &priv_data->response_completion, + msecs_to_jiffies(QUERY_FIRMWARE_TIMEOUT_MS)); + LOGDI("done waiting"); + ret = priv_data->cmd_result; + } + + return ret; +} + +static int qti_can_set_bitrate(struct net_device *netdev) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + 
struct can_config_bit_timing *req_d; + struct qti_can *priv_data; + struct can_priv *priv = netdev_priv(netdev); + struct qti_can_netdev_privdata *qti_can_priv; + + qti_can_priv = netdev_priv(netdev); + priv_data = qti_can_priv->qti_can; + + netdev_info(netdev, "ch%i, bitrate setting>%i", + qti_can_priv->netdev_index, priv->bittiming.bitrate); + LOGNI("sjw>%i brp>%i ph_sg1>%i ph_sg2>%i smpl_pt>%i tq>%i pr_seg>%i", + priv->bittiming.sjw, priv->bittiming.brp, + priv->bittiming.phase_seg1, + priv->bittiming.phase_seg2, + priv->bittiming.sample_point, + priv->bittiming.tq, priv->bittiming.prop_seg); + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_CAN_CONFIG_BIT_TIMING; + req->len = sizeof(struct can_config_bit_timing); + req->seq = atomic_inc_return(&priv_data->msg_seq); + req_d = (struct can_config_bit_timing *)req->data; + req_d->can_if = qti_can_priv->netdev_index; + req_d->prop_seg = priv->bittiming.prop_seg; + req_d->phase_seg1 = priv->bittiming.phase_seg1; + req_d->phase_seg2 = priv->bittiming.phase_seg2; + req_d->sjw = priv->bittiming.sjw; + req_d->brp = priv->bittiming.brp; + ret = qti_can_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + return ret; +} + +static int qti_can_write(struct qti_can *priv_data, + int can_channel, struct canfd_frame *cf) +{ + char *tx_buf, *rx_buf; + int ret, i; + struct spi_mosi *req; + struct can_write_req *req_d; + struct net_device *netdev; + + if (can_channel < 0 || can_channel >= priv_data->max_can_channels) { + LOGDE("%s error. 
Channel is %d\n", __func__, can_channel); + return -EINVAL; + } + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + if (priv_data->driver_mode == DRIVER_MODE_RAW_FRAMES) { + req->cmd = CMD_CAN_SEND_FRAME; + req->len = sizeof(struct can_write_req) + 8; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + req_d = (struct can_write_req *)req->data; + req_d->can_if = can_channel; + req_d->mid = cf->can_id; + req_d->dlc = cf->len; + + for (i = 0; i < cf->len; i++) + req_d->data[i] = cf->data[i]; + } else if (priv_data->driver_mode == DRIVER_MODE_PROPERTIES || + priv_data->driver_mode == DRIVER_MODE_AMB) { + req->cmd = CMD_PROPERTY_WRITE; + req->len = sizeof(struct vehicle_property); + req->seq = atomic_inc_return(&priv_data->msg_seq); + for (i = 0; i < cf->len; i++) + req->data[i] = cf->data[i]; + } else { + LOGDE("%s: wrong driver mode %i", + __func__, priv_data->driver_mode); + } + + ret = qti_can_do_spi_transaction(priv_data); + netdev = priv_data->netdev[can_channel]; + netdev->stats.tx_packets++; + mutex_unlock(&priv_data->spi_lock); + + return ret; +} + +static int qti_can_netdev_open(struct net_device *netdev) +{ + int err; + + LOGNI("Open"); + err = open_candev(netdev); + if (err) + return err; + + netif_start_queue(netdev); + + return 0; +} + +static int qti_can_netdev_close(struct net_device *netdev) +{ + LOGNI("Close"); + + netif_stop_queue(netdev); + close_candev(netdev); + return 0; +} + +static void qti_can_send_can_frame(struct work_struct *ws) +{ + struct qti_can_tx_work *tx_work; + struct canfd_frame *cf; + struct qti_can *priv_data; + struct net_device *netdev; + struct qti_can_netdev_privdata *netdev_priv_data; + int can_channel; + + tx_work = container_of(ws, struct qti_can_tx_work, work); + netdev = tx_work->netdev; + netdev_priv_data = 
netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + can_channel = netdev_priv_data->netdev_index; + + LOGDI("send_can_frame ws %pK\n", ws); + LOGDI("send_can_frame tx %pK\n", tx_work); + + cf = (struct canfd_frame *)tx_work->skb->data; + qti_can_write(priv_data, can_channel, cf); + + dev_kfree_skb(tx_work->skb); + kfree(tx_work); +} + +static netdev_tx_t qti_can_netdev_start_xmit( + struct sk_buff *skb, struct net_device *netdev) +{ + struct qti_can_netdev_privdata *netdev_priv_data = netdev_priv(netdev); + struct qti_can *priv_data = netdev_priv_data->qti_can; + struct qti_can_tx_work *tx_work; + + LOGNI("netdev_start_xmit"); + if (can_dropped_invalid_skb(netdev, skb)) { + LOGNE("Dropping invalid can frame\n"); + return NETDEV_TX_OK; + } + tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC); + if (!tx_work) + return NETDEV_TX_OK; + INIT_WORK(&tx_work->work, qti_can_send_can_frame); + tx_work->netdev = netdev; + tx_work->skb = skb; + queue_work(priv_data->tx_wq, &tx_work->work); + + return NETDEV_TX_OK; +} + +static int qti_can_send_release_can_buffer_cmd(struct net_device *netdev) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + struct qti_can *priv_data; + struct qti_can_netdev_privdata *netdev_priv_data; + int *mode; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_CAN_RELEASE_BUFFER; + req->len = sizeof(int); + req->seq = atomic_inc_return(&priv_data->msg_seq); + mode = (int *)req->data; + *mode = priv_data->driver_mode; + + ret = qti_can_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + return ret; +} + +static int qti_can_data_buffering(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + char *tx_buf, 
*rx_buf; + int ret; + u32 timeout; + struct spi_mosi *req; + struct qti_can_buffer *enable_buffering; + struct qti_can_buffer *add_request; + struct qti_can *priv_data; + struct qti_can_netdev_privdata *netdev_priv_data; + struct spi_device *spi; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + spi = priv_data->spidev; + timeout = priv_data->can_fw_cmd_timeout_ms; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + if (!ifr) + return -EINVAL; + add_request = devm_kzalloc(&spi->dev, + sizeof(struct qti_can_buffer), + GFP_KERNEL); + if (!add_request) + return -ENOMEM; + + if (copy_from_user(add_request, ifr->ifr_data, + sizeof(struct qti_can_buffer))) { + devm_kfree(&spi->dev, add_request); + return -EFAULT; + } + + req = (struct spi_mosi *)tx_buf; + if (cmd == IOCTL_ENABLE_BUFFERING) + req->cmd = CMD_CAN_DATA_BUFF_ADD; + else + req->cmd = CMD_CAN_DATA_BUFF_REMOVE; + req->len = sizeof(struct qti_can_buffer); + req->seq = atomic_inc_return(&priv_data->msg_seq); + + enable_buffering = (struct qti_can_buffer *)req->data; + enable_buffering->can_if = add_request->can_if; + enable_buffering->mid = add_request->mid; + enable_buffering->mask = add_request->mask; + + if (priv_data->can_fw_cmd_timeout_req) { + priv_data->wait_cmd = req->cmd; + priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + } + + ret = qti_can_do_spi_transaction(priv_data); + devm_kfree(&spi->dev, add_request); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0 && priv_data->can_fw_cmd_timeout_req) { + LOGDI("%s ready to wait for response\n", __func__); + ret = wait_for_completion_interruptible_timeout( + &priv_data->response_completion, + msecs_to_jiffies(timeout)); + ret = priv_data->cmd_result; + } + return ret; +} + +static int 
qti_can_remove_all_buffering(struct net_device *netdev) +{ + char *tx_buf, *rx_buf; + int ret; + u32 timeout; + struct spi_mosi *req; + struct qti_can *priv_data; + struct qti_can_netdev_privdata *netdev_priv_data; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + timeout = priv_data->rem_all_buffering_timeout_ms; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL; + req->len = 0; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + if (priv_data->can_fw_cmd_timeout_req) { + priv_data->wait_cmd = req->cmd; + priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + } + + ret = qti_can_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0 && priv_data->can_fw_cmd_timeout_req) { + LOGDI("%s wait for response\n", __func__); + ret = wait_for_completion_interruptible_timeout( + &priv_data->response_completion, + msecs_to_jiffies(timeout)); + ret = priv_data->cmd_result; + } + + return ret; +} + +static int qti_can_frame_filter(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + struct can_filter_req *add_filter; + struct can_filter_req *filter_request; + struct qti_can *priv_data; + struct qti_can_netdev_privdata *netdev_priv_data; + struct spi_device *spi; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + spi = priv_data->spidev; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + if (!ifr) + return -EINVAL; + + filter_request = + 
devm_kzalloc(&spi->dev, sizeof(struct can_filter_req), + GFP_KERNEL); + if (!filter_request) + return -ENOMEM; + + if (copy_from_user(filter_request, ifr->ifr_data, + sizeof(struct can_filter_req))) { + devm_kfree(&spi->dev, filter_request); + return -EFAULT; + } + + req = (struct spi_mosi *)tx_buf; + if (cmd == IOCTL_ADD_FRAME_FILTER) + req->cmd = CMD_CAN_ADD_FILTER; + else + req->cmd = CMD_CAN_REMOVE_FILTER; + + req->len = sizeof(struct can_filter_req); + req->seq = atomic_inc_return(&priv_data->msg_seq); + + add_filter = (struct can_filter_req *)req->data; + add_filter->can_if = filter_request->can_if; + add_filter->mid = filter_request->mid; + add_filter->mask = filter_request->mask; + + ret = qti_can_do_spi_transaction(priv_data); + devm_kfree(&spi->dev, filter_request); + mutex_unlock(&priv_data->spi_lock); + return ret; +} + +static int qti_can_send_spi_locked(struct qti_can *priv_data, int cmd, int len, + u8 *data) +{ + char *tx_buf, *rx_buf; + struct spi_mosi *req; + int ret; + + LOGDI("%s\n", __func__); + + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = cmd; + req->len = len; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + if (unlikely(len > 64)) + return -EINVAL; + memcpy(req->data, data, len); + + ret = qti_can_do_spi_transaction(priv_data); + return ret; +} + +static int qti_can_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd) +{ + switch (ioctl_cmd) { + case IOCTL_GET_FW_BR_VERSION: + return CMD_GET_FW_BR_VERSION; + case IOCTL_BEGIN_FIRMWARE_UPGRADE: + return CMD_BEGIN_FIRMWARE_UPGRADE; + case IOCTL_FIRMWARE_UPGRADE_DATA: + return CMD_FIRMWARE_UPGRADE_DATA; + case IOCTL_END_FIRMWARE_UPGRADE: + return CMD_END_FIRMWARE_UPGRADE; + case IOCTL_BEGIN_BOOT_ROM_UPGRADE: + return CMD_BEGIN_BOOT_ROM_UPGRADE; + case IOCTL_BOOT_ROM_UPGRADE_DATA: + return 
CMD_BOOT_ROM_UPGRADE_DATA; + case IOCTL_END_BOOT_ROM_UPGRADE: + return CMD_END_BOOT_ROM_UPGRADE; + case IOCTL_END_FW_UPDATE_FILE: + return CMD_END_FW_UPDATE_FILE; + } + return -EINVAL; +} + +static int qti_can_do_blocking_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + int spi_cmd, ret; + + struct qti_can *priv_data; + struct qti_can_netdev_privdata *netdev_priv_data; + struct qti_can_ioctl_req *ioctl_data = NULL; + struct spi_device *spi; + int len = 0; + u8 *data = NULL; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + spi = priv_data->spidev; + + spi_cmd = qti_can_convert_ioctl_cmd_to_spi_cmd(cmd); + LOGDI("%s spi_cmd %x\n", __func__, spi_cmd); + if (spi_cmd < 0) { + LOGDE("%s wrong command %d\n", __func__, cmd); + return spi_cmd; + } + + if (!ifr) + return -EINVAL; + + mutex_lock(&priv_data->spi_lock); + if (spi_cmd == CMD_FIRMWARE_UPGRADE_DATA || + spi_cmd == CMD_BOOT_ROM_UPGRADE_DATA) { + ioctl_data = + devm_kzalloc(&spi->dev, + sizeof(struct qti_can_ioctl_req), + GFP_KERNEL); + if (!ioctl_data) + return -ENOMEM; + + if (copy_from_user(ioctl_data, ifr->ifr_data, + sizeof(struct qti_can_ioctl_req))) { + devm_kfree(&spi->dev, ioctl_data); + return -EFAULT; + } + + /* Regular NULL check will fail here as ioctl_data is at + * some offset + */ + if ((void *)ioctl_data > (void *)0x100) { + len = ioctl_data->len; + data = ioctl_data->data; + } + } + LOGDI("%s len %d\n", __func__, len); + + priv_data->wait_cmd = spi_cmd; + priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + + ret = qti_can_send_spi_locked(priv_data, spi_cmd, len, data); + if (ioctl_data) + devm_kfree(&spi->dev, ioctl_data); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0) { + LOGDI("%s ready to wait for response\n", __func__); + wait_for_completion_interruptible_timeout( + &priv_data->response_completion, + 5 * HZ); + ret = priv_data->cmd_result; + } + return ret; +} + +static int 
qti_can_netdev_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct qti_can *priv_data; + struct qti_can_netdev_privdata *netdev_priv_data; + int *mode; + int ret = -EINVAL; + struct spi_device *spi; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->qti_can; + spi = priv_data->spidev; + LOGDI("%s %x\n", __func__, cmd); + + switch (cmd) { + case IOCTL_RELEASE_CAN_BUFFER: + if (!ifr) + return -EINVAL; + + /* Regular NULL check will fail here as ioctl_data is at + * some offset + */ + if (ifr->ifr_data > (void __user *)IFR_DATA_OFFSET) { + mutex_lock(&priv_data->spi_lock); + mode = devm_kzalloc(&spi->dev, sizeof(int), GFP_KERNEL); + if (!mode) + return -ENOMEM; + if (copy_from_user(mode, ifr->ifr_data, sizeof(int))) { + devm_kfree(&spi->dev, mode); + return -EFAULT; + } + priv_data->driver_mode = *mode; + LOGDE("qti_can_driver_mode %d\n", + priv_data->driver_mode); + devm_kfree(&spi->dev, mode); + mutex_unlock(&priv_data->spi_lock); + } + qti_can_send_release_can_buffer_cmd(netdev); + ret = 0; + break; + case IOCTL_ENABLE_BUFFERING: + case IOCTL_DISABLE_BUFFERING: + qti_can_data_buffering(netdev, ifr, cmd); + ret = 0; + break; + case IOCTL_DISABLE_ALL_BUFFERING: + qti_can_remove_all_buffering(netdev); + ret = 0; + break; + case IOCTL_ADD_FRAME_FILTER: + case IOCTL_REMOVE_FRAME_FILTER: + qti_can_frame_filter(netdev, ifr, cmd); + ret = 0; + break; + case IOCTL_GET_FW_BR_VERSION: + case IOCTL_BEGIN_FIRMWARE_UPGRADE: + case IOCTL_FIRMWARE_UPGRADE_DATA: + case IOCTL_END_FIRMWARE_UPGRADE: + case IOCTL_BEGIN_BOOT_ROM_UPGRADE: + case IOCTL_BOOT_ROM_UPGRADE_DATA: + case IOCTL_END_BOOT_ROM_UPGRADE: + case IOCTL_END_FW_UPDATE_FILE: + ret = qti_can_do_blocking_ioctl(netdev, ifr, cmd); + break; + } + LOGDI("%s ret %d\n", __func__, ret); + + return ret; +} + +static const struct net_device_ops qti_can_netdev_ops = { + .ndo_open = qti_can_netdev_open, + .ndo_stop = qti_can_netdev_close, + .ndo_start_xmit = qti_can_netdev_start_xmit, 
+ .ndo_do_ioctl = qti_can_netdev_do_ioctl, +}; + +static int qti_can_create_netdev(struct spi_device *spi, + struct qti_can *priv_data, int index) +{ + struct net_device *netdev; + struct qti_can_netdev_privdata *netdev_priv_data; + + LOGDI("%s %d\n", __func__, index); + if (index < 0 || index >= priv_data->max_can_channels) { + LOGDE("%s wrong index %d\n", __func__, index); + return -EINVAL; + } + netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS); + if (!netdev) { + LOGDE("Couldn't alloc candev\n"); + return -ENOMEM; + } + + netdev->mtu = CANFD_MTU; + + netdev_priv_data = netdev_priv(netdev); + netdev_priv_data->qti_can = priv_data; + netdev_priv_data->netdev_index = index; + + priv_data->netdev[index] = netdev; + + netdev->netdev_ops = &qti_can_netdev_ops; + SET_NETDEV_DEV(netdev, &spi->dev); + netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_LISTENONLY; + if (priv_data->support_can_fd) + netdev_priv_data->can.ctrlmode_supported |= CAN_CTRLMODE_FD; + netdev_priv_data->can.bittiming_const = &qti_can_bittiming_const; + netdev_priv_data->can.data_bittiming_const = + &qti_can_data_bittiming_const; + netdev_priv_data->can.clock.freq = priv_data->clk_freq_mhz; + netdev_priv_data->can.do_set_bittiming = qti_can_set_bitrate; + + return 0; +} + +static struct qti_can *qti_can_create_priv_data(struct spi_device *spi) +{ + struct qti_can *priv_data; + int err; + struct device *dev; + + dev = &spi->dev; + priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL); + if (!priv_data) { + err = -ENOMEM; + return NULL; + } + spi_set_drvdata(spi, priv_data); + atomic_set(&priv_data->netif_queue_stop, 0); + priv_data->spidev = spi; + priv_data->assembly_buffer = devm_kzalloc(dev, + RX_ASSEMBLY_BUFFER_SIZE, + GFP_KERNEL); + if (!priv_data->assembly_buffer) { + err = -ENOMEM; + goto cleanup_privdata; + } + + priv_data->tx_wq = alloc_workqueue("qti_can_tx_wq", 0, 0); + if (!priv_data->tx_wq) { + LOGDE("Couldn't alloc workqueue\n"); 
+ err = -ENOMEM; + goto cleanup_privdata; + } + + priv_data->tx_buf = devm_kzalloc(dev, + XFER_BUFFER_SIZE, + GFP_KERNEL); + priv_data->rx_buf = devm_kzalloc(dev, + XFER_BUFFER_SIZE, + GFP_KERNEL); + if (!priv_data->tx_buf || !priv_data->rx_buf) { + LOGDE("Couldn't alloc tx or rx buffers\n"); + err = -ENOMEM; + goto cleanup_privdata; + } + priv_data->xfer_length = 0; + priv_data->driver_mode = DRIVER_MODE_RAW_FRAMES; + + mutex_init(&priv_data->spi_lock); + atomic_set(&priv_data->msg_seq, 0); + init_completion(&priv_data->response_completion); + return priv_data; + +cleanup_privdata: + if (priv_data) { + if (priv_data->tx_wq) + destroy_workqueue(priv_data->tx_wq); + devm_kfree(dev, priv_data->rx_buf); + devm_kfree(dev, priv_data->tx_buf); + devm_kfree(dev, priv_data->assembly_buffer); + devm_kfree(dev, priv_data); + } + return NULL; +} + +static const struct of_device_id qti_can_match_table[] = { + { .compatible = "qcom,renesas,rh850" }, + { .compatible = "qcom,nxp,mpc5746c" }, + { } +}; + +static int qti_can_probe(struct spi_device *spi) +{ + int err, retry = 0, query_err = -1, i; + struct qti_can *priv_data = NULL; + struct device *dev; + + dev = &spi->dev; + dev_info(dev, "%s", __func__); + + err = spi_setup(spi); + if (err) { + dev_err(dev, "spi_setup failed: %d", err); + return err; + } + + priv_data = qti_can_create_priv_data(spi); + if (!priv_data) { + dev_err(dev, "Failed to create qti_can priv_data\n"); + err = -ENOMEM; + return err; + } + + err = of_property_read_u32(spi->dev.of_node, "qcom,clk-freq-mhz", + &priv_data->clk_freq_mhz); + if (err) { + LOGDE("DT property: qcom,clk-freq-mhz not defined\n"); + return err; + } + + err = of_property_read_u32(spi->dev.of_node, "qcom,max-can-channels", + &priv_data->max_can_channels); + if (err) { + LOGDE("DT property: qcom,max-can-channels not defined\n"); + return err; + } + + err = of_property_read_u32(spi->dev.of_node, "qcom,bits-per-word", + &priv_data->bits_per_word); + if (err) + priv_data->bits_per_word = 16; 
+ + err = of_property_read_u32(spi->dev.of_node, "qcom,reset-delay-msec", + &priv_data->reset_delay_msec); + if (err) + priv_data->reset_delay_msec = 1; + + priv_data->can_fw_cmd_timeout_req = + of_property_read_bool(spi->dev.of_node, + "qcom,can-fw-cmd-timeout-req"); + + err = of_property_read_u32(spi->dev.of_node, + "qcom,can-fw-cmd-timeout-ms", + &priv_data->can_fw_cmd_timeout_ms); + if (err) + priv_data->can_fw_cmd_timeout_ms = 0; + + err = of_property_read_u32(spi->dev.of_node, + "qcom,rem-all-buffering-timeout-ms", + &priv_data->rem_all_buffering_timeout_ms); + if (err) + priv_data->rem_all_buffering_timeout_ms = 0; + + priv_data->reset = of_get_named_gpio(spi->dev.of_node, + "qcom,reset-gpio", 0); + + if (gpio_is_valid(priv_data->reset)) { + err = gpio_request(priv_data->reset, "qti-can-reset"); + if (err < 0) { + LOGDE("failed to request gpio %d: %d\n", + priv_data->reset, err); + return err; + } + + gpio_direction_output(priv_data->reset, 0); + /* delay to generate non-zero reset pulse width */ + udelay(1); + gpio_direction_output(priv_data->reset, 1); + /* wait for controller to come up after reset */ + msleep(priv_data->reset_delay_msec); + } + + priv_data->support_can_fd = of_property_read_bool(spi->dev.of_node, + "support-can-fd"); + + if (of_device_is_compatible(spi->dev.of_node, "qcom,nxp,mpc5746c")) + qti_can_bittiming_const = flexcan_bittiming_const; + else if (of_device_is_compatible(spi->dev.of_node, + "qcom,renesas,rh850")) + qti_can_bittiming_const = rh850_bittiming_const; + + priv_data->netdev = devm_kcalloc(dev, + priv_data->max_can_channels, + sizeof(priv_data->netdev[0]), + GFP_KERNEL); + if (!priv_data->netdev) { + err = -ENOMEM; + return err; + } + + for (i = 0; i < priv_data->max_can_channels; i++) { + err = qti_can_create_netdev(spi, priv_data, i); + if (err) { + LOGDE("Failed to create CAN device: %d", err); + goto cleanup_candev; + } + + err = register_candev(priv_data->netdev[i]); + if (err) { + LOGDE("Failed to register CAN device: 
%d", err); + goto unregister_candev; + } + } + + err = request_threaded_irq(spi->irq, NULL, qti_can_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "qti-can", priv_data); + if (err) { + LOGDE("Failed to request irq: %d", err); + goto unregister_candev; + } + dev_info(dev, "Request irq %d ret %d\n", spi->irq, err); + + while ((query_err != 0) && (retry < QTI_CAN_FW_QUERY_RETRY_COUNT)) { + LOGDI("Trying to query fw version %d", retry); + query_err = qti_can_query_firmware_version(priv_data); + priv_data->assembly_buffer_size = 0; + retry++; + } + + if (query_err) { + LOGDE("QTI CAN probe failed\n"); + err = -ENODEV; + goto free_irq; + } + return 0; + +free_irq: + free_irq(spi->irq, priv_data); +unregister_candev: + for (i = 0; i < priv_data->max_can_channels; i++) + unregister_candev(priv_data->netdev[i]); +cleanup_candev: + if (priv_data) { + for (i = 0; i < priv_data->max_can_channels; i++) { + if (priv_data->netdev[i]) + free_candev(priv_data->netdev[i]); + } + if (priv_data->tx_wq) + destroy_workqueue(priv_data->tx_wq); + devm_kfree(dev, priv_data->rx_buf); + devm_kfree(dev, priv_data->tx_buf); + devm_kfree(dev, priv_data->assembly_buffer); + devm_kfree(dev, priv_data->netdev); + devm_kfree(dev, priv_data); + } + return err; +} + +static int qti_can_remove(struct spi_device *spi) +{ + struct qti_can *priv_data = spi_get_drvdata(spi); + int i; + + LOGDI("%s\n", __func__); + for (i = 0; i < priv_data->max_can_channels; i++) { + unregister_candev(priv_data->netdev[i]); + free_candev(priv_data->netdev[i]); + } + destroy_workqueue(priv_data->tx_wq); + kfree(priv_data->rx_buf); + kfree(priv_data->tx_buf); + kfree(priv_data->assembly_buffer); + kfree(priv_data->netdev); + kfree(priv_data); + return 0; +} + +#ifdef CONFIG_PM +static int qti_can_suspend(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + enable_irq_wake(spi->irq); + return 0; +} + +static int qti_can_resume(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct 
qti_can *priv_data = spi_get_drvdata(spi); + + disable_irq_wake(spi->irq); + qti_can_rx_message(priv_data); + return 0; +} + +static const struct dev_pm_ops qti_can_dev_pm_ops = { + .suspend = qti_can_suspend, + .resume = qti_can_resume, +}; +#endif + +static struct spi_driver qti_can_driver = { + .driver = { + .name = "qti-can", + .of_match_table = qti_can_match_table, + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &qti_can_dev_pm_ops, +#endif + }, + .probe = qti_can_probe, + .remove = qti_can_remove, +}; +module_spi_driver(qti_can_driver); + +MODULE_DESCRIPTION("QTI CAN controller module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index d040aeb45172662320bf63c75b094cde35b9b294..15c2a831edf192b2678901c9a4c6fce7e9df62cd 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,7 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o -obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o +obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o +ifdef CONFIG_NET_DSA_LOOP +obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o +endif obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index c142b97add2cd3d3f937b81e5749b4258e66bc80..3b073e15223731c82c4a4a31d9638a1e5c86342e 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1122,6 +1122,7 @@ static const struct of_device_id mt7530_of_match[] = { { .compatible = "mediatek,mt7530" }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, mt7530_of_match); static struct mdio_driver mt7530_mdio_driver = { .probe = mt7530_probe, diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 48d672b204a4847bc113635ed9534d8a3fd53066..a4080f18135cd5819622db248125be7cca4d5175 100644 --- 
a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -532,7 +532,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, int i; for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { - int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1); + unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN; slot = &ring->slots[i]; dev_kfree_skb(slot->skb); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4040d846da8e9028d640b75520f821878c6eba91..40d02fec27472c94a7603bc3ada27463b6d5d744 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -479,9 +479,9 @@ struct bgmac_rx_header { struct bgmac { union { struct { - void *base; - void *idm_base; - void *nicpm_base; + void __iomem *base; + void __iomem *idm_base; + void __iomem *nicpm_base; } plat; struct { struct bcma_device *core; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 807cf75f0a98d56093c7a2c020900f37b3efb9ed..bfd2d0382f4cfca593e4de42b9852b278978d8c3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3808,6 +3808,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_tpa_cfg_input req = {0}; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); if (tpa_flags) { diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 48738eb27806910b36a75820ff9c081dbec0a613..9a8ef630466f2f12d1053ed71f0f4b264ab08260 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8723,14 +8723,15 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_mem_rx_release(tp); tg3_mem_tx_release(tp); - /* Protect tg3_get_stats64() from reading freed 
tp->hw_stats. */ - tg3_full_lock(tp, 0); + /* tp->hw_stats can be referenced safely: + * 1. under rtnl_lock + * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set. + */ if (tp->hw_stats) { dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } - tg3_full_unlock(tp); } /* @@ -14167,7 +14168,7 @@ static void tg3_get_stats64(struct net_device *dev, struct tg3 *tp = netdev_priv(dev); spin_lock_bh(&tp->lock); - if (!tp->hw_stats) { + if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) { *stats = tp->net_stats_prev; spin_unlock_bh(&tp->lock); return; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 92d9d795d8747ced447d8597733adcaa9b4fc84c..44a0d04dd8a033e05507d40569a0e9b2642ee9a9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -815,8 +815,6 @@ static int setup_fw_sge_queues(struct adapter *adap) err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], adap->msi_idx, NULL, fwevtq_handler, NULL, -1); - if (err) - t4_free_sge_resources(adap); return err; } @@ -4679,7 +4677,6 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->needs_free_netdev = true; } static int config_mgmt_dev(struct pci_dev *pdev) @@ -5117,6 +5114,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_dev; + err = setup_fw_sge_queues(adapter); + if (err) { + dev_err(adapter->pdev_dev, + "FW sge queue allocation failed, err %d", err); + goto out_free_dev; + } + /* * The card is now ready to go. 
If any errors occur during device * registration we do not fail the whole card but rather proceed only @@ -5165,7 +5169,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cxgb4_ptp_init(adapter); print_adapter_info(adapter); - setup_fw_sge_queues(adapter); return 0; sriov: @@ -5221,6 +5224,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif out_free_dev: + t4_free_sge_resources(adapter); free_some_resources(adapter); if (adapter->flags & USING_MSIX) free_msix_info(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 71a315bc14097908aba2f8f7437759a7445778cb..99a9d52783697d7eaee1b5dae7789e55ef6f8a58 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + adap->sge.uld_rxq_info[uld_type] = NULL; kfree(rxq_info->rspq_id); kfree(rxq_info->uldrxq); kfree(rxq_info); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d24ee1ad3be1dadb36022b721a81effeb7cf6bb4..aef40f02c77fe12885db1fb42dde28f3e990de65 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1897,6 +1897,8 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->rq_count; i++) { + /* enable rq before updating rq desc */ + vnic_rq_enable(&enic->rq[i]); vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[i]) == 0) { @@ -1908,8 +1910,6 @@ static int enic_open(struct net_device *netdev) for (i = 0; i < enic->wq_count; i++) vnic_wq_enable(&enic->wq[i]); - for (i = 0; i < enic->rq_count; i++) - vnic_rq_enable(&enic->rq[i]); if (!enic_is_dynamic(enic) && 
!enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); @@ -1935,8 +1935,12 @@ static int enic_open(struct net_device *netdev) return 0; err_out_free_rq: - for (i = 0; i < enic->rq_count; i++) + for (i = 0; i < enic->rq_count; i++) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + } enic_dev_notify_unset(enic); err_out_free_intr: enic_unset_affinity_hint(enic); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 4f6e9d3470d53c8dab521c3a65c04ea40a265ece..5b4f05805006732de05d6ba770f0da2aa078334d 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1930,8 +1930,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, goto csum_failed; } + /* SGT[0] is used by the linear part */ sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); - qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); + frag_len = skb_headlen(skb); + qm_sg_entry_set_len(&sgt[0], frag_len); sgt[0].bpid = FSL_DPAA_BPID_INV; sgt[0].offset = 0; addr = dma_map_single(dev, skb->data, @@ -1944,9 +1946,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, qm_sg_entry_set64(&sgt[0], addr); /* populate the rest of SGT entries */ - frag = &skb_shinfo(skb)->frags[0]; - frag_len = frag->size; - for (i = 1; i <= nr_frags; i++, frag++) { + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + frag_len = frag->size; WARN_ON(!skb_frag_page(frag)); addr = skb_frag_dma_map(dev, frag, 0, frag_len, dma_dir); @@ -1956,15 +1958,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, goto sg_map_failed; } - qm_sg_entry_set_len(&sgt[i], frag_len); - sgt[i].bpid = FSL_DPAA_BPID_INV; - sgt[i].offset = 0; + qm_sg_entry_set_len(&sgt[i + 1], frag_len); + sgt[i + 1].bpid = FSL_DPAA_BPID_INV; + sgt[i + 1].offset = 0; /* keep the offset in the address */ - qm_sg_entry_set64(&sgt[i], addr); - frag_len = frag->size; + 
qm_sg_entry_set64(&sgt[i + 1], addr); } - qm_sg_entry_set_f(&sgt[i - 1], frag_len); + + /* Set the final bit in the last used entry of the SGT */ + qm_sg_entry_set_f(&sgt[nr_frags], frag_len); qm_fd_set_sg(fd, priv->tx_headroom, skb->len); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index faea674094b96743168f1c8edc6e875d0467f59c..85306d1b2acf5b3dbfb64a6d5b2dae46818accec 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, if (epause->rx_pause) newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; if (epause->tx_pause) - newadv |= ADVERTISED_Asym_Pause; + newadv ^= ADVERTISED_Asym_Pause; oldadv = phydev->advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause); diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index ea43b497414986c55d07ce9b175082202f951044..7af31ddd093f8520a276a092ffd3481528177999 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) set_bucket(dtsec->regs, bucket, true); /* Create element to be added to the driver hash table */ - hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); + hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); if (!hash_entry) return -ENOMEM; hash_entry->addr = addr; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3bdeb295514bde273404c516d07b6c0c024aa025..63daae120b2d45baa03765689b57ad63bfe329cb 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3072,9 +3072,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) if (ndev->features & NETIF_F_RXCSUM) 
gfar_rx_checksum(skb, fcb); - /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, ndev); - /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. @@ -3145,13 +3142,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) continue; } + gfar_process_frame(ndev, skb); + /* Increment the number of packets */ total_pkts++; total_bytes += skb->len; skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(ndev, skb); + skb->protocol = eth_type_trans(skb, ndev); /* Send the packet up the stack */ napi_gro_receive(&rx_queue->grp->napi_rx, skb); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3ae02b0620bc9656131fd2ba17ba2322a2fe7e6a..98493be7b4afedf1ad367ba6b21155ddcb25478b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -933,6 +933,35 @@ static int ibmvnic_open(struct net_device *netdev) return rc; } +static void clean_rx_pools(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_rx_pool *rx_pool; + u64 rx_entries; + int rx_scrqs; + int i, j; + + if (!adapter->rx_pool) + return; + + rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_entries = adapter->req_rx_add_entries_per_subcrq; + + /* Free any remaining skbs in the rx buffer pools */ + for (i = 0; i < rx_scrqs; i++) { + rx_pool = &adapter->rx_pool[i]; + if (!rx_pool) + continue; + + netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); + for (j = 0; j < rx_entries; j++) { + if (rx_pool->rx_buff[j].skb) { + dev_kfree_skb_any(rx_pool->rx_buff[j].skb); + rx_pool->rx_buff[j].skb = NULL; + } + } + } +} + static void clean_tx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_tx_pool *tx_pool; @@ -1010,7 +1039,7 @@ static int __ibmvnic_close(struct net_device *netdev) } } } - + clean_rx_pools(adapter); clean_tx_pools(adapter); adapter->state = VNIC_CLOSED; 
return rc; @@ -1460,8 +1489,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } - netif_carrier_on(netdev); - /* kick napi */ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); @@ -1469,6 +1496,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (adapter->reset_reason != VNIC_RESET_FAILOVER) netdev_notify_peers(netdev); + netif_carrier_on(netdev); + return 0; } @@ -1636,6 +1665,12 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; + dev_kfree_skb_any(rx_buff->skb); + remove_buff_from_pool(adapter, rx_buff); + continue; + } else if (!rx_buff->skb) { + /* free the entry */ + next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); continue; } @@ -1927,6 +1962,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, } memset(scrq->msgs, 0, 4 * PAGE_SIZE); + atomic_set(&scrq->used, 0); scrq->cur = 0; rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 31277d3bb7dc1241032695d2d9424779654f4f5f..ff308b05d68cc91efa7b195db9952d368553557f 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1602,7 +1602,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * we have already determined whether we have link or not. */ if (!mac->autoneg) - return -E1000_ERR_CONFIG; + return 1; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. 
So we only need to diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index f457c5703d0c45d4c9f661395acca1a9814de686..db735644b31214e8c35216d35642531c713abbb1 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -450,7 +450,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * we have already determined whether we have link or not. */ if (!mac->autoneg) - return -E1000_ERR_CONFIG; + return 1; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 991c2a0dd67e073c5521544baca9537560564c47..7a226537877b29a877c86ebbb6a4d4699a44db2c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2329,8 +2329,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; - ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, - GFP_KERNEL); + ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); if (!ring->desc) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d36b799116e4839ac62a3d2a4346510078773a2a..04dbf64fb1cb8b396a39ffc3c451f2dfccaa8cdc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7196,6 +7196,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } i40e_get_oem_version(&pf->hw); + if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && + ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || + hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { + /* The following delay is necessary for 4.33 firmware and older + * to recover after EMP reset. 
200 ms should suffice but we + * put here 300 ms to be sure that FW is ready to operate + * after reset. + */ + mdelay(300); + } + /* re-verify the eeprom if we just had an EMP reset */ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) i40e_verify_eeprom(pf); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9e30cfeac04b17f3a7306c615977dd6bb1951580..20a8018d41ef6b9875ca1003b6cc46e5dfecc7ee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7658,7 +7658,8 @@ static void ixgbe_service_task(struct work_struct *work) if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { ixgbe_ptp_overflow_check(adapter); - ixgbe_ptp_rx_hang(adapter); + if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER) + ixgbe_ptp_rx_hang(adapter); ixgbe_ptp_tx_hang(adapter); } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a539263cd79ce4be8fcc0cbfe6bfdd196336cd38..d28f873169a9041129853cdd1e82f0f64878a669 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1112,6 +1112,7 @@ static void mvneta_port_up(struct mvneta_port *pp) } mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + q_map = 0; /* Enable all initialized RXQs. 
*/ for (queue = 0; queue < rxq_number; queue++) { struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 67f74fcb265e77ea800d2e24aa27b0d3f8aa5322..5fe56dc4cfae079e1914b7fa9c943b2b0ac422f4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1013,6 +1013,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev, if (!coal->tx_max_coalesced_frames_irq) return -EINVAL; + if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME || + coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME || + coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME || + coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) { + netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n", + __func__, MLX4_EN_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS || + coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) { + netdev_info(dev, "%s: maximum coalesced frames supported is %d\n", + __func__, MLX4_EN_MAX_COAL_PKTS); + return -ERANGE; + } + priv->rx_frames = (coal->rx_max_coalesced_frames == MLX4_EN_AUTO_CONF) ? 
MLX4_EN_RX_COAL_TARGET : diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c097eef41a9c82dd19a493848f2ee5f1061c2080..faa4bd21f148a3fed25dd42fa350fdbf1aac414a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3318,12 +3318,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_ring[t]) { err = -ENOMEM; - goto err_free_tx; + goto out; } priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, GFP_KERNEL); if (!priv->tx_cq[t]) { - kfree(priv->tx_ring[t]); err = -ENOMEM; goto out; } @@ -3576,11 +3575,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, return 0; -err_free_tx: - while (t--) { - kfree(priv->tx_ring[t]); - kfree(priv->tx_cq[t]); - } out: mlx4_en_destroy_netdev(dev); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e61c99ef741d94bc7101c917f0a2cd0e520bfd19..c273a3ebb8e8e65464670931d79b4dbb8b7e5289 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3007,6 +3007,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) mlx4_err(dev, "Failed to create file for port %d\n", port); devlink_port_unregister(&info->devlink_port); info->port = -1; + return err; } sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); @@ -3028,9 +3029,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) &info->port_attr); devlink_port_unregister(&info->devlink_port); info->port = -1; + return err; } - return err; + return 0; } static void mlx4_cleanup_port_info(struct mlx4_port_info *info) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2c1a5ff6acfaf1c0f8b6b521c0c2da2220fdecb9..09f4764a3f3922960745ac00ae43972871707c89 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -131,6 +131,9 @@ #define MLX4_EN_TX_COAL_PKTS 16 #define MLX4_EN_TX_COAL_TIME 0x10 +#define MLX4_EN_MAX_COAL_PKTS U16_MAX +#define MLX4_EN_MAX_COAL_TIME U16_MAX + #define MLX4_EN_RX_RATE_LOW 400000 #define MLX4_EN_RX_COAL_TIME_LOW 0 #define MLX4_EN_RX_RATE_HIGH 450000 @@ -547,8 +550,8 @@ struct mlx4_en_priv { u16 rx_usecs_low; u32 pkt_rate_high; u16 rx_usecs_high; - u16 sample_interval; - u16 adaptive_rx_coal; + u32 sample_interval; + u32 adaptive_rx_coal; u32 msg_enable; u32 loopback_ok; u32 validate_loopback; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index fdaef00465d77818bb1fe74786c404ef360686d7..576b61c119bb9e61c307d8d2a2da6985a5fdef3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -46,7 +46,7 @@ config MLX5_MPFS config MLX5_ESWITCH bool "Mellanox Technologies MLX5 SRIOV E-Switch support" - depends on MLX5_CORE_EN + depends on MLX5_CORE_EN && NET_SWITCHDEV default y ---help--- Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e9a1fbcc4adfa6e692902b551d0c535bfe019a9a..3efe45bc247127bfbf15567c5204c91d1c396ea3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1802,7 +1802,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->checksum_disabled = 1; cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; - cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; if (cmd->cmdif_rev > CMD_IF_REV) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 225b2ad3e15f47822b51e3c36542649d3f1f3181..337ce94237946b7bc34d51896585ac2cc15e386d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4022,7 +4022,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } -#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) +#if IS_ENABLED(CONFIG_MLX5_ESWITCH) static const struct switchdev_ops mlx5e_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; @@ -4126,7 +4126,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); -#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) +#if IS_ENABLED(CONFIG_MLX5_ESWITCH) if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 5ffd1db4e797693b57aff20ca7c374f14d9a615b..4727e7390834f1e3cc77dcfd821867792df69159 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -825,9 +825,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) 
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; -#ifdef CONFIG_NET_SWITCHDEV netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; -#endif netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 42bab73a9f408b82b7d4bef1d6dde7c627180927..e28f9dab9ceb30977ed98c4da3bed7061d9092af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -780,6 +780,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, f->mask); addr_type = key->addr_type; + /* the HW doesn't support frag first/later */ + if (mask->flags & FLOW_DIS_FIRST_FRAG) + return -EOPNOTSUPP; + if (mask->flags & FLOW_DIS_IS_FRAGMENT) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, @@ -1383,7 +1387,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, } ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); - if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { + if (modify_ip_header && ip_proto != IPPROTO_TCP && + ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { pr_info("can't offload re-write of ip proto %d\n", ip_proto); return false; } @@ -2013,7 +2018,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) + if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || + tcf_vlan_push_prio(a)) return -EOPNOTSUPP; attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 
eea7f931cad373f90b2402ade9ce7678d7402920..d560047c0bf9eacbeb48200dfe7574e2d44cb12d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -234,7 +234,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, dma_addr = dma_map_single(sq->pdev, skb_data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - return -ENOMEM; + goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; @@ -252,7 +252,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) - return -ENOMEM; + goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; @@ -264,6 +264,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb, } return num_dma; + +dma_unmap_wqe_err: + mlx5e_dma_unmap_wqe_err(sq, num_dma); + return -ENOMEM; } static inline void @@ -355,17 +359,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -594,17 +596,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, (struct mlx5_wqe_data_seg *)cseg + ds_cnt); if (unlikely(num_dma < 0)) - goto dma_unmap_wqe_err; + goto err_drop; mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, num_bytes, num_dma, wi, cseg); return NETDEV_TX_OK; -dma_unmap_wqe_err: +err_drop: sq->stats.dropped++; - 
mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c77f4c0c776931d11ab93b53229b8a96fb4bc309..82e37250ed01c4c65a4e6ed5a4486027bc613137 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2054,26 +2054,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, memset(vf_stats, 0, sizeof(*vf_stats)); vf_stats->rx_packets = MLX5_GET_CTR(out, received_eth_unicast.packets) + + MLX5_GET_CTR(out, received_ib_unicast.packets) + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_ib_multicast.packets) + MLX5_GET_CTR(out, received_eth_broadcast.packets); vf_stats->rx_bytes = MLX5_GET_CTR(out, received_eth_unicast.octets) + + MLX5_GET_CTR(out, received_ib_unicast.octets) + MLX5_GET_CTR(out, received_eth_multicast.octets) + + MLX5_GET_CTR(out, received_ib_multicast.octets) + MLX5_GET_CTR(out, received_eth_broadcast.octets); vf_stats->tx_packets = MLX5_GET_CTR(out, transmitted_eth_unicast.packets) + + MLX5_GET_CTR(out, transmitted_ib_unicast.packets) + MLX5_GET_CTR(out, transmitted_eth_multicast.packets) + + MLX5_GET_CTR(out, transmitted_ib_multicast.packets) + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); vf_stats->tx_bytes = MLX5_GET_CTR(out, transmitted_eth_unicast.octets) + + MLX5_GET_CTR(out, transmitted_ib_unicast.octets) + MLX5_GET_CTR(out, transmitted_eth_multicast.octets) + + MLX5_GET_CTR(out, transmitted_ib_multicast.octets) + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); vf_stats->multicast = - MLX5_GET_CTR(out, received_eth_multicast.packets); + MLX5_GET_CTR(out, received_eth_multicast.packets) + + MLX5_GET_CTR(out, received_ib_multicast.packets); vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5a7bea688ec873066a74a607ecaddcba7ab3b959..33e5ff081e36ee14e58558bbd5e4e18ae1e4ea8f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -174,6 +174,7 @@ static void del_flow_group(struct fs_node *node); static void del_fte(struct fs_node *node); static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2); +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); static struct mlx5_flow_rule * find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest); @@ -2041,23 +2042,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) static int init_root_ns(struct mlx5_flow_steering *steering) { + int err; + steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); if (!steering->root_ns) - goto cleanup; + return -ENOMEM; - if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) - goto cleanup; + err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node); + if (err) + goto out_err; set_prio_attrs(steering->root_ns); - - if (create_anchor_flow_table(steering)) - goto cleanup; + err = create_anchor_flow_table(steering); + if (err) + goto out_err; return 0; -cleanup: - mlx5_cleanup_fs(steering->dev); - return -ENOMEM; +out_err: + cleanup_root_ns(steering->root_ns); + steering->root_ns = NULL; + return err; } static void clean_tree(struct fs_node *node) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a550cc2997183d980b8a4300f637baa..122506daa586070321c079d53c9d9ff6a9037c45 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), 
MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), - MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), - MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), - MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), - MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), - MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), + MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), + MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), + MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), }; -#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 +#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 struct mlxsw_afk_element_inst { /* element instance in actual block */ const struct mlxsw_afk_element_info *info; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 99bd6e88ebc7e261933c512a723bf9f7a6d3cecd..8b48338b4a70e6e13eba24ea5875276a39754983 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1417,6 +1417,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; + mlxsw_sp_port_vlan->ref_count = 1; mlxsw_sp_port_vlan->vid = vid; list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); @@ -1444,8 +1445,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port 
*mlxsw_sp_port, u16 vid) struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (mlxsw_sp_port_vlan) + if (mlxsw_sp_port_vlan) { + mlxsw_sp_port_vlan->ref_count++; return mlxsw_sp_port_vlan; + } return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); } @@ -1454,6 +1457,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + if (--mlxsw_sp_port_vlan->ref_count != 0) + return; + if (mlxsw_sp_port_vlan->bridge_port) mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); else if (fid) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 88892d47acaebf4df7a2b16c895ea604b8ef7ec3..8c4ce0a0cc825e93999f5130ede9f136052da35c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -194,6 +194,7 @@ struct mlxsw_sp_port_vlan { struct list_head list; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_fid *fid; + unsigned int ref_count; u16 vid; struct mlxsw_sp_bridge_port *bridge_port; struct list_head bridge_vlan_node; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05488b3bcdfc133cb08d69bcec7c3b..54262af4e98f713b7533cb758c2dd6eda181eabc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 
1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index af106be8cc080bcd57986e4bbbb14bc2c09cc547..629bfa0cd3f03d6627c8c4aa10ef836bc3e98671 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2471,7 +2471,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) if (rc) return rc; - /* Free Task CXT */ + /* Free Task CXT ( Intentionally RoCE as task-id is shared between + * RoCE and iWARP ) + */ + proto = PROTOCOLID_ROCE; rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, qed_cxt_get_proto_tid_count(p_hwfn, proto)); if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 6fb99518a61fdef22b42b61ad1b93946257beec4..1b65548661384e1500cbb4445f2e1b0acd131d6d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -360,6 +360,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); qed_rdma_resc_free(p_hwfn); + qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); } static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid) diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 6fc854b120b03520783e16e750d30466fddbdffa..d50cc26354776267882db47d1adfa7c78011f31e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -320,13 +320,11 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) barrier(); writel(txq->tx_db.raw, txq->doorbell_addr); - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). 
Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. + /* Fence required to flush the write combined buffer, since another + * CPU may write to the same doorbell address and data may be lost + * due to relaxed order nature of write combined bar. */ - mmiowb(); + wmb(); } static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, @@ -1247,16 +1245,10 @@ static int qede_rx_process_cqe(struct qede_dev *edev, csum_flag = qede_check_csum(parse_flag); if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { - if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) { + if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) rxq->rx_ip_frags++; - } else { - DP_NOTICE(edev, - "CQE has error, flags = %x, dropping incoming packet\n", - parse_flag); + else rxq->rx_hw_errors++; - qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); - return 0; - } } /* Basic validation passed; Need to prepare an SKB. This would also diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 3ed9033e56dbe9231583b8063128eb775abc9671..44f797ab5d15099e0e66fa813ae362926c649f23 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1204,9 +1204,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) while (tx_q->tpd.consume_idx != hw_consume_idx) { tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); if (tpbuf->dma_addr) { - dma_unmap_single(adpt->netdev->dev.parent, - tpbuf->dma_addr, tpbuf->length, - DMA_TO_DEVICE); + dma_unmap_page(adpt->netdev->dev.parent, + tpbuf->dma_addr, tpbuf->length, + DMA_TO_DEVICE); tpbuf->dma_addr = 0; } @@ -1363,9 +1363,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); tpbuf->length = mapped_len; - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, - skb->data, tpbuf->length, - DMA_TO_DEVICE); + tpbuf->dma_addr = 
dma_map_page(adpt->netdev->dev.parent, + virt_to_page(skb->data), + offset_in_page(skb->data), + tpbuf->length, + DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) @@ -1381,9 +1383,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, if (mapped_len < len) { tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); tpbuf->length = len - mapped_len; - tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, - skb->data + mapped_len, - tpbuf->length, DMA_TO_DEVICE); + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, + virt_to_page(skb->data + + mapped_len), + offset_in_page(skb->data + + mapped_len), + tpbuf->length, DMA_TO_DEVICE); ret = dma_mapping_error(adpt->netdev->dev.parent, tpbuf->dma_addr); if (ret) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index df6f4ab0cd92b872e91c7dc7bfba8c0a9d1d6c84..bbe23053cb33ff7d46d5a7a393b040bc495532e5 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -22,6 +22,8 @@ #include "rmnet_vnd.h" #include "rmnet_private.h" #include "rmnet_map.h" +#include +#include /* Locking scheme - * The shared resource which needs to be protected is realdev->rx_handler_data. 
@@ -44,9 +46,10 @@ /* Local Definitions and Declarations */ -static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = { +static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 2] = { [IFLA_RMNET_MUX_ID] = { .type = NLA_U16 }, [IFLA_RMNET_FLAGS] = { .len = sizeof(struct ifla_rmnet_flags) }, + [IFLA_VLAN_EGRESS_QOS] = { .len = sizeof(struct tcmsg) }, }; static int rmnet_is_real_dev_registered(const struct net_device *real_dev) @@ -286,12 +289,15 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], { u16 mux_id; - if (!data || !data[IFLA_RMNET_MUX_ID]) + if (!data) { return -EINVAL; - - mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) - return -ERANGE; + } else { + if (data[IFLA_RMNET_MUX_ID]) { + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); + if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) + return -ERANGE; + } + } return 0; } @@ -334,6 +340,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], port->data_format = flags->flags & flags->mask; } + if (data[IFLA_VLAN_EGRESS_QOS]) { + struct tcmsg *tcm; + + tcm = nla_data(data[IFLA_VLAN_EGRESS_QOS]); + qmi_rmnet_change_link(dev, port, tcm); + } + return 0; } @@ -343,7 +356,8 @@ static size_t rmnet_get_size(const struct net_device *dev) /* IFLA_RMNET_MUX_ID */ nla_total_size(2) + /* IFLA_RMNET_FLAGS */ - nla_total_size(sizeof(struct ifla_rmnet_flags)); + nla_total_size(sizeof(struct ifla_rmnet_flags)) + + nla_total_size(sizeof(struct tcmsg)); } static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) @@ -463,6 +477,65 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, return 0; } +#ifdef CONFIG_QCOM_QMI_DFC +void *rmnet_get_qmi_pt(void *port) +{ + if (port) + return ((struct rmnet_port *)port)->qmi_info; + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_qmi_pt); + +void *rmnet_get_qos_pt(struct net_device *dev) +{ + if (dev) + return ((struct rmnet_priv *)netdev_priv(dev))->qos_info; + + return NULL; +} 
+EXPORT_SYMBOL(rmnet_get_qos_pt); + +void *rmnet_get_rmnet_port(struct net_device *dev) +{ + struct rmnet_priv *priv; + + if (dev) { + priv = (struct rmnet_priv *)netdev_priv(dev); + return (void *)rmnet_get_port(priv->real_dev); + } + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_rmnet_port); + +struct net_device *rmnet_get_rmnet_dev(void *port, uint8_t mux_id) +{ + struct rmnet_endpoint *ep; + + ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id); + if (ep) + return ep->egress_dev; + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_rmnet_dev); + +void rmnet_reset_qmi_pt(void *port) +{ + if (port) + ((struct rmnet_port *)port)->qmi_info = NULL; +} +EXPORT_SYMBOL(rmnet_reset_qmi_pt); + +void rmnet_init_qmi_pt(void *port, void *qmi) +{ + if (port) + ((struct rmnet_port *)port)->qmi_info = qmi; +} +EXPORT_SYMBOL(rmnet_init_qmi_pt); +#endif + /* Startup/Shutdown */ static int __init rmnet_init(void) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index fd2c94e337d888e5db6b445100289039d8b607a8..2dff6f6885822027fda01b15dc432d0bd5f22c09 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -50,6 +50,8 @@ struct rmnet_port { struct timespec agg_time; struct timespec agg_last; struct hrtimer hrtimer; + + void *qmi_info; }; extern struct rtnl_link_ops rmnet_link_ops; @@ -85,6 +87,7 @@ struct rmnet_priv { struct rmnet_pcpu_stats __percpu *pcpu_stats; struct gro_cells gro_cells; struct rmnet_priv_stats stats; + void *qos_info; }; struct rmnet_port *rmnet_get_port(struct net_device *real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index cb02e1a015c1a207db2dda91518562844d343a1f..d644ce998cdde4a3bb9e96f9e0c787ac9969e7c0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -23,6 +23,8 @@ #include "rmnet_map.h" 
#include "rmnet_vnd.h" +#include + /* RX/TX Fixup */ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev) @@ -61,6 +63,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, priv = netdev_priv(dev); if (priv->real_dev) { rmnet_egress_handler(skb); + qmi_rmnet_burst_fc_check(dev, skb); } else { this_cpu_inc(priv->pcpu_stats->stats.tx_drops); kfree_skb(skb); @@ -108,6 +111,9 @@ static void rmnet_vnd_uninit(struct net_device *dev) gro_cells_destroy(&priv->gro_cells); free_percpu(priv->pcpu_stats); + + qmi_rmnet_qos_exit(dev); + priv->qos_info = NULL; } static void rmnet_get_stats64(struct net_device *dev, @@ -254,6 +260,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, priv = netdev_priv(rmnet_dev); priv->mux_id = id; priv->real_dev = real_dev; + priv->qos_info = qmi_rmnet_qos_init(real_dev, id); netdev_dbg(rmnet_dev, "rmnet dev created\n"); } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d24b47b8e0b27e0f44243f5a1011779c0ebd09f9..d118da5a10a2059df4255d46a2652714f5c44a93 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq(irq); + disable_irq_nosync(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index db553d4e8d2298ca84b9f43e37350e665cee4b81..b98fcc9e93e5ace2df1ea69f06136f074cd33b77 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4886,6 +4886,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) static void rtl_pll_power_up(struct rtl8169_private *tp) { rtl_generic_op(tp, tp->pll_power_ops.up); + + /* give MAC/PHY some time to resume */ + msleep(20); } static void rtl_init_pll_power_ops(struct 
rtl8169_private *tp) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index db31963c5d9dd940f1a46973e5a614ecf565b2c3..38080e95a82dccc6286a0cd40829be35ac9612ee 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -753,6 +753,7 @@ static struct sh_eth_cpu_data sh7757_data = { .rpadir = 1, .rpadir_value = 2 << 16, .rtrate = 1, + .dual_port = 1, }; #define SH_GIGA_ETH_BASE 0xfee00000UL @@ -831,6 +832,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = { .no_trimd = 1, .no_ade = 1, .tsu = 1, + .dual_port = 1, }; /* SH7734 */ @@ -901,6 +903,7 @@ static struct sh_eth_cpu_data sh7763_data = { .tsu = 1, .irq_flags = IRQF_SHARED, .magic = 1, + .dual_port = 1, }; static struct sh_eth_cpu_data sh7619_data = { @@ -933,6 +936,7 @@ static struct sh_eth_cpu_data sh771x_data = { EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, .tsu = 1, + .dual_port = 1, }; static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) @@ -2911,7 +2915,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, /* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) { - if (sh_eth_is_rz_fast_ether(mdp)) { + if (!mdp->cd->dual_port) { sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); /* Enable POST registers */ diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711cd0dc331e132eaa7d0f20795f10c..6ab3d46d4f28611981257216bc026ad66fa6eaaa 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -509,6 +509,7 @@ struct sh_eth_cpu_data { unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ + unsigned dual_port:1; /* Dual EtherC/E-DMAC */ }; struct 
sh_eth_private { diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 012fb66eed8dd618d63fbeaad184accb0c08fc39..f0afb88d7bc2b02de3dc1054ec2ec5803f452a35 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) pdata = netdev_priv(dev); BUG_ON(!pdata); BUG_ON(!pdata->ioaddr); - WARN_ON(dev->phydev); SMSC_TRACE(pdata, ifdown, "Stopping driver"); + unregister_netdev(dev); + mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); - unregister_netdev(dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smsc911x-memory"); if (!res) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d0cc737950560fe644d901a1ea67e4bfd3f2482a..9866d2e34cdd8165fce6ad75c859566496c272d5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1829,6 +1829,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) if (unlikely(status & tx_dma_own)) break; + /* Make sure descriptor fields are read after reading + * the own bit. + */ + dma_rmb(); + /* Just consider the last segment and ...*/ if (likely(!(status & tx_not_ls))) { /* ... verify the status error condition */ @@ -2368,7 +2373,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) continue; packet = priv->plat->rx_queues_cfg[queue].pkt_route; - priv->hw->mac->rx_queue_prio(priv->hw, packet, queue); + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); } } @@ -2918,8 +2923,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); /* If context desc is used to change MSS */ - if (mss_desc) + if (mss_desc) { + /* Make sure that first descriptor has been completely + * written, including its own bit. 
This is because MSS is + * actually before first descriptor, so we need to make + * sure that MSS's own bit is the last thing written. + */ + dma_wmb(); priv->hw->desc->set_tx_owner(mss_desc); + } /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90e5028ac147f3134a8ec2348d0bb5..e92f41d20a2cfabc0fee27fcc1a878dfb6ec5693 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT; - len -= ETH_FCS_LEN; + append_size = len + ETH_HLEN + ETH_FCS_LEN; addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; @@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, RCR_ENTRY_PKTBUFSZ_SHIFT]; off = addr & ~PAGE_MASK; - append_size = rcr_size; if (num_rcr == 1) { int ptype; @@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, else skb_checksum_none_assert(skb); } else if (!(val & RCR_ENTRY_MULTI)) - append_size = len - skb->len; + append_size = append_size - skb->len; niu_rx_skb_append(skb, page, off, append_size, rcr_size); if ((page->index + rp->rbr_block_size) - rcr_size == addr) { diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 0b95105f706007ae9e0ff4325ea9983f9d514ee4..65347d2f139b7145413682c1ec2543faac7d099c 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -311,7 +311,7 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->ethtool_ops = &vnet_ethtool_ops; dev->watchdog_timeo = VNET_TX_TIMEOUT; - dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_HW_CSUM | NETIF_F_SG; 
dev->features = dev->hw_features; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 992c43b1868f97a627a475d91f2c3293286f9d1d..8cb44eabc28355b2e2da41c70953fe8b651e520f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1260,6 +1260,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); + cpsw_ale_control_set(cpsw->ale, slave_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 1); } static void soft_reset_slave(struct cpsw_slave *slave) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4f3afcf92a7c0eac6993d24c039173d7abc0eada..01017dd88802bb8829f7d4c29baf80692676e77a 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -179,7 +179,7 @@ struct rndis_device { u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; - u16 ind_table[ITAB_NUM]; + u16 rx_table[ITAB_NUM]; }; @@ -192,7 +192,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, const struct netvsc_device_info *info); int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx); void netvsc_device_remove(struct hv_device *device); -int netvsc_send(struct net_device_context *ndc, +int netvsc_send(struct net_device *net, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer *page_buffer, @@ -208,7 +208,6 @@ void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); void rndis_set_subchannel(struct work_struct *w); -bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, @@ -659,6 +658,10 @@ struct nvsp_message { #define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_SEND_BUFFER_ID 0 +#define 
NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \ + NETIF_F_TSO | NETIF_F_IPV6_CSUM | \ + NETIF_F_TSO6) + #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 #define VRSS_CHANNEL_DEFAULT 8 @@ -734,7 +737,7 @@ struct net_device_context { u32 tx_checksum_mask; - u32 tx_send_table[VRSS_SEND_TAB_SIZE]; + u32 tx_table[VRSS_SEND_TAB_SIZE]; /* Ethtool settings */ bool udp4_l4_hash; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index a6bafcf55776075c1f7468ec9a05cee1b139a9b0..4647ecbe6f36df2f9410c3474fe18717ed089eb2 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -89,6 +89,11 @@ static void free_netvsc_device(struct rcu_head *head) = container_of(head, struct netvsc_device, rcu); int i; + kfree(nvdev->extension); + vfree(nvdev->recv_buf); + vfree(nvdev->send_buf); + kfree(nvdev->send_section_map); + for (i = 0; i < VRSS_CHANNEL_MAX; i++) vfree(nvdev->chan_table[i].mrc.slots); @@ -100,12 +105,11 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev) call_rcu(&nvdev->rcu, free_netvsc_device); } -static void netvsc_destroy_buf(struct hv_device *device) +static void netvsc_revoke_recv_buf(struct hv_device *device, + struct netvsc_device *net_device) { - struct nvsp_message *revoke_packet; struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *ndc = netdev_priv(ndev); - struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev); + struct nvsp_message *revoke_packet; int ret; /* @@ -147,28 +151,14 @@ static void netvsc_destroy_buf(struct hv_device *device) } net_device->recv_section_cnt = 0; } +} - /* Teardown the gpadl on the vsp end */ - if (net_device->recv_buf_gpadl_handle) { - ret = vmbus_teardown_gpadl(device->channel, - net_device->recv_buf_gpadl_handle); - - /* If we failed here, we might as well return and have a leak - * rather than continue and a bugchk - */ - if (ret != 0) { - netdev_err(ndev, - "unable to teardown receive 
buffer's gpadl\n"); - return; - } - net_device->recv_buf_gpadl_handle = 0; - } - - if (net_device->recv_buf) { - /* Free up the receive buffer */ - vfree(net_device->recv_buf); - net_device->recv_buf = NULL; - } +static void netvsc_revoke_send_buf(struct hv_device *device, + struct netvsc_device *net_device) +{ + struct net_device *ndev = hv_get_drvdata(device); + struct nvsp_message *revoke_packet; + int ret; /* Deal with the send buffer we may have setup. * If we got a send section size, it means we received a @@ -210,7 +200,36 @@ static void netvsc_destroy_buf(struct hv_device *device) } net_device->send_section_cnt = 0; } - /* Teardown the gpadl on the vsp end */ +} + +static void netvsc_teardown_recv_gpadl(struct hv_device *device, + struct netvsc_device *net_device) +{ + struct net_device *ndev = hv_get_drvdata(device); + int ret; + + if (net_device->recv_buf_gpadl_handle) { + ret = vmbus_teardown_gpadl(device->channel, + net_device->recv_buf_gpadl_handle); + + /* If we failed here, we might as well return and have a leak + * rather than continue and a bugchk + */ + if (ret != 0) { + netdev_err(ndev, + "unable to teardown receive buffer's gpadl\n"); + return; + } + net_device->recv_buf_gpadl_handle = 0; + } +} + +static void netvsc_teardown_send_gpadl(struct hv_device *device, + struct netvsc_device *net_device) +{ + struct net_device *ndev = hv_get_drvdata(device); + int ret; + if (net_device->send_buf_gpadl_handle) { ret = vmbus_teardown_gpadl(device->channel, net_device->send_buf_gpadl_handle); @@ -225,12 +244,6 @@ static void netvsc_destroy_buf(struct hv_device *device) } net_device->send_buf_gpadl_handle = 0; } - if (net_device->send_buf) { - /* Free up the send buffer */ - vfree(net_device->send_buf); - net_device->send_buf = NULL; - } - kfree(net_device->send_section_map); } int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) @@ -425,7 +438,10 @@ static int netvsc_init_buf(struct hv_device *device, goto exit; cleanup: - 
netvsc_destroy_buf(device); + netvsc_revoke_recv_buf(device, net_device); + netvsc_revoke_send_buf(device, net_device); + netvsc_teardown_recv_gpadl(device, net_device); + netvsc_teardown_send_gpadl(device, net_device); exit: return ret; @@ -544,11 +560,6 @@ static int netvsc_connect_vsp(struct hv_device *device, return ret; } -static void netvsc_disconnect_vsp(struct hv_device *device) -{ - netvsc_destroy_buf(device); -} - /* * netvsc_device_remove - Callback when the root bus device is removed */ @@ -560,12 +571,24 @@ void netvsc_device_remove(struct hv_device *device) = rtnl_dereference(net_device_ctx->nvdev); int i; - cancel_work_sync(&net_device->subchan_work); + /* + * Revoke receive buffer. If host is pre-Win2016 then tear down + * receive buffer GPADL. Do the same for send buffer. + */ + netvsc_revoke_recv_buf(device, net_device); + if (vmbus_proto_version < VERSION_WIN10) + netvsc_teardown_recv_gpadl(device, net_device); - netvsc_disconnect_vsp(device); + netvsc_revoke_send_buf(device, net_device); + if (vmbus_proto_version < VERSION_WIN10) + netvsc_teardown_send_gpadl(device, net_device); RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + /* And disassociate NAPI context from device */ + for (i = 0; i < net_device->num_chn; i++) + netif_napi_del(&net_device->chan_table[i].napi); + /* * At this point, no one should be accessing net_device * except in here @@ -575,9 +598,14 @@ void netvsc_device_remove(struct hv_device *device) /* Now, we can close the channel safely */ vmbus_close(device->channel); - /* And dissassociate NAPI context from device */ - for (i = 0; i < net_device->num_chn; i++) - netif_napi_del(&net_device->chan_table[i].napi); + /* + * If host is Win2016 or higher then we do the GPADL tear down + * here after VMBus is closed. 
+ */ + if (vmbus_proto_version >= VERSION_WIN10) { + netvsc_teardown_recv_gpadl(device, net_device); + netvsc_teardown_send_gpadl(device, net_device); + } /* Release all resources */ free_netvsc_device_rcu(net_device); @@ -643,13 +671,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, queue_sends = atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); - if (net_device->destroy && queue_sends == 0) - wake_up(&net_device->wait_drain); + if (unlikely(net_device->destroy)) { + if (queue_sends == 0) + wake_up(&net_device->wait_drain); + } else { + struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); - if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || - queue_sends < 1)) - netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); + if (netif_tx_queue_stopped(txq) && + (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || + queue_sends < 1)) { + netif_tx_wake_queue(txq); + } + } } static void netvsc_send_completion(struct netvsc_device *net_device, @@ -697,13 +730,13 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) return NETVSC_INVALID_INDEX; } -static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, - unsigned int section_index, - u32 pend_size, - struct hv_netvsc_packet *packet, - struct rndis_message *rndis_msg, - struct hv_page_buffer *pb, - struct sk_buff *skb) +static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, + unsigned int section_index, + u32 pend_size, + struct hv_netvsc_packet *packet, + struct rndis_message *rndis_msg, + struct hv_page_buffer *pb, + bool xmit_more) { char *start = net_device->send_buf; char *dest = start + (section_index * net_device->send_section_size) @@ -716,7 +749,8 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, packet->page_buf_cnt; /* Add padding */ - if (skb->xmit_more && remain && 
!packet->cp_partial) { + remain = packet->total_data_buflen & (net_device->pkt_align - 1); + if (xmit_more && remain) { padding = net_device->pkt_align - remain; rndis_msg->msg_len += padding; packet->total_data_buflen += padding; @@ -736,8 +770,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, memset(dest, 0, padding); msg_size += padding; } - - return msg_size; } static inline int netvsc_send_pkt( @@ -825,12 +857,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, } /* RCU already held by caller */ -int netvsc_send(struct net_device_context *ndev_ctx, +int netvsc_send(struct net_device *ndev, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer *pb, struct sk_buff *skb) { + struct net_device_context *ndev_ctx = netdev_priv(ndev); struct netvsc_device *net_device = rcu_dereference_bh(ndev_ctx->nvdev); struct hv_device *device = ndev_ctx->device_ctx; @@ -841,20 +874,12 @@ int netvsc_send(struct net_device_context *ndev_ctx, struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; struct sk_buff *msd_skb = NULL; - bool try_batch; - bool xmit_more = (skb != NULL) ? skb->xmit_more : false; + bool try_batch, xmit_more; /* If device is rescinded, return error and packet will get dropped. */ if (unlikely(!net_device || net_device->destroy)) return -ENODEV; - /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get - * here before the negotiation with the host is finished and - * send_section_map may not be allocated yet. - */ - if (unlikely(!net_device->send_section_map)) - return -EAGAIN; - nvchan = &net_device->chan_table[packet->q_idx]; packet->send_buf_index = NETVSC_INVALID_INDEX; packet->cp_partial = false; @@ -862,10 +887,8 @@ int netvsc_send(struct net_device_context *ndev_ctx, /* Send control message directly without accessing msd (Multi-Send * Data) field which may be changed during data packet processing. 
*/ - if (!skb) { - cur_send = packet; - goto send_now; - } + if (!skb) + return netvsc_send_pkt(device, packet, net_device, pb, skb); /* batch packets in send buffer if possible */ msdp = &nvchan->msd; @@ -893,10 +916,17 @@ int netvsc_send(struct net_device_context *ndev_ctx, } } + /* Keep aggregating only if stack says more data is coming + * and not doing mixed modes send and not flow blocked + */ + xmit_more = skb->xmit_more && + !packet->cp_partial && + !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx)); + if (section_index != NETVSC_INVALID_INDEX) { netvsc_copy_to_send_buf(net_device, section_index, msd_len, - packet, rndis_msg, pb, skb); + packet, rndis_msg, pb, xmit_more); packet->send_buf_index = section_index; @@ -916,7 +946,7 @@ int netvsc_send(struct net_device_context *ndev_ctx, if (msdp->skb) dev_consume_skb_any(msdp->skb); - if (xmit_more && !packet->cp_partial) { + if (xmit_more) { msdp->skb = skb; msdp->pkt = packet; msdp->count++; @@ -942,7 +972,6 @@ int netvsc_send(struct net_device_context *ndev_ctx, } } -send_now: if (cur_send) ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); @@ -1107,7 +1136,7 @@ static void netvsc_send_table(struct hv_device *hdev, nvmsg->msg.v5_msg.send_table.offset); for (i = 0; i < count; i++) - net_device_ctx->tx_send_table[i] = tab[i]; + net_device_ctx->tx_table[i] = tab[i]; } static void netvsc_send_vf(struct net_device_context *net_device_ctx, @@ -1206,9 +1235,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) if (send_recv_completions(ndev, net_device, nvchan) == 0 && work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound)) { + hv_end_read(&channel->inbound) && + napi_schedule_prep(napi)) { hv_begin_read(&channel->inbound); - napi_reschedule(napi); + __napi_schedule(napi); } /* Driver may overshoot since multiple packets per descriptor */ @@ -1231,7 +1261,7 @@ void netvsc_channel_cb(void *context) /* disable interupts from host */ 
hv_begin_read(rbi); - __napi_schedule(&nvchan->napi); + __napi_schedule_irqoff(&nvchan->napi); } } @@ -1252,6 +1282,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, if (!net_device) return ERR_PTR(-ENOMEM); + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + net_device_ctx->tx_table[i] = 0; + net_device->ring_size = ring_size; /* Because the device uses NAPI, all the interrupt batching and @@ -1286,7 +1319,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, net_device->chan_table); if (ret != 0) { - netif_napi_del(&net_device->chan_table[0].napi); netdev_err(ndev, "unable to open channel: %d\n", ret); goto cleanup; } @@ -1296,11 +1328,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, napi_enable(&net_device->chan_table[0].napi); - /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is - * populated. - */ - rcu_assign_pointer(net_device_ctx->nvdev, net_device); - /* Connect with the NetVsp */ ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { @@ -1309,6 +1336,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, goto close; } + /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is + * populated. 
+ */ + rcu_assign_pointer(net_device_ctx->nvdev, net_device); + return net_device; close: @@ -1319,6 +1351,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, vmbus_close(device->channel); cleanup: + netif_napi_del(&net_device->chan_table[0].napi); free_netvsc_device(&net_device->rcu); return ERR_PTR(ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c849de3cb0465da8cb993379c9898263f4481b37..3a7241c8713cf15530856c3509e42e82b9024741 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -45,7 +45,10 @@ #include "hyperv_net.h" -#define RING_SIZE_MIN 64 +#define RING_SIZE_MIN 64 +#define RETRY_US_LO 5000 +#define RETRY_US_HI 10000 +#define RETRY_MAX 2000 /* >10 sec */ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) @@ -63,12 +66,43 @@ static int debug = -1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -static void netvsc_set_multicast_list(struct net_device *net) +static void netvsc_change_rx_flags(struct net_device *net, int change) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + int inc; + + if (!vf_netdev) + return; + + if (change & IFF_PROMISC) { + inc = (net->flags & IFF_PROMISC) ? 1 : -1; + dev_set_promiscuity(vf_netdev, inc); + } + + if (change & IFF_ALLMULTI) { + inc = (net->flags & IFF_ALLMULTI) ? 
1 : -1; + dev_set_allmulti(vf_netdev, inc); + } +} - rndis_filter_update(nvdev); +static void netvsc_set_rx_mode(struct net_device *net) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev; + struct netvsc_device *nvdev; + + rcu_read_lock(); + vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) { + dev_uc_sync(vf_netdev, net); + dev_mc_sync(vf_netdev, net); + } + + nvdev = rcu_dereference(ndev_ctx->nvdev); + if (nvdev) + rndis_filter_update(nvdev); + rcu_read_unlock(); } static int netvsc_open(struct net_device *net) @@ -88,10 +122,7 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_wake_all_queues(net); - rdev = nvdev->extension; - if (!rdev->link_state) netif_carrier_on(net); @@ -109,36 +140,25 @@ static int netvsc_open(struct net_device *net) return 0; } -static int netvsc_close(struct net_device *net) +static int netvsc_wait_until_empty(struct netvsc_device *nvdev) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct net_device *vf_netdev - = rtnl_dereference(net_device_ctx->vf_netdev); - struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - int ret = 0; - u32 aread, i, msec = 10, retry = 0, retry_max = 20; - struct vmbus_channel *chn; - - netif_tx_disable(net); - - /* No need to close rndis filter if it is removed already */ - if (!nvdev) - goto out; - - ret = rndis_filter_close(nvdev); - if (ret != 0) { - netdev_err(net, "unable to close device (ret %d).\n", ret); - return ret; - } + unsigned int retry = 0; + int i; /* Ensure pending bytes in ring are read */ - while (true) { - aread = 0; + for (;;) { + u32 aread = 0; + for (i = 0; i < nvdev->num_chn; i++) { - chn = nvdev->chan_table[i].channel; + struct vmbus_channel *chn + = nvdev->chan_table[i].channel; + if (!chn) continue; + /* make sure receive not running now */ + napi_synchronize(&nvdev->chan_table[i].napi); + aread = hv_get_bytes_to_read(&chn->inbound); if (aread) break; @@ -148,22 
+168,40 @@ static int netvsc_close(struct net_device *net) break; } - retry++; - if (retry > retry_max || aread == 0) - break; + if (aread == 0) + return 0; - msleep(msec); + if (++retry > RETRY_MAX) + return -ETIMEDOUT; - if (msec < 1000) - msec *= 2; + usleep_range(RETRY_US_LO, RETRY_US_HI); } +} - if (aread) { - netdev_err(net, "Ring buffer not empty after closing rndis\n"); - ret = -ETIMEDOUT; +static int netvsc_close(struct net_device *net) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct net_device *vf_netdev + = rtnl_dereference(net_device_ctx->vf_netdev); + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); + int ret; + + netif_tx_disable(net); + + /* No need to close rndis filter if it is removed already */ + if (!nvdev) + return 0; + + ret = rndis_filter_close(nvdev); + if (ret != 0) { + netdev_err(net, "unable to close device (ret %d).\n", ret); + return ret; } -out: + ret = netvsc_wait_until_empty(nvdev); + if (ret) + netdev_err(net, "Ring buffer not empty after closing rndis\n"); + if (vf_netdev) dev_close(vf_netdev); @@ -234,8 +272,8 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sock *sk = skb->sk; int q_idx; - q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) & - (VRSS_SEND_TAB_SIZE - 1)]; + q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) & + (VRSS_SEND_TAB_SIZE - 1)]; /* If queue index changed record the new value */ if (q_idx != old_idx && @@ -284,8 +322,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, rcu_read_lock(); vf_netdev = rcu_dereference(ndc->vf_netdev); if (vf_netdev) { - txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; - qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; + + if (vf_ops->ndo_select_queue) + txq = vf_ops->ndo_select_queue(vf_netdev, skb, + accel_priv, fallback); + else + txq = fallback(vf_netdev, skb); + + /* Record the queue selected by VF so that it can be + * used for common case where VF has more queues than + * the synthetic device. + */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; } else { txq = netvsc_pick_tx(ndev, skb); } @@ -614,7 +663,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb); + ret = netvsc_send(net, packet, rndis_msg, pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; @@ -810,16 +859,81 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_detach(struct net_device *ndev, + struct netvsc_device *nvdev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hdev = ndev_ctx->device_ctx; + int ret; + + /* Don't try continuing to try and setup sub channels */ + if (cancel_work_sync(&nvdev->subchan_work)) + nvdev->num_chn = 1; + + /* If device was up (receiving) then shutdown */ + if (netif_running(ndev)) { + netif_tx_disable(ndev); + + ret = rndis_filter_close(nvdev); + if (ret) { + netdev_err(ndev, + "unable to close device (ret %d).\n", ret); + return ret; + } + + ret = netvsc_wait_until_empty(nvdev); + if (ret) { + netdev_err(ndev, + "Ring buffer not empty after closing rndis\n"); + return ret; + } + } + + netif_device_detach(ndev); + + rndis_filter_device_remove(hdev, nvdev); + + return 0; +} + +static int netvsc_attach(struct net_device *ndev, + struct netvsc_device_info *dev_info) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct hv_device *hdev = ndev_ctx->device_ctx; + struct netvsc_device *nvdev; + struct 
rndis_device *rdev; + int ret; + + nvdev = rndis_filter_device_add(hdev, dev_info); + if (IS_ERR(nvdev)) + return PTR_ERR(nvdev); + + /* Note: enable and attach happen when sub-channels setup */ + + netif_carrier_off(ndev); + + if (netif_running(ndev)) { + ret = rndis_filter_open(nvdev); + if (ret) + return ret; + + rdev = nvdev->extension; + if (!rdev->link_state) + netif_carrier_on(ndev); + } + + return 0; +} + static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int orig, count = channels->combined_count; struct netvsc_device_info device_info; - bool was_opened; - int ret = 0; + int ret; /* We do not support separate count for rx, tx, or other */ if (count == 0 || @@ -836,9 +950,6 @@ static int netvsc_set_channels(struct net_device *net, return -EINVAL; orig = nvdev->num_chn; - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; @@ -848,28 +959,17 @@ static int netvsc_set_channels(struct net_device *net, device_info.recv_sections = nvdev->recv_section_cnt; device_info.recv_section_size = nvdev->recv_section_size; - rndis_filter_device_remove(dev, nvdev); + ret = netvsc_detach(net, nvdev); + if (ret) + return ret; - nvdev = rndis_filter_device_add(dev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); + ret = netvsc_attach(net, &device_info); + if (ret) { device_info.num_chn = orig; - nvdev = rndis_filter_device_add(dev, &device_info); - - if (IS_ERR(nvdev)) { - netdev_err(net, "restoring channel setting failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } + if (netvsc_attach(net, &device_info)) + netdev_err(net, "restoring channel setting failed\n"); } - if (was_opened) - rndis_filter_open(nvdev); - - /* We may 
have missed link change notifications */ - net_device_ctx->last_reconfig = 0; - schedule_delayed_work(&net_device_ctx->dwork, 0); - return ret; } @@ -936,10 +1036,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device_context *ndevctx = netdev_priv(ndev); struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct hv_device *hdev = ndevctx->device_ctx; int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; - bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) @@ -952,11 +1050,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return ret; } - netif_device_detach(ndev); - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); - memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; @@ -965,35 +1058,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.recv_sections = nvdev->recv_section_cnt; device_info.recv_section_size = nvdev->recv_section_size; - rndis_filter_device_remove(hdev, nvdev); + ret = netvsc_detach(ndev, nvdev); + if (ret) + goto rollback_vf; ndev->mtu = mtu; - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); - - /* Attempt rollback to original MTU */ - ndev->mtu = orig_mtu; - nvdev = rndis_filter_device_add(hdev, &device_info); - - if (vf_netdev) - dev_set_mtu(vf_netdev, orig_mtu); - - if (IS_ERR(nvdev)) { - netdev_err(ndev, "restoring mtu failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } - } + ret = netvsc_attach(ndev, &device_info); + if (ret) + goto rollback; - if (was_opened) - rndis_filter_open(nvdev); + return 0; - netif_device_attach(ndev); +rollback: + /* Attempt rollback to original MTU */ + ndev->mtu = orig_mtu; - /* We may have missed link change notifications */ - schedule_delayed_work(&ndevctx->dwork, 0); + if 
(netvsc_attach(ndev, &device_info)) + netdev_err(ndev, "restoring mtu failed\n"); +rollback_vf: + if (vf_netdev) + dev_set_mtu(vf_netdev, orig_mtu); return ret; } @@ -1378,7 +1463,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - indir[i] = rndis_dev->ind_table[i]; + indir[i] = rndis_dev->rx_table[i]; } if (key) @@ -1408,7 +1493,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, return -EINVAL; for (i = 0; i < ITAB_NUM; i++) - rndis_dev->ind_table[i] = indir[i]; + rndis_dev->rx_table[i] = indir[i]; } if (!key) { @@ -1459,11 +1544,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, { struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); - struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; struct ethtool_ringparam orig; u32 new_tx, new_rx; - bool was_opened; int ret = 0; if (!nvdev || nvdev->destroy) @@ -1489,34 +1572,18 @@ static int netvsc_set_ringparam(struct net_device *ndev, device_info.recv_sections = new_rx; device_info.recv_section_size = nvdev->recv_section_size; - netif_device_detach(ndev); - was_opened = rndis_filter_opened(nvdev); - if (was_opened) - rndis_filter_close(nvdev); - - rndis_filter_device_remove(hdev, nvdev); - - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - ret = PTR_ERR(nvdev); + ret = netvsc_detach(ndev, nvdev); + if (ret) + return ret; + ret = netvsc_attach(ndev, &device_info); + if (ret) { device_info.send_sections = orig.tx_pending; device_info.recv_sections = orig.rx_pending; - nvdev = rndis_filter_device_add(hdev, &device_info); - if (IS_ERR(nvdev)) { - netdev_err(ndev, "restoring ringparam failed: %ld\n", - PTR_ERR(nvdev)); - return ret; - } - } - if (was_opened) - rndis_filter_open(nvdev); - netif_device_attach(ndev); - - /* We may have missed link change notifications */ 
- ndevctx->last_reconfig = 0; - schedule_delayed_work(&ndevctx->dwork, 0); + if (netvsc_attach(ndev, &device_info)) + netdev_err(ndev, "restoring ringparam failed"); + } return ret; } @@ -1546,7 +1613,8 @@ static const struct net_device_ops device_ops = { .ndo_open = netvsc_open, .ndo_stop = netvsc_close, .ndo_start_xmit = netvsc_start_xmit, - .ndo_set_rx_mode = netvsc_set_multicast_list, + .ndo_change_rx_flags = netvsc_change_rx_flags, + .ndo_set_rx_mode = netvsc_set_rx_mode, .ndo_change_mtu = netvsc_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = netvsc_set_mac_addr, @@ -1742,7 +1810,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto rx_handler_failed; } - ret = netdev_upper_dev_link(vf_netdev, ndev); + ret = netdev_master_upper_dev_link(vf_netdev, ndev, + NULL, NULL); if (ret != 0) { netdev_err(vf_netdev, "can not set master device %s (err = %d)\n", @@ -1777,6 +1846,15 @@ static void __netvsc_vf_setup(struct net_device *ndev, netdev_warn(vf_netdev, "unable to change mtu to %u\n", ndev->mtu); + /* set multicast etc flags on VF */ + dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); + + /* sync address list from ndev to VF */ + netif_addr_lock_bh(ndev); + dev_uc_sync(vf_netdev, ndev); + dev_mc_sync(vf_netdev, ndev); + netif_addr_unlock_bh(ndev); + if (netif_running(ndev)) { ret = dev_open(vf_netdev); if (ret) @@ -1931,6 +2009,12 @@ static int netvsc_probe(struct hv_device *dev, /* We always need headroom for rndis header */ net->needed_headroom = RNDIS_AND_PPI_SIZE; + /* Initialize the number of queues to be 1, we may change it if more + * channels are offered later. 
+ */ + netif_set_real_num_tx_queues(net, 1); + netif_set_real_num_rx_queues(net, 1); + /* Notify the netvsc driver of the new device */ memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; @@ -1949,7 +2033,7 @@ static int netvsc_probe(struct hv_device *dev, memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); - /* hw_features computed in rndis_filter_device_add */ + /* hw_features computed in rndis_netdev_set_hwcaps() */ net->features = net->hw_features | NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; @@ -1986,8 +2070,8 @@ static int netvsc_probe(struct hv_device *dev, static int netvsc_remove(struct hv_device *dev) { struct net_device_context *ndev_ctx; - struct net_device *vf_netdev; - struct net_device *net; + struct net_device *vf_netdev, *net; + struct netvsc_device *nvdev; net = hv_get_drvdata(dev); if (net == NULL) { @@ -1997,10 +2081,14 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); - netif_device_detach(net); - cancel_delayed_work_sync(&ndev_ctx->dwork); + rcu_read_lock(); + nvdev = rcu_dereference(ndev_ctx->nvdev); + + if (nvdev) + cancel_work_sync(&nvdev->subchan_work); + /* * Call to the vsc driver to let it know that the device is being * removed. Also blocks mtu and channel changes. 
@@ -2010,11 +2098,13 @@ static int netvsc_remove(struct hv_device *dev) if (vf_netdev) netvsc_unregister_vf(vf_netdev); + if (nvdev) + rndis_filter_device_remove(dev, nvdev); + unregister_netdevice(net); - rndis_filter_device_remove(dev, - rtnl_dereference(ndev_ctx->nvdev)); rtnl_unlock(); + rcu_read_unlock(); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 065b204d8e17f6bde17be931ff8d1758f529e6cc..d1ae184008b41866d642561c21b5ead9983c449b 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -217,7 +217,6 @@ static int rndis_filter_send_request(struct rndis_device *dev, struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); int ret; /* Setup the packet to send it */ @@ -245,7 +244,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, } rcu_read_lock_bh(); - ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL); + ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL); rcu_read_unlock_bh(); return ret; @@ -267,13 +266,23 @@ static void rndis_set_link_state(struct rndis_device *rdev, } } -static void rndis_filter_receive_response(struct rndis_device *dev, - struct rndis_message *resp) +static void rndis_filter_receive_response(struct net_device *ndev, + struct netvsc_device *nvdev, + const struct rndis_message *resp) { + struct rndis_device *dev = nvdev->extension; struct rndis_request *request = NULL; bool found = false; unsigned long flags; - struct net_device *ndev = dev->ndev; + + /* This should never happen, it means control message + * response received after device removed. 
+ */ + if (dev->state == RNDIS_DEV_UNINITIALIZED) { + netdev_err(ndev, + "got rndis message uninitialized\n"); + return; + } spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { @@ -354,7 +363,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) } static int rndis_filter_receive_data(struct net_device *ndev, - struct rndis_device *dev, + struct netvsc_device *nvdev, struct rndis_message *msg, struct vmbus_channel *channel, void *data, u32 data_buflen) @@ -374,7 +383,7 @@ static int rndis_filter_receive_data(struct net_device *ndev, * should be the data packet size plus the trailer padding size */ if (unlikely(data_buflen < rndis_pkt->data_len)) { - netdev_err(dev->ndev, "rndis message buffer " + netdev_err(ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", data_buflen, rndis_pkt->data_len); @@ -402,34 +411,20 @@ int rndis_filter_receive(struct net_device *ndev, void *data, u32 buflen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct rndis_device *rndis_dev = net_dev->extension; struct rndis_message *rndis_msg = data; - /* Make sure the rndis device state is initialized */ - if (unlikely(!rndis_dev)) { - netif_err(net_device_ctx, rx_err, ndev, - "got rndis message but no rndis device!\n"); - return NVSP_STAT_FAIL; - } - - if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { - netif_err(net_device_ctx, rx_err, ndev, - "got rndis message uninitialized\n"); - return NVSP_STAT_FAIL; - } - if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(dev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: - return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg, + return rndis_filter_receive_data(ndev, net_dev, rndis_msg, channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: /* completion msgs */ - rndis_filter_receive_response(rndis_dev, rndis_msg); + 
rndis_filter_receive_response(ndev, net_dev, rndis_msg); break; case RNDIS_MSG_INDICATE: @@ -759,7 +754,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) - itab[i] = rdev->ind_table[i]; + itab[i] = rdev->rx_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); @@ -855,15 +850,19 @@ static void rndis_set_multicast(struct work_struct *w) { struct rndis_device *rdev = container_of(w, struct rndis_device, mcast_work); + u32 filter = NDIS_PACKET_TYPE_DIRECTED; + unsigned int flags = rdev->ndev->flags; - if (rdev->ndev->flags & IFF_PROMISC) - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_PROMISCUOUS); - else - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_BROADCAST | - NDIS_PACKET_TYPE_ALL_MULTICAST | - NDIS_PACKET_TYPE_DIRECTED); + if (flags & IFF_PROMISC) { + filter = NDIS_PACKET_TYPE_PROMISCUOUS; + } else { + if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI)) + filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; + if (flags & IFF_BROADCAST) + filter |= NDIS_PACKET_TYPE_BROADCAST; + } + + rndis_filter_set_packet_filter(rdev, filter); } void rndis_filter_update(struct netvsc_device *nvdev) @@ -1114,6 +1113,10 @@ void rndis_set_subchannel(struct work_struct *w) netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); + for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) + ndev_ctx->tx_table[i] = i % nvdev->num_chn; + + netif_device_attach(ndev); rtnl_unlock(); return; @@ -1124,73 +1127,26 @@ void rndis_set_subchannel(struct work_struct *w) nvdev->max_chn = 1; nvdev->num_chn = 1; + + netif_device_attach(ndev); unlock: rtnl_unlock(); } -struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *device_info) +static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, + struct netvsc_device *nvdev) { - struct net_device *net = 
hv_get_drvdata(dev); + struct net_device *net = rndis_device->ndev; struct net_device_context *net_device_ctx = netdev_priv(net); - struct netvsc_device *net_device; - struct rndis_device *rndis_device; struct ndis_offload hwcaps; struct ndis_offload_params offloads; - struct ndis_recv_scale_cap rsscap; - u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); unsigned int gso_max_size = GSO_MAX_SIZE; - u32 mtu, size; - const struct cpumask *node_cpu_mask; - u32 num_possible_rss_qs; - int i, ret; - - rndis_device = get_rndis_device(); - if (!rndis_device) - return ERR_PTR(-ENODEV); - - /* - * Let the inner driver handle this first to create the netvsc channel - * NOTE! Once the channel is created, we may get a receive callback - * (RndisFilterOnReceive()) before this call is completed - */ - net_device = netvsc_device_add(dev, device_info); - if (IS_ERR(net_device)) { - kfree(rndis_device); - return net_device; - } - - /* Initialize the rndis device */ - net_device->max_chn = 1; - net_device->num_chn = 1; - - net_device->extension = rndis_device; - rndis_device->ndev = net; - - /* Send the rndis initialization message */ - ret = rndis_filter_init_device(rndis_device, net_device); - if (ret != 0) - goto err_dev_remv; - - /* Get the MTU from the host */ - size = sizeof(u32); - ret = rndis_filter_query_device(rndis_device, net_device, - RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, - &mtu, &size); - if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) - net->mtu = mtu; - - /* Get the mac address */ - ret = rndis_filter_query_device_mac(rndis_device, net_device); - if (ret != 0) - goto err_dev_remv; - - memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); + int ret; /* Find HW offload capabilities */ - ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps); + ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps); if (ret != 0) - goto err_dev_remv; + return ret; /* A value of zero means "no change"; now turn on what we want. 
*/ memset(&offloads, 0, sizeof(struct ndis_offload_params)); @@ -1198,8 +1154,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, /* Linux does not care about IP checksum, always does in kernel */ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED; + /* Reset previously set hw_features flags */ + net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES; + net_device_ctx->tx_checksum_mask = 0; + /* Compute tx offload settings based on hw capabilities */ - net->hw_features = NETIF_F_RXCSUM; + net->hw_features |= NETIF_F_RXCSUM; if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { /* Can checksum TCP */ @@ -1243,10 +1203,74 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, } } + /* In case some hw_features disappeared we need to remove them from + * net->features list as they're no longer supported. + */ + net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features; + netif_set_gso_max_size(net, gso_max_size); - ret = rndis_filter_set_offload_params(net, net_device, &offloads); - if (ret) + ret = rndis_filter_set_offload_params(net, nvdev, &offloads); + + return ret; +} + +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *device_info) +{ + struct net_device *net = hv_get_drvdata(dev); + struct netvsc_device *net_device; + struct rndis_device *rndis_device; + struct ndis_recv_scale_cap rsscap; + u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); + u32 mtu, size; + u32 num_possible_rss_qs; + int i, ret; + + rndis_device = get_rndis_device(); + if (!rndis_device) + return ERR_PTR(-ENODEV); + + /* Let the inner driver handle this first to create the netvsc channel + * NOTE! 
Once the channel is created, we may get a receive callback + * (RndisFilterOnReceive()) before this call is completed + */ + net_device = netvsc_device_add(dev, device_info); + if (IS_ERR(net_device)) { + kfree(rndis_device); + return net_device; + } + + /* Initialize the rndis device */ + net_device->max_chn = 1; + net_device->num_chn = 1; + + net_device->extension = rndis_device; + rndis_device->ndev = net; + + /* Send the rndis initialization message */ + ret = rndis_filter_init_device(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; + + /* Get the MTU from the host */ + size = sizeof(u32); + ret = rndis_filter_query_device(rndis_device, net_device, + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, + &mtu, &size); + if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) + net->mtu = mtu; + + /* Get the mac address */ + ret = rndis_filter_query_device_mac(rndis_device, net_device); + if (ret != 0) + goto err_dev_remv; + + memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); + + /* Query and set hardware capabilities */ + ret = rndis_netdev_set_hwcaps(rndis_device, net_device); + if (ret != 0) goto err_dev_remv; rndis_filter_query_device_link_status(rndis_device, net_device); @@ -1256,7 +1280,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, rndis_device->link_state ? "down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) - return net_device; + goto out; rndis_filter_query_link_speed(rndis_device, net_device); @@ -1268,14 +1292,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; - /* - * We will limit the VRSS channels to the number CPUs in the NUMA node - * the primary channel is currently bound to. 
- * - * This also guarantees that num_possible_rss_qs <= num_online_cpus - */ - node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); - num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask), + /* This guarantees that num_possible_rss_qs <= num_online_cpus */ + num_possible_rss_qs = min_t(u32, num_online_cpus(), rsscap.num_recv_que); net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs); @@ -1284,8 +1302,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = min(net_device->max_chn, device_info->num_chn); for (i = 0; i < ITAB_NUM; i++) - rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, - net_device->num_chn); + rndis_device->rx_table[i] = ethtool_rxfh_indir_default( + i, net_device->num_chn); atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1313,6 +1331,10 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, net_device->num_chn = 1; } + /* No sub channels, device is ready */ + if (net_device->num_chn == 1) + netif_device_attach(net); + return net_device; err_dev_remv: @@ -1331,7 +1353,6 @@ void rndis_filter_device_remove(struct hv_device *dev, net_dev->extension = NULL; netvsc_device_remove(dev); - kfree(rndis_dev); } int rndis_filter_open(struct netvsc_device *nvdev) @@ -1355,8 +1376,3 @@ int rndis_filter_close(struct netvsc_device *nvdev) return rndis_filter_close_device(nvdev->extension); } - -bool rndis_filter_opened(const struct netvsc_device *nvdev) -{ - return atomic_read(&nvdev->open_cnt) > 0; -} diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 24a1eabbbc9da3bf1e34a966a1b14b5f737f5a35..22e466ea919a8f309fb67767513e693b4fa74769 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write( struct ca8210_priv *priv = filp->private_data; u8 command[CA8210_SPI_BUF_SIZE]; - if 
(len > CA8210_SPI_BUF_SIZE) { + memset(command, SPI_IDLE, 6); + if (len > CA8210_SPI_BUF_SIZE || len < 2) { dev_warn( &priv->spi->dev, - "userspace requested erroneously long write (%zu)\n", + "userspace requested erroneous write length (%zu)\n", len ); - return -EMSGSIZE; + return -EBADE; } ret = copy_from_user(command, in_buf, len); @@ -2511,6 +2512,13 @@ static ssize_t ca8210_test_int_user_write( ); return -EIO; } + if (len != command[1] + 2) { + dev_err( + &priv->spi->dev, + "write len does not match packet length field\n" + ); + return -EBADE; + } ret = ca8210_test_check_upstream(command, priv->spi); if (ret == 0) { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0f35597553f4fa8c5eed4aff1c988182064b0345..963a02c988e950a407388b1ee831dfc5ebc0b61f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1448,7 +1448,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ - if (create && macvlan_port_get_rtnl(dev)) + if (create && macvlan_port_get_rtnl(lowerdev)) macvlan_port_destroy(port->dev); return err; } diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cbd629822f043bf4324f8695ef0234f390f30742..26fbbd3ffe330d5a03b1ba9ad8c44c1ae5d569e6 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev) kfree(dp83640); } +static int dp83640_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + + /* From DP83640 datasheet: "Software driver code must wait 3 us + * following a software reset before allowing further serial MII + * operations with the DP83640." 
+ */ + udelay(10); /* Taking udelay inaccuracy into account */ + + return 0; +} + static int dp83640_config_init(struct phy_device *phydev) { struct dp83640_private *dp83640 = phydev->priv; @@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = { .flags = PHY_HAS_INTERRUPT, .probe = dp83640_probe, .remove = dp83640_remove, + .soft_reset = dp83640_soft_reset, .config_init = dp83640_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 1fb464837b3e7cfce5e2c81157aead81a4c4195c..9881edc568ba7803a967ae2608b93d36e27da979 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2083,10 +2083,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) dev->fc_autoneg = phydev->autoneg; - phy_start(phydev); - - netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); - return 0; error: @@ -2352,6 +2348,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) u32 buf; int ret = 0; unsigned long timeout; + u8 sig; ret = lan78xx_read_reg(dev, HW_CFG, &buf); buf |= HW_CFG_LRST_; @@ -2451,6 +2448,15 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; + + if (dev->chipid == ID_REV_CHIP_ID_7800_) { + ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); + if (!ret && sig != EEPROM_INDICATOR) { + /* Implies there is no external eeprom. Set mac speed */ + netdev_info(dev->net, "No External EEPROM. 
Setting MAC Speed\n"); + buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; + } + } ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_read_reg(dev, MAC_TX, &buf); @@ -2513,9 +2519,9 @@ static int lan78xx_open(struct net_device *net) if (ret < 0) goto done; - ret = lan78xx_phy_init(dev); - if (ret < 0) - goto done; + phy_start(net->phydev); + + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); /* for Link Check */ if (dev->urb_intr) { @@ -2576,13 +2582,8 @@ static int lan78xx_stop(struct net_device *net) if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); - - phy_stop(net->phydev); - phy_disconnect(net->phydev); - - net->phydev = NULL; + if (net->phydev) + phy_stop(net->phydev); clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue(net); @@ -3497,8 +3498,13 @@ static void lan78xx_disconnect(struct usb_interface *intf) return; udev = interface_to_usbdev(intf); - net = dev->net; + + phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); + phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); + + phy_disconnect(net->phydev); + unregister_netdev(net); cancel_delayed_work_sync(&dev->wq); @@ -3658,8 +3664,14 @@ static int lan78xx_probe(struct usb_interface *intf, pm_runtime_set_autosuspend_delay(&udev->dev, DEFAULT_AUTOSUSPEND_DELAY); + ret = lan78xx_phy_init(dev); + if (ret < 0) + goto out4; + return 0; +out4: + unregister_netdev(netdev); out3: lan78xx_unbind(dev, intf); out2: @@ -4007,7 +4019,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf) lan78xx_reset(dev); - lan78xx_phy_init(dev); + phy_start(dev->net->phydev); return lan78xx_resume(intf); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 1aad0568dcc6656f553646ec915b5f908cac58d2..8e06f308ce44d1f0d773d7a5daa877cbd200c087 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1105,6 +1105,9 
@@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ + {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ @@ -1181,6 +1184,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ @@ -1241,6 +1245,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ + {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ @@ -1338,6 +1343,18 @@ static int qmi_wwan_probe(struct usb_interface *intf, id->driver_info = (unsigned long)&qmi_wwan_info; } + /* There are devices where the same interface number can be + * configured as different functions. 
We should only bind to + * vendor specific functions when matching on interface number + */ + if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER && + desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) { + dev_dbg(&intf->dev, + "Rejecting interface number match for class %02x\n", + desc->bInterfaceClass); + return -ENODEV; + } + /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */ if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) { dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n"); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d51d9abf7986b203350167d6a2fcdcfcddf8b972..aa88b640cb6c22c7e897f1f309dbc31d37c13769 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1793,7 +1793,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) tx_data += len; agg->skb_len += len; - agg->skb_num++; + agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index d0a113743195acae86931c51eea50b94ddadd487..7a6a1fe793090b8e28f5ef075f5ebc2ad385b5eb 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev, /* it's racing here! 
*/ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); - if (ret < 0) + if (ret < 0) { netdev_warn(dev->net, "Error writing RFE_CTL\n"); - - return ret; + return ret; + } + return 0; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 42baad125a7d0bc90da6a10a1daaf90810b9f2fc..32fc69539126b8756a722cc2b997426e78346fff 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + unsigned long flags; int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { @@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) if (skb->protocol == 0) skb->protocol = eth_type_trans (skb, dev->net); - u64_stats_update_begin(&stats64->syncp); + flags = u64_stats_update_begin_irqsave(&stats64->syncp); stats64->rx_packets++; stats64->rx_bytes += skb->len; - u64_stats_update_end(&stats64->syncp); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", skb->len + sizeof (struct ethhdr), skb->protocol); @@ -1250,11 +1251,12 @@ static void tx_complete (struct urb *urb) if (urb->status == 0) { struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + unsigned long flags; - u64_stats_update_begin(&stats64->syncp); + flags = u64_stats_update_begin_irqsave(&stats64->syncp); stats64->tx_packets += entry->packets; stats64->tx_bytes += entry->length; - u64_stats_update_end(&stats64->syncp); + u64_stats_update_end_irqrestore(&stats64->syncp, flags); } else { dev->net->stats.tx_errors++; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bb15b3012aa5fe90e05a4214b622671446ef5fb0..948611317c9767cf14e02d35a7b775a835cdc2f5 100644 --- a/drivers/net/virtio_net.c +++ 
b/drivers/net/virtio_net.c @@ -513,7 +513,7 @@ static struct sk_buff *receive_small(struct net_device *dev, void *orig_data; u32 act; - if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) + if (unlikely(hdr->hdr.gso_type)) goto err_xdp; if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { @@ -2655,8 +2655,8 @@ static int virtnet_probe(struct virtio_device *vdev) /* Assume link up if device can't report link status, otherwise get link status from config. */ + netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { - netif_carrier_off(dev); schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index cf95290b160c565ed650846cd91076ec2a957ff9..3628fd7e606fd001ac51b2b2762423ad23d7f725 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { + /* Prevent any &gdesc->tcd field from being (speculatively) + * read before (&gdesc->tcd)->gen is read. + */ + dma_rmb(); + completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( &gdesc->tcd), tq, adapter->pdev, adapter); @@ -1099,6 +1104,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.tci = skb_vlan_tag_get(skb); } + /* Ensure that the write to (&gdesc->txd)->gen will be observed after + * all other writes to &gdesc->txd. + */ + dma_wmb(); + /* finally flips the GEN bit of the SOP desc. */ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ VMXNET3_TXD_GEN); @@ -1286,6 +1296,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, */ break; } + + /* Prevent any rcd field from being (speculatively) read before + * rcd->gen is read. 
+ */ + dma_rmb(); + BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && rcd->rqID != rq->dataRingQid); idx = rcd->rxdIdx; @@ -1515,6 +1531,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, ring->next2comp = idx; num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); ring = rq->rx_ring + ring_idx; + + /* Ensure that the writes to rxd->gen bits will be observed + * after all other writes to rxd objects. + */ + dma_wmb(); + while (num_to_alloc) { vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, &rxCmdDesc); @@ -2675,7 +2697,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p) /* ==================== initialization and cleanup routines ============ */ static int -vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) +vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter) { int err; unsigned long mmio_start, mmio_len; @@ -2687,30 +2709,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) return err; } - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } - *dma64 = true; - } else { - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - dev_err(&pdev->dev, - "pci_set_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } - *dma64 = false; - } - err = pci_request_selected_regions(pdev, (1 << 2) - 1, vmxnet3_driver_name); if (err) { dev_err(&pdev->dev, "Failed to request region for adapter: error %d\n", err); - goto err_set_mask; + goto err_enable_device; } pci_set_master(pdev); @@ -2738,7 +2742,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) iounmap(adapter->hw_addr0); err_ioremap: pci_release_selected_regions(pdev, (1 << 2) - 1); -err_set_mask: +err_enable_device: pci_disable_device(pdev); return err; } @@ -3243,7 +3247,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, #endif }; int err; - 
bool dma64 = false; /* stupid gcc */ + bool dma64; u32 ver; struct net_device *netdev; struct vmxnet3_adapter *adapter; @@ -3289,6 +3293,24 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); + err = -EIO; + goto err_set_mask; + } + dma64 = true; + } else { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); + err = -EIO; + goto err_set_mask; + } + dma64 = false; + } + spin_lock_init(&adapter->cmd_lock); adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, sizeof(struct vmxnet3_adapter), @@ -3296,7 +3318,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { dev_err(&pdev->dev, "Failed to map dma\n"); err = -EFAULT; - goto err_dma_map; + goto err_set_mask; } adapter->shared = dma_alloc_coherent( &adapter->pdev->dev, @@ -3347,7 +3369,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, } #endif /* VMXNET3_RSS */ - err = vmxnet3_alloc_pci_resources(adapter, &dma64); + err = vmxnet3_alloc_pci_resources(adapter); if (err < 0) goto err_alloc_pci; @@ -3493,7 +3515,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, err_alloc_shared: dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); -err_dma_map: +err_set_mask: free_netdev(netdev); return err; } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index c1772215702a8c31ec60afdf8cda3690a54715c5..df11bb44998888c52632c72b856f06444be7a85b 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -7059,10 +7059,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, { struct ath10k *ar = 
hw->priv; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k_vif *arvif = (void *)vif->drv_priv; + struct ath10k_peer *peer; u32 bw, smps; spin_lock_bh(&ar->data_lock); + peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", + sta->addr, arvif->vdev_id); + return; + } + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", sta->addr, changed, sta->bandwidth, sta->rx_nss, @@ -7810,6 +7820,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { .max_interfaces = 8, .num_different_channels = 1, .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, #ifdef CONFIG_ATH10K_DFS_CERTIFIED .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | @@ -7933,6 +7944,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { .max_interfaces = 16, .num_different_channels = 1, .beacon_int_infra_match = true, + .beacon_int_min_gcd = 1, #ifdef CONFIG_ATH10K_DFS_CERTIFIED .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 5e77fe1f5b0db41eb6e6226c454f8b3d7d201894..a41bcbda1d9e8eeb27cf57dfcc88f3da852eaaf4 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv) { int i = 0; int ret = 0; + struct rchan_buf *buf; struct rchan *rc = spec_priv->rfs_chan_spec_scan; - for_each_online_cpu(i) - ret += relay_buf_full(*per_cpu_ptr(rc->buf, i)); - - i = num_online_cpus(); + for_each_possible_cpu(i) { + if ((buf = *per_cpu_ptr(rc->buf, i))) { + ret += relay_buf_full(buf); + } + } - if (ret == i) + if (ret) return 1; else return 0; diff 
--git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index c3b373268502336a7d62ac6b9319eed678a8ec0e..eb0895a55b74bdb6a860c47f585844618974a772 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -25,6 +25,7 @@ #include "fw.h" #define WIL_MAX_ROC_DURATION_MS 5000 +#define CTRY_CHINA "CN" bool disable_ap_sme; module_param(disable_ap_sme, bool, 0444); @@ -36,6 +37,10 @@ static struct wiphy_wowlan_support wil_wowlan_support = { }; #endif +static bool country_specific_board_file; +module_param(country_specific_board_file, bool, 0444); +MODULE_PARM_DESC(country_specific_board_file, " switch board file upon regulatory domain change (Default: false)"); + static bool ignore_reg_hints = true; module_param(ignore_reg_hints, bool, 0444); MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)"); @@ -2107,6 +2112,65 @@ wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, return 0; } +static int wil_switch_board_file(struct wil6210_priv *wil, + const u8 *new_regdomain) +{ + int rc = 0; + + if (!country_specific_board_file) + return 0; + + if (memcmp(wil->regdomain, CTRY_CHINA, 2) == 0) { + wil_info(wil, "moving out of China reg domain, use default board file\n"); + wil->board_file_country[0] = '\0'; + } else if (memcmp(new_regdomain, CTRY_CHINA, 2) == 0) { + wil_info(wil, "moving into China reg domain, use country specific board file\n"); + strlcpy(wil->board_file_country, CTRY_CHINA, + sizeof(wil->board_file_country)); + } else { + return 0; + } + + /* need to switch board file - reset the device */ + + mutex_lock(&wil->mutex); + + if (!wil_has_active_ifaces(wil, true, false) || + wil_is_recovery_blocked(wil)) + /* new board file will be used in next FW load */ + goto out; + + __wil_down(wil); + rc = __wil_up(wil); + +out: + mutex_unlock(&wil->mutex); + return rc; +} + +static void wil_cfg80211_reg_notify(struct wiphy *wiphy, + 
struct regulatory_request *request) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int rc; + + wil_info(wil, "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n", + request->alpha2[0], request->alpha2[1], + request->intersect ? " intersect" : "", + request->processed ? " processed" : "", + request->initiator, request->user_reg_hint_type); + + if (memcmp(wil->regdomain, request->alpha2, 2) == 0) + /* reg domain did not change */ + return; + + rc = wil_switch_board_file(wil, request->alpha2); + if (rc) + wil_err(wil, "switch board file failed %d\n", rc); + + memcpy(wil->regdomain, request->alpha2, 2); +} + static const struct cfg80211_ops wil_cfg80211_ops = { .add_virtual_intf = wil_cfg80211_add_iface, .del_virtual_intf = wil_cfg80211_del_iface, @@ -2178,6 +2242,8 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->mgmt_stypes = wil_mgmt_stypes; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS; + wiphy->reg_notifier = wil_cfg80211_reg_notify; + wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands); wiphy->vendor_commands = wil_nl80211_vendor_commands; wiphy->vendor_events = wil_nl80211_vendor_events; diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 540fc20984d8fe8c65ef56b074f47030da518084..3e2bbbceca06e9dcbf0f83dd64f79596c8a2dd43 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +23,8 @@ MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT); MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS); MODULE_FIRMWARE(WIL_BOARD_FILE_NAME); +MODULE_FIRMWARE(WIL_FW_NAME_TALYN); +MODULE_FIRMWARE(WIL_BRD_NAME_TALYN); static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index d7e112da6a8d3ba520aa9ed232053cd9481f16a8..5d287a8e1b458a8aca674275a55c007df944e4c3 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -625,6 +625,15 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) wil6210_unmask_irq_misc(wil, false); + /* in non-triple MSI case, this is done inside wil6210_thread_irq + * because it has to be done after unmasking the pseudo. + */ + if (wil->n_msi == 3 && wil->suspend_resp_rcvd) { + wil_dbg_irq(wil, "set suspend_resp_comp to true\n"); + wil->suspend_resp_comp = true; + wake_up_interruptible(&wil->wq); + } + return IRQ_HANDLED; } @@ -782,6 +791,40 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) return rc; } +static int wil6210_request_3msi(struct wil6210_priv *wil, int irq) +{ + int rc; + + /* IRQ's are in the following order: + * - Tx + * - Rx + * - Misc + */ + rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED, + WIL_NAME "_tx", wil); + if (rc) + return rc; + + rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED, + WIL_NAME "_rx", wil); + if (rc) + goto free0; + + rc = request_threaded_irq(irq + 2, wil6210_irq_misc, + wil6210_irq_misc_thread, + IRQF_SHARED, WIL_NAME "_misc", wil); + if (rc) + goto free1; + + return 0; +free1: + free_irq(irq + 1, wil); +free0: + free_irq(irq, wil); + + return rc; +} + /* can't use wil_ioread32_and_clear because ICC value is not set yet */ static inline void 
wil_clear32(void __iomem *addr) { @@ -822,11 +865,12 @@ void wil6210_clear_halp(struct wil6210_priv *wil) wil6210_unmask_halp(wil); } -int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) +int wil6210_init_irq(struct wil6210_priv *wil, int irq) { int rc; - wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); + wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n", + wil->n_msi ? "MSI" : "INTx", wil->n_msi); if (wil->use_enhanced_dma_hw) { wil->txrx_ops.irq_tx = wil6210_irq_tx_edma; @@ -835,10 +879,14 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) wil->txrx_ops.irq_tx = wil6210_irq_tx; wil->txrx_ops.irq_rx = wil6210_irq_rx; } - rc = request_threaded_irq(irq, wil6210_hardirq, - wil6210_thread_irq, - use_msi ? 0 : IRQF_SHARED, - WIL_NAME, wil); + + if (wil->n_msi == 3) + rc = wil6210_request_3msi(wil, irq); + else + rc = request_threaded_irq(irq, wil6210_hardirq, + wil6210_thread_irq, + wil->n_msi ? 0 : IRQF_SHARED, + WIL_NAME, wil); return rc; } @@ -848,4 +896,8 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq) wil_mask_irq(wil); free_irq(irq, wil); + if (wil->n_msi == 3) { + free_irq(irq + 1, wil); + free_irq(irq + 2, wil); + } } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 5d90e99aeaed31676a401979c6f5684cd89c0182..980b84e6390026ac931bcebc2250c90adddda070 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -28,6 +28,7 @@ #define WAIT_FOR_HALP_VOTE_MS 100 #define WAIT_FOR_SCAN_ABORT_MS 1000 #define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1 +#define WIL_BOARD_FILE_MAX_NAMELEN 128 bool debug_fw; /* = false; */ module_param(debug_fw, bool, 0444); @@ -698,6 +699,7 @@ void wil_priv_deinit(struct wil6210_priv *wil) wmi_event_flush(wil); destroy_workqueue(wil->wq_service); destroy_workqueue(wil->wmi_wq); + kfree(wil->board_file); } static void wil_shutdown_bl(struct wil6210_priv *wil) @@ -1119,6 +1121,9 @@ void 
wil_refresh_fw_capabilities(struct wil6210_priv *wil) wil->platform_capa)) ? BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0; + if (wil->n_msi == 3) + features |= BIT(WIL_PLATFORM_FEATURE_TRIPLE_MSI); + wil->platform_ops.set_features(wil->platform_handle, features); } } @@ -1132,6 +1137,44 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) le32_to_cpus(&r->head); } +/* construct actual board file name to use */ +void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len) +{ + const char *board_file; + const char *ext; + int prefix_len; + const char *wil_talyn_fw_name = ftm_mode ? WIL_FW_NAME_FTM_TALYN : + WIL_FW_NAME_TALYN; + + if (wil->board_file) { + board_file = wil->board_file; + } else { + /* If specific FW file is used for Talyn, + * use specific board file + */ + if (strcmp(wil->wil_fw_name, wil_talyn_fw_name) == 0) + board_file = WIL_BRD_NAME_TALYN; + else + board_file = WIL_BOARD_FILE_NAME; + } + + if (wil->board_file_country[0] == '\0') { + strlcpy(buf, board_file, len); + return; + } + + /* use country specific board file */ + if (len < strlen(board_file) + 4 /* for _XX and terminating null */) + return; + + ext = strrchr(board_file, '.'); + prefix_len = (ext ? 
ext - board_file : strlen(board_file)); + snprintf(buf, len, "%.*s_%.2s", + prefix_len, board_file, wil->board_file_country); + if (ext) + strlcat(buf, ext, len); +} + static int wil_get_bl_info(struct wil6210_priv *wil) { struct net_device *ndev = wil->main_ndev; @@ -1512,8 +1555,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_set_oob_mode(wil, oob_mode); if (load_fw) { + char board_file[WIL_BOARD_FILE_MAX_NAMELEN]; + + board_file[0] = '\0'; + wil_get_board_file(wil, board_file, sizeof(board_file)); wil_info(wil, "Use firmware <%s> + board <%s>\n", - wil->wil_fw_name, WIL_BOARD_FILE_NAME); + wil->wil_fw_name, board_file); if (wil->secured_boot) { wil_err(wil, "secured boot is not supported\n"); @@ -1530,11 +1577,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (rc) goto out; if (wil->brd_file_addr) - rc = wil_request_board(wil, WIL_BOARD_FILE_NAME); + rc = wil_request_board(wil, board_file); else - rc = wil_request_firmware(wil, - WIL_BOARD_FILE_NAME, - true); + rc = wil_request_firmware(wil, board_file, true); if (rc) goto out; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index f534e5e82d06a64e175c39f72862b368f9715cfd..4dbc78e8e1c591343b2c309671ef864928330dc6 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -24,11 +24,11 @@ #include #include -static bool use_msi = true; -module_param(use_msi, bool, 0444); -MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); +static int n_msi = 3; +module_param(n_msi, int, 0444); +MODULE_PARM_DESC(n_msi, " Use MSI interrupt: 0 - use INTx, 1 - single, or 3 - (default) "); -static bool ftm_mode; +bool ftm_mode; module_param(ftm_mode, bool, 0444); MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false"); @@ -157,12 +157,24 @@ int wil_set_capabilities(struct wil6210_priv *wil) void wil_disable_irq(struct wil6210_priv *wil) { - disable_irq(wil->pdev->irq); + int 
irq = wil->pdev->irq; + + disable_irq(irq); + if (wil->n_msi == 3) { + disable_irq(irq + 1); + disable_irq(irq + 2); + } } void wil_enable_irq(struct wil6210_priv *wil) { - enable_irq(wil->pdev->irq); + int irq = wil->pdev->irq; + + enable_irq(irq); + if (wil->n_msi == 3) { + enable_irq(irq + 1); + enable_irq(irq + 2); + } } static void wil_remove_all_additional_vifs(struct wil6210_priv *wil) @@ -189,28 +201,47 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) * and only MSI should be used */ int msi_only = pdev->msi_enabled; - bool _use_msi = use_msi; wil_dbg_misc(wil, "if_pcie_enable\n"); pci_set_master(pdev); - wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx"); + /* how many MSI interrupts to request? */ + switch (n_msi) { + case 3: + case 1: + wil_dbg_misc(wil, "Setup %d MSI interrupts\n", n_msi); + break; + case 0: + wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n"); + break; + default: + wil_err(wil, "Invalid n_msi=%d, default to 1\n", n_msi); + n_msi = 1; + } + + if (n_msi == 3 && + pci_alloc_irq_vectors(pdev, n_msi, n_msi, PCI_IRQ_MSI) < n_msi) { + wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); + n_msi = 1; + } - if (use_msi && pci_enable_msi(pdev)) { + if (n_msi == 1 && pci_enable_msi(pdev)) { wil_err(wil, "pci_enable_msi failed, use INTx\n"); - _use_msi = false; + n_msi = 0; } - if (!_use_msi && msi_only) { + wil->n_msi = n_msi; + + if (wil->n_msi == 0 && msi_only) { wil_err(wil, "Interrupt pin not routed, unable to use INTx\n"); rc = -ENODEV; goto stop_master; } - rc = wil6210_init_irq(wil, pdev->irq, _use_msi); + rc = wil6210_init_irq(wil, pdev->irq); if (rc) - goto stop_master; + goto release_vectors; /* need reset here to obtain MAC */ mutex_lock(&wil->mutex); @@ -223,8 +254,9 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) release_irq: wil6210_fini_irq(wil, pdev->irq); - /* safe to call if no MSI */ - pci_disable_msi(pdev); + release_vectors: + /* safe to call if no allocation */ + 
pci_free_irq_vectors(pdev); stop_master: pci_clear_master(pdev); return rc; diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c index 21d26fd628933767f9d5e022e291254c98c5884c..6431e4caced52f8fd3a52ec95df5bd4d20436f19 100644 --- a/drivers/net/wireless/ath/wil6210/sysfs.c +++ b/drivers/net/wireless/ath/wil6210/sysfs.c @@ -96,6 +96,52 @@ static DEVICE_ATTR(ftm_txrx_offset, 0644, wil_ftm_txrx_offset_sysfs_show, wil_ftm_txrx_offset_sysfs_store); +static ssize_t +wil_board_file_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + + wil_get_board_file(wil, buf, PAGE_SIZE); + strlcat(buf, "\n", PAGE_SIZE); + return strlen(buf); +} + +static ssize_t +wil_board_file_sysfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + size_t len; + + mutex_lock(&wil->mutex); + + kfree(wil->board_file); + wil->board_file = NULL; + + len = count; + if (buf[count - 1] == '\n') + len--; + len = strnlen(buf, len); + if (len > 0) { + wil->board_file = kmalloc(len + 1, GFP_KERNEL); + if (!wil->board_file) { + mutex_unlock(&wil->mutex); + return -ENOMEM; + } + strlcpy(wil->board_file, buf, len + 1); + } + mutex_unlock(&wil->mutex); + + return count; +} + +static DEVICE_ATTR(board_file, 0644, + wil_board_file_sysfs_show, + wil_board_file_sysfs_store); + static ssize_t wil_tt_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -310,6 +356,7 @@ static DEVICE_ATTR(snr_thresh, 0644, static struct attribute *wil6210_sysfs_entries[] = { &dev_attr_ftm_txrx_offset.attr, + &dev_attr_board_file.attr, &dev_attr_thermal_throttling.attr, &dev_attr_fst_link_loss.attr, &dev_attr_snr_thresh.attr, diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 
b9014d20b933f4c16c97a4b8b37301d152b0b58f..2fe3c9aaba04c349ec3d5d9e8404a7bac62d7055 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -40,6 +40,7 @@ extern bool debug_fw; extern bool disable_ap_sme; extern bool use_rx_hw_reordering; extern bool use_compressed_rx_status; +extern bool ftm_mode; struct wil6210_priv; struct wil6210_vif; @@ -55,6 +56,7 @@ union wil_tx_desc; #define WIL_FW_NAME_TALYN "wil6436.fw" #define WIL_FW_NAME_FTM_TALYN "wil6436_ftm.fw" +#define WIL_BRD_NAME_TALYN "wil6436.brd" #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ @@ -868,6 +870,7 @@ struct wil6210_priv { u32 bar_size; struct wiphy *wiphy; struct net_device *main_ndev; + int n_msi; void __iomem *csr; DECLARE_BITMAP(status, wil_status_last); u8 fw_version[ETHTOOL_FWVERS_LEN]; @@ -876,6 +879,7 @@ struct wil6210_priv { const char *hw_name; const char *wil_fw_name; char *board_file; + char board_file_country[3]; /* alpha2 */ u32 brd_file_addr; u32 brd_file_max_size; DECLARE_BITMAP(hw_capa, hw_capa_last); @@ -980,6 +984,9 @@ struct wil6210_priv { short direct; } snr_thresh; + /* current reg domain configured in kernel */ + char regdomain[3]; /* alpha2 */ + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; @@ -1080,6 +1087,8 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) wil_w(wil, reg, wil_r(wil, reg) & ~val); } +void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len); + #if defined(CONFIG_DYNAMIC_DEBUG) #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ @@ -1204,7 +1213,7 @@ int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); void wil6210_clear_irq(struct wil6210_priv *wil); -int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); +int wil6210_init_irq(struct wil6210_priv *wil, int irq); void 
wil6210_fini_irq(struct wil6210_priv *wil, int irq); void wil_mask_irq(struct wil6210_priv *wil); void wil_unmask_irq(struct wil6210_priv *wil); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 4157c90ad9736b9b20ab086585eca22ff80a60c9..0d635556f6c5c00188d0999414dcdda12d6b4c41 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2841,7 +2841,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg); - struct ieee80211_channel *notify_channel; struct cfg80211_bss *bss; struct ieee80211_supported_band *band; struct brcmu_chan ch; @@ -2851,7 +2850,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, u16 notify_interval; u8 *notify_ie; size_t notify_ielen; - s32 notify_signal; + struct cfg80211_inform_bss bss_data = {}; if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { brcmf_err("Bss info is larger than buffer. 
Discarding\n"); @@ -2871,27 +2870,28 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, band = wiphy->bands[NL80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); - notify_channel = ieee80211_get_channel(wiphy, freq); + bss_data.chan = ieee80211_get_channel(wiphy, freq); + bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; + bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); - notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; + bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100; brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID); brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq); brcmf_dbg(CONN, "Capability: %X\n", notify_capability); brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval); - brcmf_dbg(CONN, "Signal: %d\n", notify_signal); + brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, - CFG80211_BSS_FTYPE_UNKNOWN, - (const u8 *)bi->BSSID, - 0, notify_capability, - notify_interval, notify_ie, - notify_ielen, notify_signal, - GFP_KERNEL); + bss = cfg80211_inform_bss_data(wiphy, &bss_data, + CFG80211_BSS_FTYPE_UNKNOWN, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, GFP_KERNEL); if (!bss) return -ENOMEM; @@ -6916,7 +6916,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, return; /* ignore non-ISO3166 country codes */ - for (i = 0; i < sizeof(req->alpha2); i++) + for (i = 0; i < 2; i++) if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n", req->alpha2[0], req->alpha2[1]); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 
3721a3ed358b830fb94925c01273aba1a4e633cf..f824bebceb06081e915a07d746420b602f024fd5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -211,7 +211,7 @@ enum { * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. - * @T2_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_START_IMMEDIATELY: start time event immediately * @TE_V2_DEP_OTHER: depends on another time event * @TE_V2_DEP_TSF: depends on a specific time * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC @@ -230,7 +230,7 @@ enum iwl_time_event_policy { TE_V2_NOTIF_HOST_FRAG_END = BIT(5), TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), - T2_V2_START_IMMEDIATELY = BIT(11), + TE_V2_START_IMMEDIATELY = BIT(11), /* placement characteristics */ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index f5dd7d83cd0a8eb8da5fc150ebb3d035c2c03a97..2fa7ec466275d40b4f96955d6283d7cce936cc6a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -928,7 +930,6 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) out: iwl_fw_free_dump_desc(fwrt); - fwrt->dump.trig = NULL; clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); } IWL_EXPORT_SYMBOL(iwl_fw_error_dump); @@ -1084,6 +1085,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work) fwrt->ops->dump_start(fwrt->ops_ctx)) return; + if (fwrt->ops && fwrt->ops->fw_running && + !fwrt->ops->fw_running(fwrt->ops_ctx)) { + IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); + iwl_fw_free_dump_desc(fwrt); + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); + goto out; + } + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* stop recording */ iwl_fw_dbg_stop_recording(fwrt); @@ -1117,7 +1126,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); } } - +out: if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 223fb77a3aa9d64456244dd4c5156b8885cec6fd..72259bff9922f7308a3d78250247ccfd29d84a8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; + fwrt->dump.trig = NULL; } void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 50cfb6d795a52e249e7bb5364acbb41772e1b913..fb1ad3c5c93cbee453e6dec83b7917dc4cf03316 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,7 @@ struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); void (*dump_end)(void *ctx); + bool (*fw_running)(void *ctx); }; #define MAX_NUM_LMAC 2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index e97904c2c4d4d406432a56d2b401cb92c312902c..714996187236ef14da2a2e6e03b5dcfcc5c6ea56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1209,9 +1211,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, { int ret; - if (!iwl_mvm_firmware_running(mvm)) - return -EIO; - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 2f22e14e00fe881bc9868a22c25ba41286a9ea51..8ba16fc24e3af0bd6bc07b6de7195e375d3f0cb1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } /* Allocate the CAB queue for softAP and GO interfaces */ - if (vif->type == NL80211_IFTYPE_AP) { + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a9ac872226fdf79d87d6bc8b9643d9d6d86cd793..db1fab9aa1c656293dae7b2068564f50a159df01 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -2127,15 +2128,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (ret) goto out_remove; - ret = iwl_mvm_add_mcast_sta(mvm, vif); - if (ret) - goto out_unbind; - - /* Send the bcast station. At this stage the TBTT and DTIM time events - * are added and applied to the scheduler */ - ret = iwl_mvm_send_add_bcast_sta(mvm, vif); - if (ret) - goto out_rm_mcast; + /* + * This is not very nice, but the simplest: + * For older FWs adding the mcast sta before the bcast station may + * cause assert 0x2b00. + * This is fixed in later FW so make the order of removal depend on + * the TLV + */ + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) + goto out_unbind; + /* + * Send the bcast station. At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) { + iwl_mvm_rm_mcast_sta(mvm, vif); + goto out_unbind; + } + } else { + /* + * Send the bcast station. 
At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); + if (ret) + goto out_unbind; + ret = iwl_mvm_add_mcast_sta(mvm, vif); + if (ret) { + iwl_mvm_send_rm_bcast_sta(mvm, vif); + goto out_unbind; + } + } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; @@ -2165,7 +2191,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); -out_rm_mcast: iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); @@ -2703,6 +2728,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); + ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { @@ -3468,6 +3497,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ret = 0; goto out; case NL80211_IFTYPE_STATION: + mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ @@ -3511,7 +3541,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { - u32 duration = 2 * vif->bss_conf.beacon_int; + u32 duration = 3 * vif->bss_conf.beacon_int; /* iwl_mvm_protect_session() reads directly from the * device (the system time), so make sure it is @@ -3524,6 +3554,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, /* Protect the session to make sure we hear the first * beacon on the new channel. 
*/ + mvmvif->csa_bcn_pending = true; iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); @@ -3967,6 +3998,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; + mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2ec27ceb8af9a126dc029d7419c5db66843da457..736c176f1fd6c034041e81542601ea2ebe57d575 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -434,6 +434,9 @@ struct iwl_mvm_vif { bool csa_failed; u16 csa_target_freq; + /* Indicates that we are waiting for a beacon on a new channel */ + bool csa_bcn_pending; + /* TCP Checksum Offload */ netdev_features_t features; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9fb40955d5f4f2d16f16d4e994ec5b9f987900df..54f411b83beae5d1ff44a9416a06a811d4e4f9d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -553,9 +555,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); } +static bool iwl_mvm_fwrt_fw_running(void *ctx) +{ + return iwl_mvm_firmware_running(ctx); +} + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, + .fw_running = iwl_mvm_fwrt_fw_running, }; static struct iwl_op_mode * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index d22cef7381baa496a7ee8df68efa9dac2c402d81..386fdee23eb0ef35f9523ff37b3b7297ce415175 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2690,7 +2690,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band, - struct rs_rate *rate) + struct rs_rate *rate, + bool init) { int i, nentries; unsigned long active_rate; @@ -2744,14 +2745,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, */ if (sta->vht_cap.vht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { - switch (sta->bandwidth) { - case IEEE80211_STA_RX_BW_160: - case IEEE80211_STA_RX_BW_80: - case IEEE80211_STA_RX_BW_40: + /* + * In AP mode, when a new station associates, rs is initialized + * immediately upon association completion, before the phy + * context is updated with the association parameters, so the + * sta bandwidth might be wider than the phy context allows. + * To avoid this issue, always initialize rs with 20mhz + * bandwidth rate, and after authorization, when the phy context + * is already up-to-date, re-init rs with the correct bw. + */ + u32 bw = init ? 
RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); + + switch (bw) { + case RATE_MCS_CHAN_WIDTH_40: + case RATE_MCS_CHAN_WIDTH_80: + case RATE_MCS_CHAN_WIDTH_160: initial_rates = rs_optimal_rates_vht; nentries = ARRAY_SIZE(rs_optimal_rates_vht); break; - case IEEE80211_STA_RX_BW_20: + case RATE_MCS_CHAN_WIDTH_20: initial_rates = rs_optimal_rates_vht_20mhz; nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); break; @@ -2762,7 +2774,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, active_rate = lq_sta->active_siso_rate; rate->type = LQ_VHT_SISO; - rate->bw = rs_bw_from_sta_bw(sta); + rate->bw = bw; } else if (sta->ht_cap.ht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { initial_rates = rs_optimal_rates_ht; @@ -2844,7 +2856,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - rs_get_initial_rate(mvm, sta, lq_sta, band, rate); + rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 819e6f66a5b5ca5e704abd87bdf106cc19d3b1ee..e2196dc35dc6cf7a00fda5456695e6f802c37d88 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; + int res; u8 tid, keyidx; u8 pn[IEEE80211_CCMP_PN_LEN]; u8 *extiv; @@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, pn[4] = extiv[1]; pn[5] = extiv[0]; - if (memcmp(pn, ptk_pn->q[queue].pn[tid], - IEEE80211_CCMP_PN_LEN) <= 0) + res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); + if (res < 0) + return -1; + if 
(!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) return -1; - if (!(stats->flag & RX_FLAG_AMSDU_MORE)) - memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); stats->flag |= RX_FLAG_PN_VALIDATED; return 0; @@ -310,28 +312,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, } /* - * returns true if a packet outside BA session is a duplicate and - * should be dropped + * returns true if a packet is a duplicate and should be dropped. + * Updates AMSDU PN tracking info */ -static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, - struct ieee80211_rx_status *rx_status, - struct ieee80211_hdr *hdr, - struct iwl_rx_mpdu_desc *desc) +static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, + struct ieee80211_rx_status *rx_status, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_rxq_dup_data *dup_data; - u8 baid, tid, sub_frame_idx; + u8 tid, sub_frame_idx; if (WARN_ON(IS_ERR_OR_NULL(sta))) return false; - baid = (le32_to_cpu(desc->reorder_data) & - IWL_RX_MPDU_REORDER_BAID_MASK) >> - IWL_RX_MPDU_REORDER_BAID_SHIFT; - - if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) - return false; - mvm_sta = iwl_mvm_sta_from_mac80211(sta); dup_data = &mvm_sta->dup_data[queue]; @@ -361,6 +356,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, dup_data->last_sub_frame[tid] >= sub_frame_idx)) return true; + /* Allow same PN as the first subframe for following sub frames */ + if (dup_data->last_seq[tid] == hdr->seq_ctrl && + sub_frame_idx > dup_data->last_sub_frame[tid] && + desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) + rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; + dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_sub_frame[tid] = sub_frame_idx; @@ -929,7 +930,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, 
desc); - if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { + if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 0d7929799942f6f0fdd55fb523d8b9745927131e..d31d84eebc5d02add8a7fbc07692f1fd04555658 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1679,7 +1679,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type) { - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; @@ -2023,7 +2024,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = msta->sta_id, - .tid = IWL_MAX_TID_COUNT, + .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; @@ -2036,6 +2037,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; + /* + * In IBSS, ieee80211_check_queues() sets the cab_queue to be + * invalid, so make sure we use the queue we want. + * Note that this is done here as we want to avoid making DQA + * changes in mac80211 layer. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) { + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->cab_queue = vif->cab_queue; + } + /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. 
@@ -2063,24 +2075,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, msta->sta_id, - IWL_MAX_TID_COUNT, + 0, timeout); mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_STA_TYPE)) { - /* - * In IBSS, ieee80211_check_queues() sets the cab_queue to be - * invalid, so make sure we use the queue we want. - * Note that this is done here as we want to avoid making DQA - * changes in mac80211 layer. - */ - if (vif->type == NL80211_IFTYPE_ADHOC) { - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - mvmvif->cab_queue = vif->cab_queue; - } + IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, &cfg, timeout); - } return 0; } @@ -2099,7 +2100,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); + 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -2435,28 +2436,12 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* * Note the possible cases: - * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed - * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free - * one and mark it as reserved - * 3. In DQA mode, but no traffic yet on this TID: same treatment as in - * non-DQA mode, since the TXQ hasn't yet been allocated - * Don't support case 3 for new TX path as it is not expected to happen - * and aggregation will be offloaded soon anyway + * 1. An enabled TXQ - TXQ needs to become agg'ed + * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark + * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (txq_id == IWL_MVM_INVALID_QUEUE) { - ret = -ENXIO; - goto release_locks; - } - } else if (unlikely(mvm->queue_info[txq_id].status == - IWL_MVM_QUEUE_SHARED)) { - ret = -ENXIO; - IWL_DEBUG_TX_QUEUES(mvm, - "Can't start tid %d agg on shared queue!\n", - tid); - goto release_locks; - } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + if (txq_id == IWL_MVM_INVALID_QUEUE) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); @@ -2465,16 +2450,16 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto release_locks; } - /* - * TXQ shouldn't be in inactive mode for non-DQA, so getting - * an inactive queue from iwl_mvm_find_free_queue() is - * certainly a bug - */ - WARN_ON(mvm->queue_info[txq_id].status == - IWL_MVM_QUEUE_INACTIVE); /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; + } else if (unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED)) { + ret = -ENXIO; + IWL_DEBUG_TX_QUEUES(mvm, + "Can't start tid %d agg on shared queue!\n", + tid); + goto release_locks; } spin_unlock(&mvm->queue_info_lock); @@ -2645,8 +2630,10 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, - u16 txq_id) + struct iwl_mvm_tid_data *tid_data) { + u16 txq_id = tid_data->txq_id; + if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2658,8 +2645,10 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. 
*/ - if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) + if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; + tid_data->txq_id = IWL_MVM_INVALID_QUEUE; + } spin_unlock_bh(&mvm->queue_info_lock); } @@ -2690,7 +2679,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); - iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: @@ -2757,7 +2746,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); - iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id); + iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); @@ -3119,8 +3108,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, int ret, size; u32 status; + /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) - return -EINVAL; + return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); @@ -3181,17 +3171,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } sta_id = mvm_sta->sta_id; - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { - ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, - false); - goto end; - } - /* * It is possible that the 'sta' parameter is NULL, and thus - * there is a need to retrieve the sta from the local station + * there is a need to retrieve the sta from the local station * table. 
*/ if (!sta) { @@ -3206,6 +3188,17 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; + } else { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + sta_id = mvmvif->mcast_sta.sta_id; + } + + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { + ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); + goto end; } /* If the key_offset is not pre-assigned, we need to find a diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index e25cda9fbf6c34d951b7441b40574bfb9c0a67b7..342ca1778efdec25d54801ae445a2e639d3171fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -203,9 +200,13 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (vif->type != NL80211_IFTYPE_STATION) return false; - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) + + if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && + vif->bss_conf.dtim_period) return false; if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); @@ -349,7 +350,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, te_data->vif, - "No association and the time event is over already..."); + "No beacon heard and the time event is over already..."); break; default: break; @@ -621,7 +622,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); + TE_V2_START_IMMEDIATELY); if (!wait_for_notif) { iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); @@ -814,7 +815,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | - T2_V2_START_IMMEDIATELY); + TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } @@ -924,6 +925,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, time_cmd.interval = cpu_to_le32(1); time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE); + if (!apply_time) + time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 887a504ce64a5e98f714917953461ebe57778291..6c014c27392277c625e5d581a070e04004bbe029 100644 
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, { struct ieee80211_key_conf *keyconf = info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; + enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_CCMP_256: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: + type = TX_CMD_SEC_GCMP; + /* Fall through */ + case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having an old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */ - tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; + tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { - sta_id = mvmvif->bcast_sta.sta_id; + if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + sta_id = mvmvif->bcast_sta.sta_id; + else + sta_id = mvmvif->mcast_sta.sta_id; + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); if (queue < 0) @@ -1872,14 +1879,12 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags) struct iwl_mvm_int_sta *int_sta = sta; struct iwl_mvm_sta *mvm_sta = sta; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (internal) - return iwl_mvm_flush_sta_tids(mvm, int_sta->sta_id, - BIT(IWL_MGMT_TID), flags); + 
BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != + offsetof(struct iwl_mvm_sta, sta_id)); + if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, - 0xFF, flags); - } + 0xff | BIT(IWL_MGMT_TID), flags); if (internal) return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 43ab172d31cb17e50decf14eb8740b46b9b7add4..d2cada0ab426455e810d864a114c748e0400d195 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -810,12 +810,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; - bool remove_mac_queue = true; + bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; int ret; + if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) + return -EINVAL; + if (iwl_mvm_has_new_tx_api(mvm)) { spin_lock_bh(&mvm->queue_info_lock); - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); + + if (remove_mac_queue) + mvm->hw_queue_to_mac80211[queue] &= + ~BIT(mac80211_queue); + spin_unlock_bh(&mvm->queue_info_lock); iwl_trans_txq_free(mvm->trans, queue); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index a2c1ca5c76d1c7d5b9191d8ad0f35aba949dfcc7..e1660b92b20c7793c88ea5d470e1d793f495bbf6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, /* * Determine IFS values - * - Use TXOP_BACKOFF for probe and management frames except beacons + * - Use TXOP_BACKOFF for management frames except beacons * - Use TXOP_SIFS for fragment bursts * - Use TXOP_HTTXOP for everything else * * Note: rt2800 devices won't use CTS protection (if used) * 
for frames not transmitted with TXOP_HTTXOP */ - if ((ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_beacon(hdr->frame_control)) || - (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) + if (ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_beacon(hdr->frame_control)) txdesc->u.ht.txop = TXOP_BACKOFF; else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) txdesc->u.ht.txop = TXOP_SIFS; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 9cff6bc4049c993a78ab7db6b512110a213834a0..cf551785eb089d1a695148c825ddd9b33d252dee 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, writeVal = 0x00000000; if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; - else if (rtlpriv->dm.dynamic_txhighpower_lvl == - TXHIGHPWRLEVEL_BT2) - writeVal = writeVal; *(p_outwriteval + rf) = writeVal; } } diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 8d3a4839b6ef1db82d27a8df069b3b16c8f03a9d..370161ca2a1c34097cd8e1fd0f1b0502b2d842ee 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -636,11 +636,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, u32 *read_buf, u16 size) { u32 addr_on_bus, *data; - u32 align[2] = {}; u16 ms_addr; int status; - data = PTR_ALIGN(&align[0], 8); + data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); + if (!data) + return -ENOMEM; + + data = PTR_ALIGN(data, 8); ms_addr = (addr >> 16); status = rsi_sdio_master_access_msword(adapter, ms_addr); @@ -648,7 +651,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto err; } 
addr &= 0xFFFF; @@ -666,7 +669,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, (u8 *)data, 4); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__); - return status; + goto err; } if (size == 2) { if ((addr & 0x3) == 0) @@ -688,17 +691,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, *read_buf = *data; } - return 0; +err: + kfree(data); + return status; } static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, unsigned long addr, unsigned long data, u16 size) { - unsigned long data1[2], *data_aligned; + unsigned long *data_aligned; int status; - data_aligned = PTR_ALIGN(&data1[0], 8); + data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL); + if (!data_aligned) + return -ENOMEM; + + data_aligned = PTR_ALIGN(data_aligned, 8); if (size == 2) { *data_aligned = ((data << 16) | (data & 0xFFFF)); @@ -717,6 +726,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); + kfree(data_aligned); return -EIO; } addr = addr & 0xFFFF; @@ -726,12 +736,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, (adapter, (addr | RSI_SD_REQUEST_MASTER), (u8 *)data_aligned, size); - if (status < 0) { + if (status < 0) rsi_dbg(ERR_ZONE, "%s: Unable to do AHB reg write\n", __func__); - return status; - } - return 0; + + kfree(data_aligned); + return status; } /** diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 95e4bed57bafcaee1137b21114d921627c4ac959..90339203920016dfc4fc90c8d8d2aa979ee18c27 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -46,6 +46,8 @@ enum sdio_interrupt_type { #define PKT_BUFF_AVAILABLE 1 #define FW_ASSERT_IND 2 +#define RSI_MASTER_REG_BUF_SIZE 12 + #define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3 #define RSI_FN1_INT_REGISTER 0xf9 #define RSI_SD_REQUEST_MASTER 0x10000 diff --git 
a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 8cd42544c90e1a40d7a60a9c60a72107d1baa09d..740aae51e1c6382f9038a6bc2696ec9da0c24b37 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -606,8 +606,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->discovery_nqn = !(strcmp(opts->subsysnqn, NVME_DISC_SUBSYS_NAME)); - if (opts->discovery_nqn) + if (opts->discovery_nqn) { + opts->kato = 0; opts->nr_io_queues = 0; + } break; case NVMF_OPT_TRADDR: p = match_strdup(args); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index eab17405e81584ada43a72bd5574a0ed07547de6..3d4724e38aa996d3f1b07870a51c4f66ce7dd257 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1013,12 +1013,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) if (!(csts & NVME_CSTS_CFS) && !nssro) return false; - /* If PCI error recovery process is happening, we cannot reset or - * the recovery mechanism will surely fail. - */ - if (pci_channel_offline(to_pci_dev(dev->dev))) - return false; - return true; } @@ -1049,6 +1043,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_command cmd; u32 csts = readl(dev->bar + NVME_REG_CSTS); + /* If PCI error recovery process is happening, we cannot reset or + * the recovery mechanism will surely fail. 
+ */ + mb(); + if (pci_channel_offline(to_pci_dev(dev->dev))) + return BLK_EH_RESET_TIMER; + /* * Reset immediately if the controller is failed */ @@ -1322,7 +1323,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) nvmeq->cq_vector = qid - 1; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) - return result; + goto release_vector; result = adapter_alloc_sq(dev, qid, nvmeq); if (result < 0) @@ -1336,9 +1337,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) return result; release_sq: + dev->online_queues--; adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); + release_vector: + nvmeq->cq_vector = -1; return result; } @@ -1766,7 +1770,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) int result, nr_io_queues; unsigned long size; - nr_io_queues = num_present_cpus(); + nr_io_queues = num_possible_cpus(); result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) return result; @@ -2310,10 +2314,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { /* * Samsung SSD 960 EVO drops off the PCIe bus after system - * suspend on a Ryzen board, ASUS PRIME B350M-A. 
+ * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as + * within few minutes after bootup on a Coffee Lake board - + * ASUS PRIME Z370-A */ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && - dmi_match(DMI_BOARD_NAME, "PRIME B350M-A")) + (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || + dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) return NVME_QUIRK_NO_APST; } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 645ba7eee35db7a66a0249d39c7adba514173229..240b0d628222026966db95be698c37031fb0c2c3 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -505,9 +505,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, goto fail; } - /* either variant of SGLs is fine, as we don't support metadata */ - if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && - (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { + /* + * For fabrics, PSDT field shall describe metadata pointer (MPTR) that + * contains an address of a single contiguous physical buffer that is + * byte aligned. + */ + if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto fail; } diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 41b740aed3a346e4bbc610959281649447f83bd4..69bd98421eb167cf093052df043bd4a206e1788d 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -1403,9 +1403,27 @@ lba_hw_init(struct lba_device *d) WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG); } - /* Set HF mode as the default (vs. -1 mode). */ + + /* + * Hard Fail vs. Soft Fail on PCI "Master Abort". + * + * "Master Abort" means the MMIO transaction timed out - usually due to + * the device not responding to an MMIO read. We would like HF to be + * enabled to find driver problems, though it means the system will + * crash with a HPMC. + * + * In SoftFail mode "~0L" is returned as a result of a timeout on the + * pci bus. 
This is like how PCI busses on x86 and most other + * architectures behave. In order to increase compatibility with + * existing (x86) PCI hardware and existing Linux drivers we enable + * Soft Fail mode on PA-RISC now too. + */ stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); +#if defined(ENABLE_HARDFAIL) WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); +#else + WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL); +#endif /* ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 73b724143be02a19ff2eb364a9316a288f1d44c1..c91662927de0e48e1f831f19e5b329cc0f8b68ed 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -531,6 +531,8 @@ struct hv_pci_compl { s32 completion_status; }; +static void hv_pci_onchannelcallback(void *context); + /** * hv_pci_generic_compl() - Invoked for a completion packet * @context: Set up by the sender of the packet. @@ -675,6 +677,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, } } +static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) +{ + u16 ret; + unsigned long flags; + void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + + PCI_VENDOR_ID; + + spin_lock_irqsave(&hpdev->hbus->config_lock, flags); + + /* Choose the function to be read. (See comment above) */ + writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start reading. */ + mb(); + /* Read from that function's config space. */ + ret = readw(addr); + /* + * mb() is not required here, because the spin_unlock_irqrestore() + * is a barrier. 
+ */ + + spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); + + return ret; +} + /** * _hv_pcifront_write_config() - Internal PCI config write * @hpdev: The PCI driver's representation of the device @@ -1121,8 +1148,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) * Since this function is called with IRQ locks held, can't * do normal wait for completion; instead poll. */ - while (!try_wait_for_completion(&comp.comp_pkt.host_event)) + while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { + /* 0xFFFF means an invalid PCI VENDOR ID. */ + if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { + dev_err_once(&hbus->hdev->device, + "the device has gone\n"); + goto free_int_desc; + } + + /* + * When the higher level interrupt code calls us with + * interrupt disabled, we must poll the channel by calling + * the channel callback directly when channel->target_cpu is + * the current CPU. When the higher level interrupt code + * calls us with interrupt enabled, let's add the + * local_bh_disable()/enable() to avoid race. 
+ */ + local_bh_disable(); + + if (hbus->hdev->channel->target_cpu == smp_processor_id()) + hv_pci_onchannelcallback(hbus); + + local_bh_enable(); + + if (hpdev->state == hv_pcichild_ejecting) { + dev_err_once(&hbus->hdev->device, + "the device is being ejected\n"); + goto free_int_desc; + } + udelay(100); + } if (comp.comp_pkt.completion_status < 0) { dev_err(&hbus->hdev->device, diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index ee2adce6bbe80169e11b5fdce55f5901c69010ef..3aacf2d205b5f4fb0bfe9d0bf2793fc9e8449627 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -6512,17 +6512,24 @@ static int msm_pcie_pm_resume(struct pci_dev *dev, dev->bus->number, dev->bus->primary); if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) { - PCIE_DBG(pcie_dev, - "RC%d: entry of PCI framework restore state\n", - pcie_dev->rc_idx); + if (pcie_dev->saved_state) { + PCIE_DBG(pcie_dev, + "RC%d: entry of PCI framework restore state\n", + pcie_dev->rc_idx); - pci_load_and_free_saved_state(dev, - &pcie_dev->saved_state); - pci_restore_state(dev); + pci_load_and_free_saved_state(dev, + &pcie_dev->saved_state); + pci_restore_state(dev); - PCIE_DBG(pcie_dev, - "RC%d: exit of PCI framework restore state\n", - pcie_dev->rc_idx); + PCIE_DBG(pcie_dev, + "RC%d: exit of PCI framework restore state\n", + pcie_dev->rc_idx); + } else { + PCIE_DBG(pcie_dev, + "RC%d: restore rc config space using shadow recovery\n", + pcie_dev->rc_idx); + msm_pcie_cfg_recover(pcie_dev, true); + } } if (pcie_dev->bridge_found) { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index bb0927de79dd7aa220ec72263ca0704c66837c5f..ea69b4dbab6611455d1a45c03852724cf5539f0f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1164,11 +1164,14 @@ static int pci_pm_runtime_suspend(struct device *dev) int error; /* - * If pci_dev->driver is not set (unbound), the device should - * always remain in D0 regardless of the runtime PM status + * If 
pci_dev->driver is not set (unbound), we leave the device in D0, + * but it may go to D3cold when the bridge above it runtime suspends. + * Save its config space in case that happens. */ - if (!pci_dev->driver) + if (!pci_dev->driver) { + pci_save_state(pci_dev); return 0; + } if (!pm || !pm->runtime_suspend) return -ENOSYS; @@ -1216,16 +1219,18 @@ static int pci_pm_runtime_resume(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; /* - * If pci_dev->driver is not set (unbound), the device should - * always remain in D0 regardless of the runtime PM status + * Restoring config space is necessary even if the device is not bound + * to a driver because although we left it in D0, it may have gone to + * D3cold when the bridge above it runtime suspended. */ + pci_restore_standard_config(pci_dev); + if (!pci_dev->driver) return 0; if (!pm || !pm->runtime_resume) return -ENOSYS; - pci_restore_standard_config(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); pci_enable_wake(pci_dev, PCI_D0, false); pci_fixup_device(pci_fixup_resume, pci_dev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 116127a0accb7f255fac27d66c05b676ca0feebe..929d68f744af2bf4f7283f93009aa4de7e7fe36e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3896,6 +3896,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, + quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index c3b615c94b4bffaaff0ce3579e47c4c890a6a2c0..8c8caec3a72cc2b6bc4b6cf1af814b7674976eeb 100644 --- 
a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -452,17 +452,20 @@ static int socket_insert(struct pcmcia_socket *skt) static int socket_suspend(struct pcmcia_socket *skt) { - if (skt->state & SOCKET_SUSPEND) + if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME)) return -EBUSY; mutex_lock(&skt->ops_mutex); - skt->suspended_state = skt->state; + /* store state on first suspend, but not after spurious wakeups */ + if (!(skt->state & SOCKET_IN_RESUME)) + skt->suspended_state = skt->state; skt->socket = dead_socket; skt->ops->set_socket(skt, &skt->socket); if (skt->ops->suspend) skt->ops->suspend(skt); skt->state |= SOCKET_SUSPEND; + skt->state &= ~SOCKET_IN_RESUME; mutex_unlock(&skt->ops_mutex); return 0; } @@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt) skt->ops->set_socket(skt, &skt->socket); if (skt->state & SOCKET_PRESENT) skt->resume_status = socket_setup(skt, resume_delay); + skt->state |= SOCKET_IN_RESUME; mutex_unlock(&skt->ops_mutex); return 0; } @@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt) int ret = 0; mutex_lock(&skt->ops_mutex); - skt->state &= ~SOCKET_SUSPEND; + skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME); mutex_unlock(&skt->ops_mutex); if (!(skt->state & SOCKET_PRESENT)) { diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h index e86cd6b31773ba7fdc9d9b3662bc4fc97b0c4d00..384629ce48f54dffe5f96ed7078bbd5a2a7596c4 100644 --- a/drivers/pcmcia/cs_internal.h +++ b/drivers/pcmcia/cs_internal.h @@ -70,6 +70,7 @@ struct pccard_resource_ops { /* Flags in socket state */ #define SOCKET_PRESENT 0x0008 #define SOCKET_INUSE 0x0010 +#define SOCKET_IN_RESUME 0x0040 #define SOCKET_SUSPEND 0x0080 #define SOCKET_WIN_REQ(i) (0x0100<<(i)) #define SOCKET_CARDBUS 0x8000 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index e17f0351ccc2ec27681fd6aebd02bbb1e2fd74f5..2526971f99299e6e0e40e0cd8571fd6befab298a 100644 --- 
a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -751,8 +751,6 @@ static int qcom_qmp_phy_poweroff(struct phy *phy) struct qmp_phy *qphy = phy_get_drvdata(phy); struct qcom_qmp *qmp = qphy->qmp; - clk_disable_unprepare(qphy->pipe_clk); - regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs); return 0; @@ -936,6 +934,8 @@ static int qcom_qmp_phy_exit(struct phy *phy) const struct qmp_phy_cfg *cfg = qmp->cfg; int i = cfg->num_clks; + clk_disable_unprepare(qphy->pipe_clk); + /* PHY reset */ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h index 5d45115ed5f4e552d193b9bba05e577cb656b6e0..8db9ff5e0c93f63d4fd23fd4f4f06c8129fa14fb 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h +++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.h @@ -129,6 +129,7 @@ #define QSERDES_RX0_AC_JTAG_ENABLE RX_OFF(0, 0x68) #define QSERDES_RX0_UCDR_FO_GAIN RX_OFF(0, 0x08) #define QSERDES_RX0_UCDR_SO_GAIN RX_OFF(0, 0x14) +#define QSERDES_RX0_AC_JTAG_MODE RX_OFF(0, 0x78) #define QSERDES_RX1_SIGDET_LVL RX_OFF(1, 0x120) #define QSERDES_RX1_SIGDET_CNTRL RX_OFF(1, 0x11C) @@ -165,6 +166,7 @@ #define QSERDES_RX1_AC_JTAG_ENABLE RX_OFF(1, 0x68) #define QSERDES_RX1_UCDR_FO_GAIN RX_OFF(1, 0x08) #define QSERDES_RX1_UCDR_SO_GAIN RX_OFF(1, 0x14) +#define QSERDES_RX1_AC_JTAG_MODE RX_OFF(1, 0x78) #define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1) @@ -245,7 +247,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = { UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH3, 0x3B), UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_10_HIGH4, 0xB1), UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0xFF), - UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6F), + UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6D), UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A), UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02), 
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_MID_TERM_CTRL1, 0x43), @@ -257,7 +259,8 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = { UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HS_GEAR_BAND, 0x06), UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HSGEAR_CAPABILITY, 0x03), UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_HSGEAR_CAPABILITY, 0x03), - UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_AC_JTAG_ENABLE, 0x00), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_AC_JTAG_ENABLE, 0x01), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_AC_JTAG_MODE, 0x01), UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FO_GAIN, 0x0C), UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_GAIN, 0x04), }; @@ -302,7 +305,8 @@ static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane[] = { UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH3, 0x3B), UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_MODE_10_HIGH4, 0xB1), UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02), - UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_AC_JTAG_ENABLE, 0x00), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_AC_JTAG_ENABLE, 0x01), + UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_AC_JTAG_MODE, 0x01), UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FO_GAIN, 0x0C), UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_GAIN, 0x04), }; diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c index f1b24f18e9b22d1e1071f0f870724e08b0d0c19e..b0d10934413f2fd19637533b92eda3204c56fc23 100644 --- a/drivers/phy/rockchip/phy-rockchip-emmc.c +++ b/drivers/phy/rockchip/phy-rockchip-emmc.c @@ -76,6 +76,10 @@ #define PHYCTRL_OTAPDLYSEL_MASK 0xf #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7 +#define PHYCTRL_IS_CALDONE(x) \ + ((((x) >> PHYCTRL_CALDONE_SHIFT) & \ + PHYCTRL_CALDONE_MASK) == PHYCTRL_CALDONE_DONE) + struct rockchip_emmc_phy { unsigned int reg_offset; struct regmap *reg_base; @@ -90,6 +94,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) unsigned int freqsel = PHYCTRL_FREQSEL_200M; unsigned long rate; unsigned long timeout; + int ret; /* * Keep phyctrl_pdb and phyctrl_endll low to allow @@ -160,17 
+165,19 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) PHYCTRL_PDB_SHIFT)); /* - * According to the user manual, it asks driver to - * wait 5us for calpad busy trimming + * According to the user manual, it asks driver to wait 5us for + * calpad busy trimming. However it is documented that this value is + * PVT(A.K.A process,voltage and temperature) relevant, so some + * failure cases are found which indicates we should be more tolerant + * to calpad busy trimming. */ - udelay(5); - regmap_read(rk_phy->reg_base, - rk_phy->reg_offset + GRF_EMMCPHY_STATUS, - &caldone); - caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK; - if (caldone != PHYCTRL_CALDONE_DONE) { - pr_err("rockchip_emmc_phy_power: caldone timeout.\n"); - return -ETIMEDOUT; + ret = regmap_read_poll_timeout(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_STATUS, + caldone, PHYCTRL_IS_CALDONE(caldone), + 0, 50); + if (ret) { + pr_err("%s: caldone failed, ret=%d\n", __func__, ret); + return ret; } /* Set the frequency of the DLL operation */ diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 1ff6c357349337b448ba0926018847e079fd206b..b601039d6c69a28d771eff622f0001d70bf84204 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -122,8 +122,10 @@ static int dt_to_map_one_config(struct pinctrl *p, /* OK let's just assume this will appear later then */ return -EPROBE_DEFER; } - if (!pctldev) - pctldev = get_pinctrl_dev_from_of_node(np_pctldev); + /* If we're creating a hog we can use the passed pctldev */ + if (pctldev && (np_pctldev == p->dev->of_node)) + break; + pctldev = get_pinctrl_dev_from_of_node(np_pctldev); if (pctldev) break; /* Do not defer probing of hogs (circular loop) */ diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 447763aad8150382fa4ad75bbeedfcca44092d46..db9cca4a83ff1439b382704aa51bec3294c36d4b 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ 
b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -779,6 +779,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, { int status, ret; bool mirror = false; + struct regmap_config *one_regmap_config = NULL; mutex_init(&mcp->lock); @@ -799,22 +800,36 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, switch (type) { #ifdef CONFIG_SPI_MASTER case MCP_TYPE_S08: - mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x08_regmap); - mcp->reg_shift = 0; - mcp->chip.ngpio = 8; - mcp->chip.label = "mcp23s08"; - break; - case MCP_TYPE_S17: + switch (type) { + case MCP_TYPE_S08: + one_regmap_config = + devm_kmemdup(dev, &mcp23x08_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + mcp->reg_shift = 0; + mcp->chip.ngpio = 8; + mcp->chip.label = "mcp23s08"; + break; + case MCP_TYPE_S17: + one_regmap_config = + devm_kmemdup(dev, &mcp23x17_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + mcp->reg_shift = 1; + mcp->chip.ngpio = 16; + mcp->chip.label = "mcp23s17"; + break; + } + if (!one_regmap_config) + return -ENOMEM; + + one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", (addr & ~0x40) >> 1); mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x17_regmap); - mcp->reg_shift = 1; - mcp->chip.ngpio = 16; - mcp->chip.label = "mcp23s17"; + one_regmap_config); break; case MCP_TYPE_S18: + if (!one_regmap_config) + return -ENOMEM; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, &mcp23x17_regmap); mcp->reg_shift = 1; diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 062ee60ba17bacdc2247d29856d47440af93c4eb..e2e8997f94d6d75122f308502151a70e977a78b1 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -157,6 +157,15 @@ config PINCTRL_SM8150 Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SM8150 platform. 
+config PINCTRL_SDMMAGPIE + tristate "Qualcomm Technologies Inc SDMMAGPIE pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc TLMM block found on the Qualcomm + Technologies Inc SDMMAGPIE platform. + config PINCTRL_SDMSHRIKE tristate "Qualcomm Technologies Inc SDMSHRIKE pin controller driver" depends on GPIOLIB && OF @@ -174,4 +183,14 @@ config PINCTRL_SM6150 This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SM6150 platform. + +config PINCTRL_SDXPRAIRIE + tristate "Qualcomm Technologies Inc SDXPRAIRIE pin controller driver" + depends on GPIOLIB && OF + select PINCTRL_MSM + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for + the Qualcomm Technologies Inc TLMM block found on the Qualcomm + Technologies Inc SDXPRAIRIE platform. + endif diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 8cfd857a8ff6a3102fb3740c08fd6944cc843960..567ac4b0844e82924951b653eb017a923946e04b 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -22,3 +22,5 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o obj-$(CONFIG_PINCTRL_SDMSHRIKE) += pinctrl-sdmshrike.o obj-$(CONFIG_PINCTRL_SM6150) += pinctrl-sm6150.o +obj-$(CONFIG_PINCTRL_SDXPRAIRIE) += pinctrl-sdxprairie.o +obj-$(CONFIG_PINCTRL_SDMMAGPIE) += pinctrl-sdmmagpie.o diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 85660fc49a20518502963e0f8f08d90a545cf7fd..7dc276b59dfa2d0be70851dfa4b242f1d749c767 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -610,6 +611,9 @@ static void msm_gpio_irq_mask(struct irq_data *d) clear_bit(d->hwirq, 
pctrl->enabled_irqs); raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + if (d->parent_data) + irq_chip_mask_parent(d); } static void msm_gpio_irq_enable(struct irq_data *d) @@ -638,6 +642,9 @@ static void msm_gpio_irq_enable(struct irq_data *d) set_bit(d->hwirq, pctrl->enabled_irqs); raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + if (d->parent_data) + irq_chip_enable_parent(d); } static void msm_gpio_irq_unmask(struct irq_data *d) @@ -659,6 +666,9 @@ static void msm_gpio_irq_unmask(struct irq_data *d) set_bit(d->hwirq, pctrl->enabled_irqs); raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + if (d->parent_data) + irq_chip_unmask_parent(d); } static void msm_gpio_irq_ack(struct irq_data *d) @@ -772,6 +782,9 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) raw_spin_unlock_irqrestore(&pctrl->lock, flags); + if (d->parent_data) + irq_chip_set_type_parent(d, type); + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(d, handle_level_irq); else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) @@ -792,9 +805,35 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) raw_spin_unlock_irqrestore(&pctrl->lock, flags); + if (d->parent_data) + irq_chip_set_wake_parent(d, on); + return 0; } +static int msm_gpiochip_irq_reqres(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + if (!try_module_get(chip->owner)) + return -ENODEV; + + if (gpiochip_lock_as_irq(chip, d->hwirq)) { + pr_err("unable to lock HW IRQ %lu for IRQ\n", d->hwirq); + module_put(chip->owner); + return -EINVAL; + } + return 0; +} + +static void msm_gpiochip_irq_relres(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + gpiochip_unlock_as_irq(chip, d->hwirq); + module_put(chip->owner); +} + static struct irq_chip msm_gpio_irq_chip = { .name = "msmgpio", .irq_enable = msm_gpio_irq_enable, @@ -803,6 +842,61 @@ static struct irq_chip msm_gpio_irq_chip = { .irq_ack 
= msm_gpio_irq_ack, .irq_set_type = msm_gpio_irq_set_type, .irq_set_wake = msm_gpio_irq_set_wake, + .irq_request_resources = msm_gpiochip_irq_reqres, + .irq_release_resources = msm_gpiochip_irq_relres, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, +}; + +static void msm_gpio_domain_set_info(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) +{ + struct gpio_chip *gc = d->host_data; + + irq_domain_set_info(d, irq, hwirq, gc->irqchip, d->host_data, + gc->irq_handler, NULL, NULL); + + if (gc->can_sleep) + irq_set_nested_thread(irq, 1); + + irq_set_noprobe(irq); +} + +static int msm_gpio_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) +{ + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 2) + return -EINVAL; + if (hwirq) + *hwirq = fwspec->param[0]; + if (type) + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + return 0; + } + + return -EINVAL; +} + +static int msm_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int ret; + irq_hw_number_t hwirq; + struct irq_fwspec *fwspec = arg; + + ret = msm_gpio_domain_translate(domain, fwspec, &hwirq, NULL); + if (ret) + return ret; + + msm_gpio_domain_set_info(domain, virq, hwirq); + return ret; +} + +static const struct irq_domain_ops msm_gpio_domain_ops = { + .translate = msm_gpio_domain_translate, + .alloc = msm_gpio_domain_alloc, + .free = irq_domain_free_irqs_top, }; static bool is_gpio_dual_edge(struct irq_data *d, irq_hw_number_t *dir_conn_irq) @@ -1196,7 +1290,7 @@ static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl) parent_irq = irq_create_fwspec_mapping(&fwspec); if (dirconn->gpio != 0) { - irq = irq_find_mapping(pctrl->chip.irqdomain, + irq = irq_create_mapping(pctrl->chip.irqdomain, dirconn->gpio); irq_set_parent(irq, parent_irq); @@ -1211,11 +1305,25 @@ static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl) } } +static int 
msm_gpiochip_to_irq(struct gpio_chip *chip, unsigned int offset) +{ + struct irq_fwspec fwspec; + + fwspec.fwnode = of_node_to_fwnode(chip->of_node); + fwspec.param[0] = offset; + fwspec.param[1] = IRQ_TYPE_NONE; + fwspec.param_count = 2; + + return irq_create_fwspec_mapping(&fwspec); +} + static int msm_gpio_init(struct msm_pinctrl *pctrl) { struct gpio_chip *chip; int ret; unsigned ngpio = pctrl->soc->ngpios; + struct device_node *irq_parent = NULL; + struct irq_domain *domain_parent; if (WARN_ON(ngpio > MAX_NR_GPIO)) return -EINVAL; @@ -1227,6 +1335,11 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) chip->parent = pctrl->dev; chip->owner = THIS_MODULE; chip->of_node = pctrl->dev->of_node; + chip->irqchip = &msm_gpio_irq_chip; + chip->irq_handler = handle_fasteoi_irq; + chip->irq_default_type = IRQ_TYPE_NONE; + chip->to_irq = msm_gpiochip_to_irq; + chip->lock_key = NULL; ret = gpiochip_add_data(&pctrl->chip, pctrl); if (ret) { @@ -1241,19 +1354,45 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) return ret; } - ret = gpiochip_irqchip_add(chip, - &msm_gpio_irq_chip, - 0, - handle_fasteoi_irq, - IRQ_TYPE_NONE); - if (ret) { - dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n"); - gpiochip_remove(&pctrl->chip); - return -ENOSYS; - } + irq_parent = of_irq_find_parent(chip->of_node); + if (of_device_is_compatible(irq_parent, "qcom,mpm-gpio")) { + chip->irqchip = &msm_gpio_irq_chip; + chip->irq_handler = handle_fasteoi_irq; + chip->irq_default_type = IRQ_TYPE_NONE; + chip->to_irq = msm_gpiochip_to_irq; + chip->lock_key = NULL; + domain_parent = irq_find_host(irq_parent); + if (!domain_parent) { + pr_err("unable to find parent domain\n"); + gpiochip_remove(&pctrl->chip); + return -ENXIO; + } - gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq, - msm_gpio_irq_handler); + chip->irqdomain = irq_domain_add_hierarchy(domain_parent, 0, + chip->ngpio, + chip->of_node, + &msm_gpio_domain_ops, + chip); + if (!chip->irqdomain) { + 
dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n"); + chip->irqchip = NULL; + gpiochip_remove(&pctrl->chip); + return -ENXIO; + } + } else { + ret = gpiochip_irqchip_add(chip, + &msm_gpio_irq_chip, + 0, + handle_fasteoi_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n"); + gpiochip_remove(&pctrl->chip); + return ret; + } + } + gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, + pctrl->irq, msm_gpio_irq_handler); msm_gpio_setup_dir_connects(pctrl); return 0; diff --git a/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c b/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c new file mode 100644 index 0000000000000000000000000000000000000000..e0a6bedf20b4d28f45ba6eaf308b6a39147c9e48 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sdmmagpie.c @@ -0,0 +1,1494 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define NORTH 0x00500000 +#define SOUTH 0x00900000 +#define WEST 0x00100000 +#define DUMMY 0x0 +#define REG_SIZE 0x1000 +#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = base + REG_SIZE * id, \ + .io_reg = base + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \ + .intr_status_reg = base + 0xc + REG_SIZE * id, \ + .intr_target_reg = base + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = 
#pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } +static const struct pinctrl_pin_desc sdmmagpie_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, "GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + 
PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "GPIO_108"), + PINCTRL_PIN(109, "GPIO_109"), + PINCTRL_PIN(110, "GPIO_110"), + PINCTRL_PIN(111, "GPIO_111"), + PINCTRL_PIN(112, 
"GPIO_112"), + PINCTRL_PIN(113, "GPIO_113"), + PINCTRL_PIN(114, "GPIO_114"), + PINCTRL_PIN(115, "GPIO_115"), + PINCTRL_PIN(116, "GPIO_116"), + PINCTRL_PIN(117, "GPIO_117"), + PINCTRL_PIN(118, "GPIO_118"), + PINCTRL_PIN(119, "SDC1_RCLK"), + PINCTRL_PIN(120, "SDC1_CLK"), + PINCTRL_PIN(121, "SDC1_CMD"), + PINCTRL_PIN(122, "SDC1_DATA"), + PINCTRL_PIN(123, "SDC2_CLK"), + PINCTRL_PIN(124, "SDC2_CMD"), + PINCTRL_PIN(125, "SDC2_DATA"), + PINCTRL_PIN(126, "UFS_RESET"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); 
+DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); +DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); +DECLARE_MSM_GPIO_PINS(108); +DECLARE_MSM_GPIO_PINS(109); +DECLARE_MSM_GPIO_PINS(110); +DECLARE_MSM_GPIO_PINS(111); +DECLARE_MSM_GPIO_PINS(112); +DECLARE_MSM_GPIO_PINS(113); +DECLARE_MSM_GPIO_PINS(114); +DECLARE_MSM_GPIO_PINS(115); +DECLARE_MSM_GPIO_PINS(116); +DECLARE_MSM_GPIO_PINS(117); +DECLARE_MSM_GPIO_PINS(118); + +static const unsigned int sdc1_rclk_pins[] = { 119 }; +static const 
unsigned int sdc1_clk_pins[] = { 120 }; +static const unsigned int sdc1_cmd_pins[] = { 121 }; +static const unsigned int sdc1_data_pins[] = { 122 }; +static const unsigned int sdc2_clk_pins[] = { 123 }; +static const unsigned int sdc2_cmd_pins[] = { 124 }; +static const unsigned int sdc2_data_pins[] = { 125 }; +static const unsigned int ufs_reset_pins[] = { 126 }; + +enum sdmmagpie_functions { + msm_mux_qup01, + msm_mux_gpio, + msm_mux_phase_flag0, + msm_mux_phase_flag1, + msm_mux_phase_flag3, + msm_mux_dbg_out, + msm_mux_qdss_cti, + msm_mux_qup11, + msm_mux_phase_flag6, + msm_mux_ddr_pxi0, + msm_mux_ddr_bist, + msm_mux_phase_flag9, + msm_mux_atest_tsens2, + msm_mux_vsense_trigger, + msm_mux_atest_usb1, + msm_mux_GP_PDM1, + msm_mux_mdp_vsync, + msm_mux_phase_flag2, + msm_mux_wlan2_adc1, + msm_mux_atest_usb11, + msm_mux_ddr_pxi2, + msm_mux_edp_lcd, + msm_mux_phase_flag24, + msm_mux_wlan2_adc0, + msm_mux_atest_usb10, + msm_mux_m_voc, + msm_mux_phase_flag4, + msm_mux_ddr_pxi3, + msm_mux_cam_mclk, + msm_mux_pll_bypassnl, + msm_mux_phase_flag5, + msm_mux_qdss_gpio0, + msm_mux_pll_reset, + msm_mux_phase_flag27, + msm_mux_qdss_gpio1, + msm_mux_phase_flag10, + msm_mux_qdss_gpio2, + msm_mux_phase_flag23, + msm_mux_qdss_gpio3, + msm_mux_cci_i2c, + msm_mux_phase_flag12, + msm_mux_qdss_gpio4, + msm_mux_qdss_gpio5, + msm_mux_qdss_gpio6, + msm_mux_qdss_gpio7, + msm_mux_cci_timer0, + msm_mux_gcc_gp2, + msm_mux_qdss_gpio8, + msm_mux_cci_timer1, + msm_mux_gcc_gp3, + msm_mux_qdss_gpio, + msm_mux_cci_timer2, + msm_mux_qdss_gpio9, + msm_mux_cci_timer3, + msm_mux_cci_async, + msm_mux_phase_flag19, + msm_mux_qdss_gpio10, + msm_mux_cci_timer4, + msm_mux_phase_flag18, + msm_mux_qdss_gpio11, + msm_mux_JITTER_BIST, + msm_mux_phase_flag17, + msm_mux_qdss_gpio12, + msm_mux_PLL_BIST, + msm_mux_phase_flag16, + msm_mux_qdss_gpio13, + msm_mux_AGERA_PLL, + msm_mux_phase_flag15, + msm_mux_qdss_gpio14, + msm_mux_phase_flag14, + msm_mux_qdss_gpio15, + msm_mux_atest_tsens, + msm_mux_phase_flag13, + 
msm_mux_sd_write, + msm_mux_qup02, + msm_mux_phase_flag28, + msm_mux_phase_flag29, + msm_mux_GP_PDM0, + msm_mux_phase_flag30, + msm_mux_qup03, + msm_mux_phase_flag31, + msm_mux_phase_flag20, + msm_mux_wlan1_adc0, + msm_mux_atest_usb12, + msm_mux_ddr_pxi1, + msm_mux_qup12, + msm_mux_phase_flag22, + msm_mux_phase_flag21, + msm_mux_wlan1_adc1, + msm_mux_atest_usb13, + msm_mux_qup13, + msm_mux_gcc_gp1, + msm_mux_pri_mi2s, + msm_mux_qup00, + msm_mux_wsa_clk, + msm_mux_pri_mi2s_ws, + msm_mux_wsa_data, + msm_mux_atest_usb2, + msm_mux_atest_usb23, + msm_mux_ter_mi2s, + msm_mux_qup04, + msm_mux_atest_usb22, + msm_mux_atest_usb21, + msm_mux_atest_usb20, + msm_mux_phase_flag26, + msm_mux_sec_mi2s, + msm_mux_GP_PDM2, + msm_mux_phase_flag25, + msm_mux_qua_mi2s, + msm_mux_qup10, + msm_mux_tsif1_error, + msm_mux_phase_flag11, + msm_mux_tsif1_sync, + msm_mux_phase_flag8, + msm_mux_tsif1_clk, + msm_mux_tgu_ch3, + msm_mux_phase_flag7, + msm_mux_tsif1_en, + msm_mux_mdp_vsync0, + msm_mux_mdp_vsync1, + msm_mux_mdp_vsync2, + msm_mux_mdp_vsync3, + msm_mux_tgu_ch0, + msm_mux_tsif1_data, + msm_mux_sdc4_cmd, + msm_mux_tgu_ch1, + msm_mux_tsif2_error, + msm_mux_sdc43, + msm_mux_vfr_1, + msm_mux_tgu_ch2, + msm_mux_tsif2_clk, + msm_mux_sdc4_clk, + msm_mux_pci_e, + msm_mux_tsif2_en, + msm_mux_sdc42, + msm_mux_tsif2_data, + msm_mux_sdc41, + msm_mux_tsif2_sync, + msm_mux_sdc40, + msm_mux_ldo_en, + msm_mux_ldo_update, + msm_mux_prng_rosc, + msm_mux_uim2_data, + msm_mux_uim2_clk, + msm_mux_uim2_reset, + msm_mux_uim2_present, + msm_mux_uim1_data, + msm_mux_uim1_clk, + msm_mux_uim1_reset, + msm_mux_uim1_present, + msm_mux_NAV_PPS, + msm_mux_GPS_TX, + msm_mux_uim_batt, + msm_mux_edp_hot, + msm_mux_aoss_cti, + msm_mux_atest_char, + msm_mux_adsp_ext, + msm_mux_atest_char3, + msm_mux_atest_char2, + msm_mux_atest_char1, + msm_mux_atest_char0, + msm_mux_qup15, + msm_mux_qlink_request, + msm_mux_qlink_enable, + msm_mux_pa_indicator, + msm_mux_usb_phy, + msm_mux_mss_lte, + msm_mux_qup14, + msm_mux_NA, +}; + 
+static const char * const qup01_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio12", "gpio37", +}; +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", + "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", + "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", + "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", + "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", + "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91", + "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", + "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", + "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110", + "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116", + "gpio117", "gpio118", +}; +static const char * const phase_flag0_groups[] = { + "gpio0", +}; +static const char * const phase_flag1_groups[] = { + "gpio1", +}; +static const char * const phase_flag3_groups[] = { + "gpio2", +}; +static const char * const dbg_out_groups[] = { + "gpio3", +}; +static const char * const qdss_cti_groups[] = { + "gpio4", "gpio5", "gpio32", "gpio44", "gpio45", "gpio63", +}; +static const char * const qup11_groups[] = { + "gpio6", "gpio7", "gpio8", "gpio9", +}; +static const char * const phase_flag6_groups[] = { + "gpio6", +}; +static const char * const ddr_pxi0_groups[] = { + "gpio6", "gpio7", +}; 
+static const char * const ddr_bist_groups[] = { + "gpio7", "gpio8", "gpio9", "gpio10", +}; +static const char * const phase_flag9_groups[] = { + "gpio7", +}; +static const char * const atest_tsens2_groups[] = { + "gpio7", +}; +static const char * const vsense_trigger_groups[] = { + "gpio7", +}; +static const char * const atest_usb1_groups[] = { + "gpio7", +}; +static const char * const GP_PDM1_groups[] = { + "gpio8", "gpio50", +}; +static const char * const mdp_vsync_groups[] = { + "gpio10", "gpio11", "gpio12", "gpio70", "gpio71", +}; +static const char * const phase_flag2_groups[] = { + "gpio10", +}; +static const char * const wlan2_adc1_groups[] = { + "gpio10", +}; +static const char * const atest_usb11_groups[] = { + "gpio10", +}; +static const char * const ddr_pxi2_groups[] = { + "gpio10", "gpio11", +}; +static const char * const edp_lcd_groups[] = { + "gpio11", +}; +static const char * const phase_flag24_groups[] = { + "gpio11", +}; +static const char * const wlan2_adc0_groups[] = { + "gpio11", +}; +static const char * const atest_usb10_groups[] = { + "gpio11", +}; +static const char * const m_voc_groups[] = { + "gpio12", +}; +static const char * const phase_flag4_groups[] = { + "gpio12", +}; +static const char * const ddr_pxi3_groups[] = { + "gpio12", "gpio13", +}; +static const char * const cam_mclk_groups[] = { + "gpio13", "gpio14", "gpio15", "gpio16", +}; +static const char * const pll_bypassnl_groups[] = { + "gpio13", +}; +static const char * const phase_flag5_groups[] = { + "gpio13", +}; +static const char * const qdss_gpio0_groups[] = { + "gpio13", "gpio86", +}; +static const char * const pll_reset_groups[] = { + "gpio14", +}; +static const char * const phase_flag27_groups[] = { + "gpio14", +}; +static const char * const qdss_gpio1_groups[] = { + "gpio14", "gpio87", +}; +static const char * const phase_flag10_groups[] = { + "gpio15", +}; +static const char * const qdss_gpio2_groups[] = { + "gpio15", "gpio88", +}; +static const char * const 
phase_flag23_groups[] = { + "gpio16", +}; +static const char * const qdss_gpio3_groups[] = { + "gpio16", "gpio89", +}; +static const char * const cci_i2c_groups[] = { + "gpio17", "gpio18", "gpio19", "gpio20", "gpio27", "gpio28", +}; +static const char * const phase_flag12_groups[] = { + "gpio17", +}; +static const char * const qdss_gpio4_groups[] = { + "gpio17", "gpio90", +}; +static const char * const qdss_gpio5_groups[] = { + "gpio18", "gpio91", +}; +static const char * const qdss_gpio6_groups[] = { + "gpio19", "gpio34", +}; +static const char * const qdss_gpio7_groups[] = { + "gpio20", "gpio35", +}; +static const char * const cci_timer0_groups[] = { + "gpio21", +}; +static const char * const gcc_gp2_groups[] = { + "gpio21", +}; +static const char * const qdss_gpio8_groups[] = { + "gpio21", "gpio53", +}; +static const char * const cci_timer1_groups[] = { + "gpio22", +}; +static const char * const gcc_gp3_groups[] = { + "gpio22", +}; +static const char * const qdss_gpio_groups[] = { + "gpio22", "gpio30", "gpio93", "gpio104", +}; +static const char * const cci_timer2_groups[] = { + "gpio23", +}; +static const char * const qdss_gpio9_groups[] = { + "gpio23", "gpio54", +}; +static const char * const cci_timer3_groups[] = { + "gpio24", +}; +static const char * const cci_async_groups[] = { + "gpio24", "gpio25", "gpio26", +}; +static const char * const phase_flag19_groups[] = { + "gpio24", +}; +static const char * const qdss_gpio10_groups[] = { + "gpio24", "gpio55", +}; +static const char * const cci_timer4_groups[] = { + "gpio25", +}; +static const char * const phase_flag18_groups[] = { + "gpio25", +}; +static const char * const qdss_gpio11_groups[] = { + "gpio25", "gpio57", +}; +static const char * const JITTER_BIST_groups[] = { + "gpio26", +}; +static const char * const phase_flag17_groups[] = { + "gpio26", +}; +static const char * const qdss_gpio12_groups[] = { + "gpio26", "gpio31", +}; +static const char * const PLL_BIST_groups[] = { + "gpio27", +}; +static const 
char * const phase_flag16_groups[] = { + "gpio27", +}; +static const char * const qdss_gpio13_groups[] = { + "gpio27", "gpio56", +}; +static const char * const AGERA_PLL_groups[] = { + "gpio28", +}; +static const char * const phase_flag15_groups[] = { + "gpio28", +}; +static const char * const qdss_gpio14_groups[] = { + "gpio28", "gpio36", +}; +static const char * const phase_flag14_groups[] = { + "gpio29", +}; +static const char * const qdss_gpio15_groups[] = { + "gpio29", "gpio37", +}; +static const char * const atest_tsens_groups[] = { + "gpio29", +}; +static const char * const phase_flag13_groups[] = { + "gpio30", +}; +static const char * const sd_write_groups[] = { + "gpio33", +}; +static const char * const qup02_groups[] = { + "gpio34", "gpio35", +}; +static const char * const phase_flag28_groups[] = { + "gpio35", +}; +static const char * const phase_flag29_groups[] = { + "gpio36", +}; +static const char * const GP_PDM0_groups[] = { + "gpio37", "gpio68", +}; +static const char * const phase_flag30_groups[] = { + "gpio37", +}; +static const char * const qup03_groups[] = { + "gpio38", "gpio39", "gpio40", "gpio41", +}; +static const char * const phase_flag31_groups[] = { + "gpio38", +}; +static const char * const phase_flag20_groups[] = { + "gpio39", +}; +static const char * const wlan1_adc0_groups[] = { + "gpio39", +}; +static const char * const atest_usb12_groups[] = { + "gpio39", +}; +static const char * const ddr_pxi1_groups[] = { + "gpio39", "gpio44", +}; +static const char * const qup12_groups[] = { + "gpio42", "gpio43", "gpio44", "gpio45", +}; +static const char * const phase_flag22_groups[] = { + "gpio43", +}; +static const char * const phase_flag21_groups[] = { + "gpio44", +}; +static const char * const wlan1_adc1_groups[] = { + "gpio44", +}; +static const char * const atest_usb13_groups[] = { + "gpio44", +}; +static const char * const qup13_groups[] = { + "gpio46", "gpio47", +}; +static const char * const gcc_gp1_groups[] = { + "gpio48", "gpio56", +}; 
+static const char * const pri_mi2s_groups[] = { + "gpio49", "gpio51", "gpio52", +}; +static const char * const qup00_groups[] = { + "gpio49", "gpio50", "gpio51", "gpio52", "gpio57", "gpio58", +}; +static const char * const wsa_clk_groups[] = { + "gpio49", +}; +static const char * const pri_mi2s_ws_groups[] = { + "gpio50", +}; +static const char * const wsa_data_groups[] = { + "gpio50", +}; +static const char * const atest_usb2_groups[] = { + "gpio51", +}; +static const char * const atest_usb23_groups[] = { + "gpio52", +}; +static const char * const ter_mi2s_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56", +}; +static const char * const qup04_groups[] = { + "gpio53", "gpio54", "gpio55", "gpio56", +}; +static const char * const atest_usb22_groups[] = { + "gpio53", +}; +static const char * const atest_usb21_groups[] = { + "gpio54", +}; +static const char * const atest_usb20_groups[] = { + "gpio55", +}; +static const char * const phase_flag26_groups[] = { + "gpio56", +}; +static const char * const sec_mi2s_groups[] = { + "gpio57", +}; +static const char * const GP_PDM2_groups[] = { + "gpio57", +}; +static const char * const phase_flag25_groups[] = { + "gpio57", +}; +static const char * const qua_mi2s_groups[] = { + "gpio58", +}; +static const char * const qup10_groups[] = { + "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", +}; +static const char * const tsif1_error_groups[] = { + "gpio60", +}; +static const char * const phase_flag11_groups[] = { + "gpio60", +}; +static const char * const tsif1_sync_groups[] = { + "gpio61", +}; +static const char * const phase_flag8_groups[] = { + "gpio61", +}; +static const char * const tsif1_clk_groups[] = { + "gpio62", +}; +static const char * const tgu_ch3_groups[] = { + "gpio62", +}; +static const char * const phase_flag7_groups[] = { + "gpio62", +}; +static const char * const tsif1_en_groups[] = { + "gpio63", +}; +static const char * const mdp_vsync0_groups[] = { + "gpio63", +}; +static const char * 
const mdp_vsync1_groups[] = { + "gpio63", +}; +static const char * const mdp_vsync2_groups[] = { + "gpio63", +}; +static const char * const mdp_vsync3_groups[] = { + "gpio63", +}; +static const char * const tgu_ch0_groups[] = { + "gpio63", +}; +static const char * const tsif1_data_groups[] = { + "gpio64", +}; +static const char * const sdc4_cmd_groups[] = { + "gpio64", +}; +static const char * const tgu_ch1_groups[] = { + "gpio64", +}; +static const char * const tsif2_error_groups[] = { + "gpio65", +}; +static const char * const sdc43_groups[] = { + "gpio65", +}; +static const char * const vfr_1_groups[] = { + "gpio65", +}; +static const char * const tgu_ch2_groups[] = { + "gpio65", +}; +static const char * const tsif2_clk_groups[] = { + "gpio66", +}; +static const char * const sdc4_clk_groups[] = { + "gpio66", +}; +static const char * const pci_e_groups[] = { + "gpio66", "gpio67", "gpio68", +}; +static const char * const tsif2_en_groups[] = { + "gpio67", +}; +static const char * const sdc42_groups[] = { + "gpio67", +}; +static const char * const tsif2_data_groups[] = { + "gpio68", +}; +static const char * const sdc41_groups[] = { + "gpio68", +}; +static const char * const tsif2_sync_groups[] = { + "gpio69", +}; +static const char * const sdc40_groups[] = { + "gpio69", +}; +static const char * const ldo_en_groups[] = { + "gpio70", +}; +static const char * const ldo_update_groups[] = { + "gpio71", +}; +static const char * const prng_rosc_groups[] = { + "gpio72", +}; +static const char * const uim2_data_groups[] = { + "gpio75", +}; +static const char * const uim2_clk_groups[] = { + "gpio76", +}; +static const char * const uim2_reset_groups[] = { + "gpio77", +}; +static const char * const uim2_present_groups[] = { + "gpio78", +}; +static const char * const uim1_data_groups[] = { + "gpio79", +}; +static const char * const uim1_clk_groups[] = { + "gpio80", +}; +static const char * const uim1_reset_groups[] = { + "gpio81", +}; +static const char * const 
uim1_present_groups[] = { + "gpio82", +}; +static const char * const NAV_PPS_groups[] = { + "gpio83", "gpio83", "gpio84", "gpio84", "gpio107", "gpio107", +}; +static const char * const GPS_TX_groups[] = { + "gpio83", "gpio84", "gpio107", "gpio109", +}; +static const char * const uim_batt_groups[] = { + "gpio85", +}; +static const char * const edp_hot_groups[] = { + "gpio85", +}; +static const char * const aoss_cti_groups[] = { + "gpio85", +}; +static const char * const atest_char_groups[] = { + "gpio86", +}; +static const char * const adsp_ext_groups[] = { + "gpio87", +}; +static const char * const atest_char3_groups[] = { + "gpio87", +}; +static const char * const atest_char2_groups[] = { + "gpio88", +}; +static const char * const atest_char1_groups[] = { + "gpio89", +}; +static const char * const atest_char0_groups[] = { + "gpio90", +}; +static const char * const qup15_groups[] = { + "gpio92", "gpio101", "gpio102", "gpio103", +}; +static const char * const qlink_request_groups[] = { + "gpio96", +}; +static const char * const qlink_enable_groups[] = { + "gpio97", +}; +static const char * const pa_indicator_groups[] = { + "gpio99", +}; +static const char * const usb_phy_groups[] = { + "gpio104", +}; +static const char * const mss_lte_groups[] = { + "gpio108", "gpio109", +}; +static const char * const qup14_groups[] = { + "gpio110", "gpio111", "gpio112", "gpio113", +}; + +static const struct msm_function sdmmagpie_functions[] = { + FUNCTION(qup01), + FUNCTION(gpio), + FUNCTION(phase_flag0), + FUNCTION(phase_flag1), + FUNCTION(phase_flag3), + FUNCTION(dbg_out), + FUNCTION(qdss_cti), + FUNCTION(qup11), + FUNCTION(phase_flag6), + FUNCTION(ddr_pxi0), + FUNCTION(ddr_bist), + FUNCTION(phase_flag9), + FUNCTION(atest_tsens2), + FUNCTION(vsense_trigger), + FUNCTION(atest_usb1), + FUNCTION(GP_PDM1), + FUNCTION(mdp_vsync), + FUNCTION(phase_flag2), + FUNCTION(wlan2_adc1), + FUNCTION(atest_usb11), + FUNCTION(ddr_pxi2), + FUNCTION(edp_lcd), + FUNCTION(phase_flag24), + 
FUNCTION(wlan2_adc0), + FUNCTION(atest_usb10), + FUNCTION(m_voc), + FUNCTION(phase_flag4), + FUNCTION(ddr_pxi3), + FUNCTION(cam_mclk), + FUNCTION(pll_bypassnl), + FUNCTION(phase_flag5), + FUNCTION(qdss_gpio0), + FUNCTION(pll_reset), + FUNCTION(phase_flag27), + FUNCTION(qdss_gpio1), + FUNCTION(phase_flag10), + FUNCTION(qdss_gpio2), + FUNCTION(phase_flag23), + FUNCTION(qdss_gpio3), + FUNCTION(cci_i2c), + FUNCTION(phase_flag12), + FUNCTION(qdss_gpio4), + FUNCTION(qdss_gpio5), + FUNCTION(qdss_gpio6), + FUNCTION(qdss_gpio7), + FUNCTION(cci_timer0), + FUNCTION(gcc_gp2), + FUNCTION(qdss_gpio8), + FUNCTION(cci_timer1), + FUNCTION(gcc_gp3), + FUNCTION(qdss_gpio), + FUNCTION(cci_timer2), + FUNCTION(qdss_gpio9), + FUNCTION(cci_timer3), + FUNCTION(cci_async), + FUNCTION(phase_flag19), + FUNCTION(qdss_gpio10), + FUNCTION(cci_timer4), + FUNCTION(phase_flag18), + FUNCTION(qdss_gpio11), + FUNCTION(JITTER_BIST), + FUNCTION(phase_flag17), + FUNCTION(qdss_gpio12), + FUNCTION(PLL_BIST), + FUNCTION(phase_flag16), + FUNCTION(qdss_gpio13), + FUNCTION(AGERA_PLL), + FUNCTION(phase_flag15), + FUNCTION(qdss_gpio14), + FUNCTION(phase_flag14), + FUNCTION(qdss_gpio15), + FUNCTION(atest_tsens), + FUNCTION(phase_flag13), + FUNCTION(sd_write), + FUNCTION(qup02), + FUNCTION(phase_flag28), + FUNCTION(phase_flag29), + FUNCTION(GP_PDM0), + FUNCTION(phase_flag30), + FUNCTION(qup03), + FUNCTION(phase_flag31), + FUNCTION(phase_flag20), + FUNCTION(wlan1_adc0), + FUNCTION(atest_usb12), + FUNCTION(ddr_pxi1), + FUNCTION(qup12), + FUNCTION(phase_flag22), + FUNCTION(phase_flag21), + FUNCTION(wlan1_adc1), + FUNCTION(atest_usb13), + FUNCTION(qup13), + FUNCTION(gcc_gp1), + FUNCTION(pri_mi2s), + FUNCTION(qup00), + FUNCTION(wsa_clk), + FUNCTION(pri_mi2s_ws), + FUNCTION(wsa_data), + FUNCTION(atest_usb2), + FUNCTION(atest_usb23), + FUNCTION(ter_mi2s), + FUNCTION(qup04), + FUNCTION(atest_usb22), + FUNCTION(atest_usb21), + FUNCTION(atest_usb20), + FUNCTION(phase_flag26), + FUNCTION(sec_mi2s), + FUNCTION(GP_PDM2), + 
FUNCTION(phase_flag25), + FUNCTION(qua_mi2s), + FUNCTION(qup10), + FUNCTION(tsif1_error), + FUNCTION(phase_flag11), + FUNCTION(tsif1_sync), + FUNCTION(phase_flag8), + FUNCTION(tsif1_clk), + FUNCTION(tgu_ch3), + FUNCTION(phase_flag7), + FUNCTION(tsif1_en), + FUNCTION(mdp_vsync0), + FUNCTION(mdp_vsync1), + FUNCTION(mdp_vsync2), + FUNCTION(mdp_vsync3), + FUNCTION(tgu_ch0), + FUNCTION(tsif1_data), + FUNCTION(sdc4_cmd), + FUNCTION(tgu_ch1), + FUNCTION(tsif2_error), + FUNCTION(sdc43), + FUNCTION(vfr_1), + FUNCTION(tgu_ch2), + FUNCTION(tsif2_clk), + FUNCTION(sdc4_clk), + FUNCTION(pci_e), + FUNCTION(tsif2_en), + FUNCTION(sdc42), + FUNCTION(tsif2_data), + FUNCTION(sdc41), + FUNCTION(tsif2_sync), + FUNCTION(sdc40), + FUNCTION(ldo_en), + FUNCTION(ldo_update), + FUNCTION(prng_rosc), + FUNCTION(uim2_data), + FUNCTION(uim2_clk), + FUNCTION(uim2_reset), + FUNCTION(uim2_present), + FUNCTION(uim1_data), + FUNCTION(uim1_clk), + FUNCTION(uim1_reset), + FUNCTION(uim1_present), + FUNCTION(NAV_PPS), + FUNCTION(GPS_TX), + FUNCTION(uim_batt), + FUNCTION(edp_hot), + FUNCTION(aoss_cti), + FUNCTION(atest_char), + FUNCTION(adsp_ext), + FUNCTION(atest_char3), + FUNCTION(atest_char2), + FUNCTION(atest_char1), + FUNCTION(atest_char0), + FUNCTION(qup15), + FUNCTION(qlink_request), + FUNCTION(qlink_enable), + FUNCTION(pa_indicator), + FUNCTION(usb_phy), + FUNCTION(mss_lte), + FUNCTION(qup14), +}; + +/* Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup sdmmagpie_groups[] = { + [0] = PINGROUP(0, SOUTH, qup01, NA, phase_flag0, NA, NA, NA, NA, NA, + NA), + [1] = PINGROUP(1, SOUTH, qup01, NA, phase_flag1, NA, NA, NA, NA, NA, + NA), + [2] = PINGROUP(2, SOUTH, qup01, NA, phase_flag3, NA, NA, NA, NA, NA, + NA), + [3] = PINGROUP(3, SOUTH, qup01, dbg_out, NA, NA, NA, NA, NA, NA, NA), + [4] = PINGROUP(4, NORTH, NA, qdss_cti, NA, NA, NA, NA, NA, NA, NA), + [5] = PINGROUP(5, NORTH, NA, qdss_cti, NA, NA, NA, NA, NA, NA, NA), + [6] = PINGROUP(6, NORTH, qup11, NA, phase_flag6, ddr_pxi0, NA, NA, NA, + NA, NA), + [7] = PINGROUP(7, NORTH, qup11, ddr_bist, NA, phase_flag9, + atest_tsens2, vsense_trigger, atest_usb1, ddr_pxi0, NA), + [8] = PINGROUP(8, NORTH, qup11, GP_PDM1, ddr_bist, NA, NA, NA, NA, NA, + NA), + [9] = PINGROUP(9, NORTH, qup11, ddr_bist, NA, NA, NA, NA, NA, NA, NA), + [10] = PINGROUP(10, NORTH, mdp_vsync, ddr_bist, NA, phase_flag2, + wlan2_adc1, atest_usb11, ddr_pxi2, NA, NA), + [11] = PINGROUP(11, NORTH, mdp_vsync, edp_lcd, NA, phase_flag24, + wlan2_adc0, atest_usb10, ddr_pxi2, NA, NA), + [12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, qup01, NA, phase_flag4, + ddr_pxi3, NA, NA, NA), + [13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, NA, phase_flag5, + qdss_gpio0, ddr_pxi3, NA, NA, NA), + [14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, NA, phase_flag27, + qdss_gpio1, NA, NA, NA, NA), + [15] = PINGROUP(15, SOUTH, cam_mclk, NA, phase_flag10, qdss_gpio2, NA, + NA, NA, NA, NA), + [16] = PINGROUP(16, SOUTH, cam_mclk, NA, phase_flag23, qdss_gpio3, NA, + NA, NA, NA, NA), + [17] = PINGROUP(17, SOUTH, cci_i2c, NA, phase_flag12, qdss_gpio4, NA, + NA, NA, NA, NA), + [18] = PINGROUP(18, SOUTH, cci_i2c, qdss_gpio5, NA, NA, NA, NA, NA, NA, + NA), + [19] = PINGROUP(19, SOUTH, cci_i2c, qdss_gpio6, NA, NA, NA, NA, NA, NA, + NA), + [20] = PINGROUP(20, SOUTH, cci_i2c, qdss_gpio7, NA, NA, NA, NA, NA, NA, + NA), + [21] = PINGROUP(21, SOUTH, cci_timer0, gcc_gp2, NA, qdss_gpio8, NA, NA, + NA, NA, NA), 
+ [22] = PINGROUP(22, SOUTH, cci_timer1, gcc_gp3, NA, qdss_gpio, NA, NA, + NA, NA, NA), + [23] = PINGROUP(23, SOUTH, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA, + NA, NA), + [24] = PINGROUP(24, SOUTH, cci_timer3, cci_async, NA, phase_flag19, + qdss_gpio10, NA, NA, NA, NA), + [25] = PINGROUP(25, SOUTH, cci_timer4, cci_async, NA, phase_flag18, + qdss_gpio11, NA, NA, NA, NA), + [26] = PINGROUP(26, SOUTH, cci_async, JITTER_BIST, NA, phase_flag17, + qdss_gpio12, NA, NA, NA, NA), + [27] = PINGROUP(27, SOUTH, cci_i2c, PLL_BIST, NA, phase_flag16, + qdss_gpio13, NA, NA, NA, NA), + [28] = PINGROUP(28, SOUTH, cci_i2c, AGERA_PLL, NA, phase_flag15, + qdss_gpio14, NA, NA, NA, NA), + [29] = PINGROUP(29, NORTH, NA, NA, phase_flag14, qdss_gpio15, + atest_tsens, NA, NA, NA, NA), + [30] = PINGROUP(30, SOUTH, NA, phase_flag13, qdss_gpio, NA, NA, NA, NA, + NA, NA), + [31] = PINGROUP(31, WEST, NA, qdss_gpio12, NA, NA, NA, NA, NA, NA, NA), + [32] = PINGROUP(32, NORTH, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA), + [33] = PINGROUP(33, NORTH, sd_write, NA, NA, NA, NA, NA, NA, NA, NA), + [34] = PINGROUP(34, SOUTH, qup02, qdss_gpio6, NA, NA, NA, NA, NA, NA, + NA), + [35] = PINGROUP(35, SOUTH, qup02, NA, phase_flag28, qdss_gpio7, NA, NA, + NA, NA, NA), + [36] = PINGROUP(36, SOUTH, NA, phase_flag29, qdss_gpio14, NA, NA, NA, + NA, NA, NA), + [37] = PINGROUP(37, SOUTH, qup01, GP_PDM0, NA, phase_flag30, + qdss_gpio15, NA, NA, NA, NA), + [38] = PINGROUP(38, SOUTH, qup03, NA, phase_flag31, NA, NA, NA, NA, NA, + NA), + [39] = PINGROUP(39, SOUTH, qup03, NA, phase_flag20, NA, wlan1_adc0, + atest_usb12, ddr_pxi1, NA, NA), + [40] = PINGROUP(40, SOUTH, qup03, NA, NA, NA, NA, NA, NA, NA, NA), + [41] = PINGROUP(41, SOUTH, qup03, NA, NA, NA, NA, NA, NA, NA, NA), + [42] = PINGROUP(42, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA), + [43] = PINGROUP(43, NORTH, qup12, NA, phase_flag22, NA, NA, NA, NA, NA, + NA), + [44] = PINGROUP(44, NORTH, qup12, NA, phase_flag21, qdss_cti, NA, + wlan1_adc1, atest_usb13, 
ddr_pxi1, NA), + [45] = PINGROUP(45, NORTH, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA), + [46] = PINGROUP(46, NORTH, qup13, NA, NA, NA, NA, NA, NA, NA, NA), + [47] = PINGROUP(47, NORTH, qup13, NA, NA, NA, NA, NA, NA, NA, NA), + [48] = PINGROUP(48, WEST, gcc_gp1, NA, NA, NA, NA, NA, NA, NA, NA), + [49] = PINGROUP(49, WEST, pri_mi2s, qup00, wsa_clk, NA, NA, NA, NA, NA, + NA), + [50] = PINGROUP(50, WEST, pri_mi2s_ws, qup00, wsa_data, GP_PDM1, NA, + NA, NA, NA, NA), + [51] = PINGROUP(51, WEST, pri_mi2s, qup00, atest_usb2, NA, NA, NA, NA, + NA, NA), + [52] = PINGROUP(52, WEST, pri_mi2s, qup00, atest_usb23, NA, NA, NA, NA, + NA, NA), + [53] = PINGROUP(53, WEST, ter_mi2s, qup04, qdss_gpio8, atest_usb22, NA, + NA, NA, NA, NA), + [54] = PINGROUP(54, WEST, ter_mi2s, qup04, qdss_gpio9, atest_usb21, NA, + NA, NA, NA, NA), + [55] = PINGROUP(55, WEST, ter_mi2s, qup04, qdss_gpio10, atest_usb20, + NA, NA, NA, NA, NA), + [56] = PINGROUP(56, WEST, ter_mi2s, qup04, gcc_gp1, NA, phase_flag26, + qdss_gpio13, NA, NA, NA), + [57] = PINGROUP(57, WEST, sec_mi2s, qup00, GP_PDM2, NA, phase_flag25, + qdss_gpio11, NA, NA, NA), + [58] = PINGROUP(58, WEST, qua_mi2s, qup00, NA, NA, NA, NA, NA, NA, NA), + [59] = PINGROUP(59, NORTH, qup10, NA, NA, NA, NA, NA, NA, NA, NA), + [60] = PINGROUP(60, NORTH, qup10, tsif1_error, NA, phase_flag11, NA, + NA, NA, NA, NA), + [61] = PINGROUP(61, NORTH, qup10, tsif1_sync, NA, phase_flag8, NA, NA, + NA, NA, NA), + [62] = PINGROUP(62, NORTH, qup10, tsif1_clk, tgu_ch3, NA, phase_flag7, + NA, NA, NA, NA), + [63] = PINGROUP(63, NORTH, tsif1_en, mdp_vsync0, qup10, mdp_vsync1, + mdp_vsync2, mdp_vsync3, tgu_ch0, qdss_cti, NA), + [64] = PINGROUP(64, NORTH, tsif1_data, sdc4_cmd, qup10, tgu_ch1, NA, + NA, NA, NA, NA), + [65] = PINGROUP(65, NORTH, tsif2_error, sdc43, qup10, vfr_1, tgu_ch2, + NA, NA, NA, NA), + [66] = PINGROUP(66, NORTH, tsif2_clk, sdc4_clk, pci_e, NA, NA, NA, NA, + NA, NA), + [67] = PINGROUP(67, NORTH, tsif2_en, sdc42, pci_e, NA, NA, NA, NA, NA, + NA), + 
[68] = PINGROUP(68, NORTH, tsif2_data, sdc41, pci_e, GP_PDM0, NA, NA, + NA, NA, NA), + [69] = PINGROUP(69, NORTH, tsif2_sync, sdc40, NA, NA, NA, NA, NA, NA, + NA), + [70] = PINGROUP(70, NORTH, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA, + NA), + [71] = PINGROUP(71, NORTH, NA, mdp_vsync, ldo_update, NA, NA, NA, NA, + NA, NA), + [72] = PINGROUP(72, NORTH, prng_rosc, NA, NA, NA, NA, NA, NA, NA, NA), + [73] = PINGROUP(73, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [74] = PINGROUP(74, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [75] = PINGROUP(75, WEST, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA), + [76] = PINGROUP(76, WEST, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA), + [77] = PINGROUP(77, WEST, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA), + [78] = PINGROUP(78, WEST, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA), + [79] = PINGROUP(79, WEST, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA), + [80] = PINGROUP(80, WEST, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA), + [81] = PINGROUP(81, WEST, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA), + [82] = PINGROUP(82, WEST, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA), + [83] = PINGROUP(83, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA, + NA), + [84] = PINGROUP(84, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA, + NA), + [85] = PINGROUP(85, WEST, uim_batt, edp_hot, aoss_cti, NA, NA, NA, NA, + NA, NA), + [86] = PINGROUP(86, NORTH, qdss_gpio0, atest_char, NA, NA, NA, NA, NA, + NA, NA), + [87] = PINGROUP(87, NORTH, adsp_ext, qdss_gpio1, atest_char3, NA, NA, + NA, NA, NA, NA), + [88] = PINGROUP(88, NORTH, qdss_gpio2, atest_char2, NA, NA, NA, NA, NA, + NA, NA), + [89] = PINGROUP(89, NORTH, qdss_gpio3, atest_char1, NA, NA, NA, NA, NA, + NA, NA), + [90] = PINGROUP(90, NORTH, qdss_gpio4, atest_char0, NA, NA, NA, NA, NA, + NA, NA), + [91] = PINGROUP(91, NORTH, qdss_gpio5, NA, NA, NA, NA, NA, NA, NA, NA), + [92] = PINGROUP(92, NORTH, NA, NA, qup15, NA, NA, NA, NA, NA, NA), + [93] = PINGROUP(93, NORTH, qdss_gpio, NA, NA, NA, NA, NA, NA, NA, 
NA), + [94] = PINGROUP(94, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [95] = PINGROUP(95, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [96] = PINGROUP(96, WEST, qlink_request, NA, NA, NA, NA, NA, NA, NA, + NA), + [97] = PINGROUP(97, WEST, qlink_enable, NA, NA, NA, NA, NA, NA, NA, NA), + [98] = PINGROUP(98, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [99] = PINGROUP(99, WEST, NA, pa_indicator, NA, NA, NA, NA, NA, NA, NA), + [100] = PINGROUP(100, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [101] = PINGROUP(101, NORTH, NA, NA, qup15, NA, NA, NA, NA, NA, NA), + [102] = PINGROUP(102, NORTH, NA, NA, qup15, NA, NA, NA, NA, NA, NA), + [103] = PINGROUP(103, NORTH, NA, qup15, NA, NA, NA, NA, NA, NA, NA), + [104] = PINGROUP(104, WEST, usb_phy, NA, qdss_gpio, NA, NA, NA, NA, NA, + NA), + [105] = PINGROUP(105, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [106] = PINGROUP(106, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [107] = PINGROUP(107, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, + NA, NA), + [108] = PINGROUP(108, SOUTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA), + [109] = PINGROUP(109, SOUTH, mss_lte, GPS_TX, NA, NA, NA, NA, NA, NA, + NA), + [110] = PINGROUP(110, NORTH, NA, NA, qup14, NA, NA, NA, NA, NA, NA), + [111] = PINGROUP(111, NORTH, NA, NA, qup14, NA, NA, NA, NA, NA, NA), + [112] = PINGROUP(112, NORTH, NA, qup14, NA, NA, NA, NA, NA, NA, NA), + [113] = PINGROUP(113, NORTH, NA, qup14, NA, NA, NA, NA, NA, NA, NA), + [114] = PINGROUP(114, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [115] = PINGROUP(115, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [116] = PINGROUP(116, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [117] = PINGROUP(117, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [118] = PINGROUP(118, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [119] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0), + [120] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6), + [121] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3), + [122] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 
9, 0), + [123] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6), + [124] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3), + [125] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0), + [126] = UFS_RESET(ufs_reset, 0x9f000), +}; + +static const struct msm_pinctrl_soc_data sdmmagpie_pinctrl = { + .pins = sdmmagpie_pins, + .npins = ARRAY_SIZE(sdmmagpie_pins), + .functions = sdmmagpie_functions, + .nfunctions = ARRAY_SIZE(sdmmagpie_functions), + .groups = sdmmagpie_groups, + .ngroups = ARRAY_SIZE(sdmmagpie_groups), + .ngpios = 119, +}; + +static int sdmmagpie_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &sdmmagpie_pinctrl); +} + +static const struct of_device_id sdmmagpie_pinctrl_of_match[] = { + { .compatible = "qcom,sdmmagpie-pinctrl", }, + { }, +}; + +static struct platform_driver sdmmagpie_pinctrl_driver = { + .driver = { + .name = "sdmmagpie-pinctrl", + .owner = THIS_MODULE, + .of_match_table = sdmmagpie_pinctrl_of_match, + }, + .probe = sdmmagpie_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init sdmmagpie_pinctrl_init(void) +{ + return platform_driver_register(&sdmmagpie_pinctrl_driver); +} +arch_initcall(sdmmagpie_pinctrl_init); + +static void __exit sdmmagpie_pinctrl_exit(void) +{ + platform_driver_unregister(&sdmmagpie_pinctrl_driver); +} +module_exit(sdmmagpie_pinctrl_exit); + +MODULE_DESCRIPTION("QTI sdmmagpie pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, sdmmagpie_pinctrl_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-sdxprairie.c b/drivers/pinctrl/qcom/pinctrl-sdxprairie.c new file mode 100644 index 0000000000000000000000000000000000000000..df3765f213699c221c0f3941a74cb234c346e364 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sdxprairie.c @@ -0,0 +1,1262 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-msm.h" + +#define FUNCTION(fname) \ + [msm_mux_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define REG_BASE 0x0 +#define REG_SIZE 0x1000 +#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \ + { \ + .name = "gpio" #id, \ + .pins = gpio##id##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + msm_mux_gpio, /* gpio mode */ \ + msm_mux_##f1, \ + msm_mux_##f2, \ + msm_mux_##f3, \ + msm_mux_##f4, \ + msm_mux_##f5, \ + msm_mux_##f6, \ + msm_mux_##f7, \ + msm_mux_##f8, \ + msm_mux_##f9 \ + }, \ + .nfuncs = 10, \ + .ctl_reg = REG_BASE + REG_SIZE * id, \ + .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \ + .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \ + .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \ + .mux_bit = 2, \ + .pull_bit = 0, \ + .drv_bit = 6, \ + .oe_bit = 9, \ + .in_bit = 0, \ + .out_bit = 1, \ + .intr_enable_bit = 0, \ + .intr_status_bit = 0, \ + .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ + .intr_raw_status_bit = 4, \ + .intr_polarity_bit = 1, \ + .intr_detection_bit = 2, \ + .intr_detection_width = 2, \ + } + +#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = ctl, \ + .io_reg = 0, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ 
+ .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = pull, \ + .drv_bit = drv, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = -1, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } + +#define UFS_RESET(pg_name, offset) \ + { \ + .name = #pg_name, \ + .pins = pg_name##_pins, \ + .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ + .ctl_reg = offset, \ + .io_reg = offset + 0x4, \ + .intr_cfg_reg = 0, \ + .intr_status_reg = 0, \ + .intr_target_reg = 0, \ + .mux_bit = -1, \ + .pull_bit = 3, \ + .drv_bit = 0, \ + .oe_bit = -1, \ + .in_bit = -1, \ + .out_bit = 0, \ + .intr_enable_bit = -1, \ + .intr_status_bit = -1, \ + .intr_target_bit = -1, \ + .intr_raw_status_bit = -1, \ + .intr_polarity_bit = -1, \ + .intr_detection_bit = -1, \ + .intr_detection_width = -1, \ + } +static const struct pinctrl_pin_desc sdxprairie_pins[] = { + PINCTRL_PIN(0, "GPIO_0"), + PINCTRL_PIN(1, "GPIO_1"), + PINCTRL_PIN(2, "GPIO_2"), + PINCTRL_PIN(3, "GPIO_3"), + PINCTRL_PIN(4, "GPIO_4"), + PINCTRL_PIN(5, "GPIO_5"), + PINCTRL_PIN(6, "GPIO_6"), + PINCTRL_PIN(7, "GPIO_7"), + PINCTRL_PIN(8, "GPIO_8"), + PINCTRL_PIN(9, "GPIO_9"), + PINCTRL_PIN(10, "GPIO_10"), + PINCTRL_PIN(11, "GPIO_11"), + PINCTRL_PIN(12, "GPIO_12"), + PINCTRL_PIN(13, "GPIO_13"), + PINCTRL_PIN(14, "GPIO_14"), + PINCTRL_PIN(15, "GPIO_15"), + PINCTRL_PIN(16, "GPIO_16"), + PINCTRL_PIN(17, "GPIO_17"), + PINCTRL_PIN(18, "GPIO_18"), + PINCTRL_PIN(19, "GPIO_19"), + PINCTRL_PIN(20, "GPIO_20"), + PINCTRL_PIN(21, "GPIO_21"), + PINCTRL_PIN(22, "GPIO_22"), + PINCTRL_PIN(23, "GPIO_23"), + PINCTRL_PIN(24, "GPIO_24"), + PINCTRL_PIN(25, "GPIO_25"), + PINCTRL_PIN(26, "GPIO_26"), + PINCTRL_PIN(27, "GPIO_27"), + PINCTRL_PIN(28, "GPIO_28"), + PINCTRL_PIN(29, "GPIO_29"), + PINCTRL_PIN(30, "GPIO_30"), + PINCTRL_PIN(31, "GPIO_31"), + PINCTRL_PIN(32, "GPIO_32"), + PINCTRL_PIN(33, 
"GPIO_33"), + PINCTRL_PIN(34, "GPIO_34"), + PINCTRL_PIN(35, "GPIO_35"), + PINCTRL_PIN(36, "GPIO_36"), + PINCTRL_PIN(37, "GPIO_37"), + PINCTRL_PIN(38, "GPIO_38"), + PINCTRL_PIN(39, "GPIO_39"), + PINCTRL_PIN(40, "GPIO_40"), + PINCTRL_PIN(41, "GPIO_41"), + PINCTRL_PIN(42, "GPIO_42"), + PINCTRL_PIN(43, "GPIO_43"), + PINCTRL_PIN(44, "GPIO_44"), + PINCTRL_PIN(45, "GPIO_45"), + PINCTRL_PIN(46, "GPIO_46"), + PINCTRL_PIN(47, "GPIO_47"), + PINCTRL_PIN(48, "GPIO_48"), + PINCTRL_PIN(49, "GPIO_49"), + PINCTRL_PIN(50, "GPIO_50"), + PINCTRL_PIN(51, "GPIO_51"), + PINCTRL_PIN(52, "GPIO_52"), + PINCTRL_PIN(53, "GPIO_53"), + PINCTRL_PIN(54, "GPIO_54"), + PINCTRL_PIN(55, "GPIO_55"), + PINCTRL_PIN(56, "GPIO_56"), + PINCTRL_PIN(57, "GPIO_57"), + PINCTRL_PIN(58, "GPIO_58"), + PINCTRL_PIN(59, "GPIO_59"), + PINCTRL_PIN(60, "GPIO_60"), + PINCTRL_PIN(61, "GPIO_61"), + PINCTRL_PIN(62, "GPIO_62"), + PINCTRL_PIN(63, "GPIO_63"), + PINCTRL_PIN(64, "GPIO_64"), + PINCTRL_PIN(65, "GPIO_65"), + PINCTRL_PIN(66, "GPIO_66"), + PINCTRL_PIN(67, "GPIO_67"), + PINCTRL_PIN(68, "GPIO_68"), + PINCTRL_PIN(69, "GPIO_69"), + PINCTRL_PIN(70, "GPIO_70"), + PINCTRL_PIN(71, "GPIO_71"), + PINCTRL_PIN(72, "GPIO_72"), + PINCTRL_PIN(73, "GPIO_73"), + PINCTRL_PIN(74, "GPIO_74"), + PINCTRL_PIN(75, "GPIO_75"), + PINCTRL_PIN(76, "GPIO_76"), + PINCTRL_PIN(77, "GPIO_77"), + PINCTRL_PIN(78, "GPIO_78"), + PINCTRL_PIN(79, "GPIO_79"), + PINCTRL_PIN(80, "GPIO_80"), + PINCTRL_PIN(81, "GPIO_81"), + PINCTRL_PIN(82, "GPIO_82"), + PINCTRL_PIN(83, "GPIO_83"), + PINCTRL_PIN(84, "GPIO_84"), + PINCTRL_PIN(85, "GPIO_85"), + PINCTRL_PIN(86, "GPIO_86"), + PINCTRL_PIN(87, "GPIO_87"), + PINCTRL_PIN(88, "GPIO_88"), + PINCTRL_PIN(89, "GPIO_89"), + PINCTRL_PIN(90, "GPIO_90"), + PINCTRL_PIN(91, "GPIO_91"), + PINCTRL_PIN(92, "GPIO_92"), + PINCTRL_PIN(93, "GPIO_93"), + PINCTRL_PIN(94, "GPIO_94"), + PINCTRL_PIN(95, "GPIO_95"), + PINCTRL_PIN(96, "GPIO_96"), + PINCTRL_PIN(97, "GPIO_97"), + PINCTRL_PIN(98, "GPIO_98"), + PINCTRL_PIN(99, "GPIO_99"), + 
PINCTRL_PIN(100, "GPIO_100"), + PINCTRL_PIN(101, "GPIO_101"), + PINCTRL_PIN(102, "GPIO_102"), + PINCTRL_PIN(103, "GPIO_103"), + PINCTRL_PIN(104, "GPIO_104"), + PINCTRL_PIN(105, "GPIO_105"), + PINCTRL_PIN(106, "GPIO_106"), + PINCTRL_PIN(107, "GPIO_107"), + PINCTRL_PIN(108, "SDC1_RCLK"), + PINCTRL_PIN(109, "SDC1_CLK"), + PINCTRL_PIN(110, "SDC1_CMD"), + PINCTRL_PIN(111, "SDC1_DATA"), + PINCTRL_PIN(112, "UFS_RESET"), +}; + +#define DECLARE_MSM_GPIO_PINS(pin) \ + static const unsigned int gpio##pin##_pins[] = { pin } +DECLARE_MSM_GPIO_PINS(0); +DECLARE_MSM_GPIO_PINS(1); +DECLARE_MSM_GPIO_PINS(2); +DECLARE_MSM_GPIO_PINS(3); +DECLARE_MSM_GPIO_PINS(4); +DECLARE_MSM_GPIO_PINS(5); +DECLARE_MSM_GPIO_PINS(6); +DECLARE_MSM_GPIO_PINS(7); +DECLARE_MSM_GPIO_PINS(8); +DECLARE_MSM_GPIO_PINS(9); +DECLARE_MSM_GPIO_PINS(10); +DECLARE_MSM_GPIO_PINS(11); +DECLARE_MSM_GPIO_PINS(12); +DECLARE_MSM_GPIO_PINS(13); +DECLARE_MSM_GPIO_PINS(14); +DECLARE_MSM_GPIO_PINS(15); +DECLARE_MSM_GPIO_PINS(16); +DECLARE_MSM_GPIO_PINS(17); +DECLARE_MSM_GPIO_PINS(18); +DECLARE_MSM_GPIO_PINS(19); +DECLARE_MSM_GPIO_PINS(20); +DECLARE_MSM_GPIO_PINS(21); +DECLARE_MSM_GPIO_PINS(22); +DECLARE_MSM_GPIO_PINS(23); +DECLARE_MSM_GPIO_PINS(24); +DECLARE_MSM_GPIO_PINS(25); +DECLARE_MSM_GPIO_PINS(26); +DECLARE_MSM_GPIO_PINS(27); +DECLARE_MSM_GPIO_PINS(28); +DECLARE_MSM_GPIO_PINS(29); +DECLARE_MSM_GPIO_PINS(30); +DECLARE_MSM_GPIO_PINS(31); +DECLARE_MSM_GPIO_PINS(32); +DECLARE_MSM_GPIO_PINS(33); +DECLARE_MSM_GPIO_PINS(34); +DECLARE_MSM_GPIO_PINS(35); +DECLARE_MSM_GPIO_PINS(36); +DECLARE_MSM_GPIO_PINS(37); +DECLARE_MSM_GPIO_PINS(38); +DECLARE_MSM_GPIO_PINS(39); +DECLARE_MSM_GPIO_PINS(40); +DECLARE_MSM_GPIO_PINS(41); +DECLARE_MSM_GPIO_PINS(42); +DECLARE_MSM_GPIO_PINS(43); +DECLARE_MSM_GPIO_PINS(44); +DECLARE_MSM_GPIO_PINS(45); +DECLARE_MSM_GPIO_PINS(46); +DECLARE_MSM_GPIO_PINS(47); +DECLARE_MSM_GPIO_PINS(48); +DECLARE_MSM_GPIO_PINS(49); +DECLARE_MSM_GPIO_PINS(50); +DECLARE_MSM_GPIO_PINS(51); +DECLARE_MSM_GPIO_PINS(52); 
+DECLARE_MSM_GPIO_PINS(53); +DECLARE_MSM_GPIO_PINS(54); +DECLARE_MSM_GPIO_PINS(55); +DECLARE_MSM_GPIO_PINS(56); +DECLARE_MSM_GPIO_PINS(57); +DECLARE_MSM_GPIO_PINS(58); +DECLARE_MSM_GPIO_PINS(59); +DECLARE_MSM_GPIO_PINS(60); +DECLARE_MSM_GPIO_PINS(61); +DECLARE_MSM_GPIO_PINS(62); +DECLARE_MSM_GPIO_PINS(63); +DECLARE_MSM_GPIO_PINS(64); +DECLARE_MSM_GPIO_PINS(65); +DECLARE_MSM_GPIO_PINS(66); +DECLARE_MSM_GPIO_PINS(67); +DECLARE_MSM_GPIO_PINS(68); +DECLARE_MSM_GPIO_PINS(69); +DECLARE_MSM_GPIO_PINS(70); +DECLARE_MSM_GPIO_PINS(71); +DECLARE_MSM_GPIO_PINS(72); +DECLARE_MSM_GPIO_PINS(73); +DECLARE_MSM_GPIO_PINS(74); +DECLARE_MSM_GPIO_PINS(75); +DECLARE_MSM_GPIO_PINS(76); +DECLARE_MSM_GPIO_PINS(77); +DECLARE_MSM_GPIO_PINS(78); +DECLARE_MSM_GPIO_PINS(79); +DECLARE_MSM_GPIO_PINS(80); +DECLARE_MSM_GPIO_PINS(81); +DECLARE_MSM_GPIO_PINS(82); +DECLARE_MSM_GPIO_PINS(83); +DECLARE_MSM_GPIO_PINS(84); +DECLARE_MSM_GPIO_PINS(85); +DECLARE_MSM_GPIO_PINS(86); +DECLARE_MSM_GPIO_PINS(87); +DECLARE_MSM_GPIO_PINS(88); +DECLARE_MSM_GPIO_PINS(89); +DECLARE_MSM_GPIO_PINS(90); +DECLARE_MSM_GPIO_PINS(91); +DECLARE_MSM_GPIO_PINS(92); +DECLARE_MSM_GPIO_PINS(93); +DECLARE_MSM_GPIO_PINS(94); +DECLARE_MSM_GPIO_PINS(95); +DECLARE_MSM_GPIO_PINS(96); +DECLARE_MSM_GPIO_PINS(97); +DECLARE_MSM_GPIO_PINS(98); +DECLARE_MSM_GPIO_PINS(99); +DECLARE_MSM_GPIO_PINS(100); +DECLARE_MSM_GPIO_PINS(101); +DECLARE_MSM_GPIO_PINS(102); +DECLARE_MSM_GPIO_PINS(103); +DECLARE_MSM_GPIO_PINS(104); +DECLARE_MSM_GPIO_PINS(105); +DECLARE_MSM_GPIO_PINS(106); +DECLARE_MSM_GPIO_PINS(107); + +static const unsigned int sdc1_rclk_pins[] = { 108 }; +static const unsigned int sdc1_clk_pins[] = { 109 }; +static const unsigned int sdc1_cmd_pins[] = { 110 }; +static const unsigned int sdc1_data_pins[] = { 111 }; +static const unsigned int ufs_reset_pins[] = { 112 }; + +enum sdxprairie_functions { + msm_mux_qdss_stm11, + msm_mux_qdss_stm10, + msm_mux_ddr_pxi0, + msm_mux_m_voc, + msm_mux_ddr_bist, + msm_mux_blsp_spi1, + msm_mux_qdss_stm14, + 
msm_mux_pci_e, + msm_mux_qdss_stm13, + msm_mux_tgu_ch0, + msm_mux_pcie_clkreq, + msm_mux_qdss_stm9, + msm_mux_qdss_stm15, + msm_mux_mgpi_clk, + msm_mux_qdss_stm12, + msm_mux_i2s_mclk, + msm_mux_audio_ref, + msm_mux_ldo_update, + msm_mux_qdss_stm8, + msm_mux_qdss_stm7, + msm_mux_qdss_gpio4, + msm_mux_atest_char, + msm_mux_qdss_stm6, + msm_mux_qdss_gpio5, + msm_mux_atest_char3, + msm_mux_qdss_stm5, + msm_mux_qdss_gpio6, + msm_mux_atest_char2, + msm_mux_qdss_stm4, + msm_mux_qdss_gpio7, + msm_mux_atest_char1, + msm_mux_uim1_data, + msm_mux_atest_char0, + msm_mux_uim1_present, + msm_mux_uim1_reset, + msm_mux_uim1_clk, + msm_mux_qlink1_en, + msm_mux_qlink1_req, + msm_mux_qlink1_wmss, + msm_mux_COEX_UART2, + msm_mux_spmi_vgi, + msm_mux_gcc_plltest, + msm_mux_usb2phy_ac, + msm_mux_emac_PPS1, + msm_mux_emac_PPS0, + msm_mux_uim2_data, + msm_mux_gpio, + msm_mux_qdss_stm31, + msm_mux_ebi0_wrcdc, + msm_mux_uim2_present, + msm_mux_qdss_stm30, + msm_mux_blsp_uart1, + msm_mux_uim2_reset, + msm_mux_blsp_i2c1, + msm_mux_qdss_stm29, + msm_mux_uim2_clk, + msm_mux_qdss_stm28, + msm_mux_blsp_spi2, + msm_mux_blsp_uart2, + msm_mux_qdss_stm23, + msm_mux_qdss_gpio3, + msm_mux_qdss_stm22, + msm_mux_qdss_gpio2, + msm_mux_blsp_i2c2, + msm_mux_char_exec, + msm_mux_qdss_stm21, + msm_mux_qdss_gpio1, + msm_mux_qdss_stm20, + msm_mux_qdss_gpio0, + msm_mux_pri_mi2s, + msm_mux_blsp_spi3, + msm_mux_blsp_uart3, + msm_mux_ext_dbg, + msm_mux_ldo_en, + msm_mux_blsp_i2c3, + msm_mux_gcc_gp3, + msm_mux_qdss_stm19, + msm_mux_qdss_gpio12, + msm_mux_qdss_stm18, + msm_mux_qdss_gpio13, + msm_mux_emac_gcc1, + msm_mux_qdss_stm17, + msm_mux_qdss_gpio14, + msm_mux_bimc_dte0, + msm_mux_native_tsens, + msm_mux_vsense_trigger, + msm_mux_emac_gcc0, + msm_mux_qdss_stm16, + msm_mux_qdss_gpio15, + msm_mux_bimc_dte1, + msm_mux_sec_mi2s, + msm_mux_blsp_spi4, + msm_mux_blsp_uart4, + msm_mux_qdss_cti, + msm_mux_qdss_stm27, + msm_mux_qdss_gpio8, + msm_mux_qdss_stm26, + msm_mux_qdss_gpio9, + msm_mux_blsp_i2c4, + msm_mux_gcc_gp1, + 
msm_mux_qdss_stm25, + msm_mux_qdss_gpio10, + msm_mux_jitter_bist, + msm_mux_gcc_gp2, + msm_mux_qdss_stm24, + msm_mux_qdss_gpio11, + msm_mux_ebi2_a, + msm_mux_qdss_stm3, + msm_mux_ebi2_lcd, + msm_mux_qdss_stm2, + msm_mux_pll_bist, + msm_mux_qdss_stm1, + msm_mux_qdss_stm0, + msm_mux_adsp_ext, + msm_mux_native_char, + msm_mux_QLINK0_WMSS, + msm_mux_native_char3, + msm_mux_native_char2, + msm_mux_native_tsense, + msm_mux_nav_gpio, + msm_mux_pll_ref, + msm_mux_pa_indicator, + msm_mux_native_char0, + msm_mux_qlink0_en, + msm_mux_qlink0_req, + msm_mux_pll_test, + msm_mux_cri_trng, + msm_mux_dbg_out, + msm_mux_prng_rosc, + msm_mux_cri_trng0, + msm_mux_cri_trng1, + msm_mux_qdss_gpio, + msm_mux_native_char1, + msm_mux_coex_uart, + msm_mux_spmi_coex, + msm_mux_NA, +}; + +static const char * const qdss_stm11_groups[] = { + "gpio44", +}; +static const char * const qdss_stm10_groups[] = { + "gpio45", +}; +static const char * const ddr_pxi0_groups[] = { + "gpio45", "gpio46", +}; +static const char * const m_voc_groups[] = { + "gpio46", "gpio48", "gpio49", "gpio59", "gpio60", +}; +static const char * const ddr_bist_groups[] = { + "gpio46", "gpio47", "gpio48", "gpio49", +}; +static const char * const blsp_spi1_groups[] = { + "gpio52", "gpio62", "gpio71", "gpio80", "gpio81", "gpio82", "gpio83", +}; +static const char * const qdss_stm14_groups[] = { + "gpio52", +}; +static const char * const pci_e_groups[] = { + "gpio53", +}; +static const char * const qdss_stm13_groups[] = { + "gpio53", +}; +static const char * const tgu_ch0_groups[] = { + "gpio55", +}; +static const char * const pcie_clkreq_groups[] = { + "gpio56", +}; +static const char * const qdss_stm9_groups[] = { + "gpio56", +}; +static const char * const qdss_stm15_groups[] = { + "gpio57", +}; +static const char * const mgpi_clk_groups[] = { + "gpio61", "gpio71", +}; +static const char * const qdss_stm12_groups[] = { + "gpio61", +}; +static const char * const i2s_mclk_groups[] = { + "gpio62", +}; +static const char * const 
audio_ref_groups[] = { + "gpio62", +}; +static const char * const ldo_update_groups[] = { + "gpio62", +}; +static const char * const qdss_stm8_groups[] = { + "gpio62", +}; +static const char * const qdss_stm7_groups[] = { + "gpio63", +}; +static const char * const qdss_gpio4_groups[] = { + "gpio63", +}; +static const char * const atest_char_groups[] = { + "gpio63", +}; +static const char * const qdss_stm6_groups[] = { + "gpio64", +}; +static const char * const qdss_gpio5_groups[] = { + "gpio64", +}; +static const char * const atest_char3_groups[] = { + "gpio64", +}; +static const char * const qdss_stm5_groups[] = { + "gpio65", +}; +static const char * const qdss_gpio6_groups[] = { + "gpio65", +}; +static const char * const atest_char2_groups[] = { + "gpio65", +}; +static const char * const qdss_stm4_groups[] = { + "gpio66", +}; +static const char * const qdss_gpio7_groups[] = { + "gpio66", +}; +static const char * const atest_char1_groups[] = { + "gpio66", +}; +static const char * const uim1_data_groups[] = { + "gpio67", +}; +static const char * const atest_char0_groups[] = { + "gpio67", +}; +static const char * const uim1_present_groups[] = { + "gpio68", +}; +static const char * const uim1_reset_groups[] = { + "gpio69", +}; +static const char * const uim1_clk_groups[] = { + "gpio70", +}; +static const char * const qlink1_en_groups[] = { + "gpio72", +}; +static const char * const qlink1_req_groups[] = { + "gpio73", +}; +static const char * const qlink1_wmss_groups[] = { + "gpio74", +}; +static const char * const COEX_UART2_groups[] = { + "gpio75", "gpio76", +}; +static const char * const spmi_vgi_groups[] = { + "gpio78", "gpio79", +}; +static const char * const gcc_plltest_groups[] = { + "gpio81", "gpio82", +}; +static const char * const usb2phy_ac_groups[] = { + "gpio93", +}; +static const char * const emac_PPS1_groups[] = { + "gpio95", +}; +static const char * const emac_PPS0_groups[] = { + "gpio106", +}; +static const char * const uim2_data_groups[] = { + 
"gpio0", +}; +static const char * const gpio_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", + "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", + "gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54", + "gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", + "gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", + "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", + "gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", + "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", + "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", + "gpio97", "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", + "gpio103", "gpio104", "gpio105", "gpio106", "gpio107", +}; +static const char * const qdss_stm31_groups[] = { + "gpio0", +}; +static const char * const ebi0_wrcdc_groups[] = { + "gpio0", "gpio2", +}; +static const char * const uim2_present_groups[] = { + "gpio1", +}; +static const char * const qdss_stm30_groups[] = { + "gpio1", +}; +static const char * const blsp_uart1_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22", + "gpio23", +}; +static const char * const uim2_reset_groups[] = { + "gpio2", +}; +static const char * const blsp_i2c1_groups[] = { + "gpio2", "gpio3", "gpio82", "gpio83", +}; +static const char * const qdss_stm29_groups[] = { + "gpio2", +}; +static const char * const uim2_clk_groups[] = { + "gpio3", +}; +static const char * const qdss_stm28_groups[] = { + "gpio3", +}; +static const char * const 
blsp_spi2_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", "gpio52", "gpio62", "gpio71", +}; +static const char * const blsp_uart2_groups[] = { + "gpio4", "gpio5", "gpio6", "gpio7", "gpio63", "gpio64", "gpio65", + "gpio66", +}; +static const char * const qdss_stm23_groups[] = { + "gpio4", +}; +static const char * const qdss_gpio3_groups[] = { + "gpio4", +}; +static const char * const qdss_stm22_groups[] = { + "gpio5", +}; +static const char * const qdss_gpio2_groups[] = { + "gpio5", +}; +static const char * const blsp_i2c2_groups[] = { + "gpio6", "gpio7", "gpio65", "gpio66", +}; +static const char * const char_exec_groups[] = { + "gpio6", "gpio7", +}; +static const char * const qdss_stm21_groups[] = { + "gpio6", +}; +static const char * const qdss_gpio1_groups[] = { + "gpio6", +}; +static const char * const qdss_stm20_groups[] = { + "gpio7", +}; +static const char * const qdss_gpio0_groups[] = { + "gpio7", +}; +static const char * const pri_mi2s_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", +}; +static const char * const blsp_spi3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", "gpio52", "gpio62", "gpio71", +}; +static const char * const blsp_uart3_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; +static const char * const ext_dbg_groups[] = { + "gpio8", "gpio9", "gpio10", "gpio11", +}; +static const char * const ldo_en_groups[] = { + "gpio8", +}; +static const char * const blsp_i2c3_groups[] = { + "gpio10", "gpio11", +}; +static const char * const gcc_gp3_groups[] = { + "gpio11", +}; +static const char * const qdss_stm19_groups[] = { + "gpio12", +}; +static const char * const qdss_gpio12_groups[] = { + "gpio12", +}; +static const char * const qdss_stm18_groups[] = { + "gpio13", +}; +static const char * const qdss_gpio13_groups[] = { + "gpio13", +}; +static const char * const emac_gcc1_groups[] = { + "gpio14", +}; +static const char * const qdss_stm17_groups[] = { + "gpio14", +}; +static const char * 
const qdss_gpio14_groups[] = { + "gpio14", +}; +static const char * const bimc_dte0_groups[] = { + "gpio14", "gpio59", +}; +static const char * const native_tsens_groups[] = { + "gpio14", +}; +static const char * const vsense_trigger_groups[] = { + "gpio14", +}; +static const char * const emac_gcc0_groups[] = { + "gpio15", +}; +static const char * const qdss_stm16_groups[] = { + "gpio15", +}; +static const char * const qdss_gpio15_groups[] = { + "gpio15", +}; +static const char * const bimc_dte1_groups[] = { + "gpio15", "gpio61", +}; +static const char * const sec_mi2s_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", + "gpio23", +}; +static const char * const blsp_spi4_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71", +}; +static const char * const blsp_uart4_groups[] = { + "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", + "gpio23", +}; +static const char * const qdss_cti_groups[] = { + "gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23", + "gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio60", + "gpio94", "gpio94", "gpio95", "gpio95", +}; +static const char * const qdss_stm27_groups[] = { + "gpio16", +}; +static const char * const qdss_gpio8_groups[] = { + "gpio16", +}; +static const char * const qdss_stm26_groups[] = { + "gpio17", +}; +static const char * const qdss_gpio9_groups[] = { + "gpio17", +}; +static const char * const blsp_i2c4_groups[] = { + "gpio18", "gpio19", "gpio78", "gpio79", +}; +static const char * const gcc_gp1_groups[] = { + "gpio18", +}; +static const char * const qdss_stm25_groups[] = { + "gpio18", +}; +static const char * const qdss_gpio10_groups[] = { + "gpio18", +}; +static const char * const jitter_bist_groups[] = { + "gpio19", +}; +static const char * const gcc_gp2_groups[] = { + "gpio19", +}; +static const char * const qdss_stm24_groups[] = { + "gpio19", +}; +static const char * const qdss_gpio11_groups[] = { + 
"gpio19", +}; +static const char * const ebi2_a_groups[] = { + "gpio20", +}; +static const char * const qdss_stm3_groups[] = { + "gpio20", +}; +static const char * const ebi2_lcd_groups[] = { + "gpio21", "gpio22", "gpio23", +}; +static const char * const qdss_stm2_groups[] = { + "gpio21", +}; +static const char * const pll_bist_groups[] = { + "gpio22", +}; +static const char * const qdss_stm1_groups[] = { + "gpio22", +}; +static const char * const qdss_stm0_groups[] = { + "gpio23", +}; +static const char * const adsp_ext_groups[] = { + "gpio24", "gpio25", +}; +static const char * const native_char_groups[] = { + "gpio26", +}; +static const char * const QLINK0_WMSS_groups[] = { + "gpio28", +}; +static const char * const native_char3_groups[] = { + "gpio28", +}; +static const char * const native_char2_groups[] = { + "gpio29", +}; +static const char * const native_tsense_groups[] = { + "gpio29", +}; +static const char * const nav_gpio_groups[] = { + "gpio31", "gpio32", "gpio76", +}; +static const char * const pll_ref_groups[] = { + "gpio32", +}; +static const char * const pa_indicator_groups[] = { + "gpio33", +}; +static const char * const native_char0_groups[] = { + "gpio33", +}; +static const char * const qlink0_en_groups[] = { + "gpio34", +}; +static const char * const qlink0_req_groups[] = { + "gpio35", +}; +static const char * const pll_test_groups[] = { + "gpio35", +}; +static const char * const cri_trng_groups[] = { + "gpio36", +}; +static const char * const dbg_out_groups[] = { + "gpio36", +}; +static const char * const prng_rosc_groups[] = { + "gpio38", +}; +static const char * const cri_trng0_groups[] = { + "gpio40", +}; +static const char * const cri_trng1_groups[] = { + "gpio41", +}; +static const char * const qdss_gpio_groups[] = { + "gpio42", "gpio61", +}; +static const char * const native_char1_groups[] = { + "gpio42", +}; +static const char * const coex_uart_groups[] = { + "gpio44", "gpio45", +}; +static const char * const spmi_coex_groups[] = { + 
"gpio44", "gpio45", +}; + +static const struct msm_function sdxprairie_functions[] = { + FUNCTION(qdss_stm11), + FUNCTION(qdss_stm10), + FUNCTION(ddr_pxi0), + FUNCTION(m_voc), + FUNCTION(ddr_bist), + FUNCTION(blsp_spi1), + FUNCTION(qdss_stm14), + FUNCTION(pci_e), + FUNCTION(qdss_stm13), + FUNCTION(tgu_ch0), + FUNCTION(pcie_clkreq), + FUNCTION(qdss_stm9), + FUNCTION(qdss_stm15), + FUNCTION(mgpi_clk), + FUNCTION(qdss_stm12), + FUNCTION(i2s_mclk), + FUNCTION(audio_ref), + FUNCTION(ldo_update), + FUNCTION(qdss_stm8), + FUNCTION(qdss_stm7), + FUNCTION(qdss_gpio4), + FUNCTION(atest_char), + FUNCTION(qdss_stm6), + FUNCTION(qdss_gpio5), + FUNCTION(atest_char3), + FUNCTION(qdss_stm5), + FUNCTION(qdss_gpio6), + FUNCTION(atest_char2), + FUNCTION(qdss_stm4), + FUNCTION(qdss_gpio7), + FUNCTION(atest_char1), + FUNCTION(uim1_data), + FUNCTION(atest_char0), + FUNCTION(uim1_present), + FUNCTION(uim1_reset), + FUNCTION(uim1_clk), + FUNCTION(qlink1_en), + FUNCTION(qlink1_req), + FUNCTION(qlink1_wmss), + FUNCTION(COEX_UART2), + FUNCTION(spmi_vgi), + FUNCTION(gcc_plltest), + FUNCTION(usb2phy_ac), + FUNCTION(emac_PPS1), + FUNCTION(emac_PPS0), + FUNCTION(uim2_data), + FUNCTION(gpio), + FUNCTION(qdss_stm31), + FUNCTION(ebi0_wrcdc), + FUNCTION(uim2_present), + FUNCTION(qdss_stm30), + FUNCTION(blsp_uart1), + FUNCTION(uim2_reset), + FUNCTION(blsp_i2c1), + FUNCTION(qdss_stm29), + FUNCTION(uim2_clk), + FUNCTION(qdss_stm28), + FUNCTION(blsp_spi2), + FUNCTION(blsp_uart2), + FUNCTION(qdss_stm23), + FUNCTION(qdss_gpio3), + FUNCTION(qdss_stm22), + FUNCTION(qdss_gpio2), + FUNCTION(blsp_i2c2), + FUNCTION(char_exec), + FUNCTION(qdss_stm21), + FUNCTION(qdss_gpio1), + FUNCTION(qdss_stm20), + FUNCTION(qdss_gpio0), + FUNCTION(pri_mi2s), + FUNCTION(blsp_spi3), + FUNCTION(blsp_uart3), + FUNCTION(ext_dbg), + FUNCTION(ldo_en), + FUNCTION(blsp_i2c3), + FUNCTION(gcc_gp3), + FUNCTION(qdss_stm19), + FUNCTION(qdss_gpio12), + FUNCTION(qdss_stm18), + FUNCTION(qdss_gpio13), + FUNCTION(emac_gcc1), + 
FUNCTION(qdss_stm17), + FUNCTION(qdss_gpio14), + FUNCTION(bimc_dte0), + FUNCTION(native_tsens), + FUNCTION(vsense_trigger), + FUNCTION(emac_gcc0), + FUNCTION(qdss_stm16), + FUNCTION(qdss_gpio15), + FUNCTION(bimc_dte1), + FUNCTION(sec_mi2s), + FUNCTION(blsp_spi4), + FUNCTION(blsp_uart4), + FUNCTION(qdss_cti), + FUNCTION(qdss_stm27), + FUNCTION(qdss_gpio8), + FUNCTION(qdss_stm26), + FUNCTION(qdss_gpio9), + FUNCTION(blsp_i2c4), + FUNCTION(gcc_gp1), + FUNCTION(qdss_stm25), + FUNCTION(qdss_gpio10), + FUNCTION(jitter_bist), + FUNCTION(gcc_gp2), + FUNCTION(qdss_stm24), + FUNCTION(qdss_gpio11), + FUNCTION(ebi2_a), + FUNCTION(qdss_stm3), + FUNCTION(ebi2_lcd), + FUNCTION(qdss_stm2), + FUNCTION(pll_bist), + FUNCTION(qdss_stm1), + FUNCTION(qdss_stm0), + FUNCTION(adsp_ext), + FUNCTION(native_char), + FUNCTION(QLINK0_WMSS), + FUNCTION(native_char3), + FUNCTION(native_char2), + FUNCTION(native_tsense), + FUNCTION(nav_gpio), + FUNCTION(pll_ref), + FUNCTION(pa_indicator), + FUNCTION(native_char0), + FUNCTION(qlink0_en), + FUNCTION(qlink0_req), + FUNCTION(pll_test), + FUNCTION(cri_trng), + FUNCTION(dbg_out), + FUNCTION(prng_rosc), + FUNCTION(cri_trng0), + FUNCTION(cri_trng1), + FUNCTION(qdss_gpio), + FUNCTION(native_char1), + FUNCTION(coex_uart), + FUNCTION(spmi_coex), +}; + +/* Every pin is maintained as a single group, and missing or non-existing pin + * would be maintained as dummy group to synchronize pin group index with + * pin descriptor registered with pinctrl core. + * Clients would not be able to request these dummy pin groups. 
+ */ +static const struct msm_pingroup sdxprairie_groups[] = { + [0] = PINGROUP(0, uim2_data, blsp_uart1, qdss_stm31, ebi0_wrcdc, NA, + NA, NA, NA, NA), + [1] = PINGROUP(1, uim2_present, blsp_uart1, qdss_stm30, NA, NA, NA, NA, + NA, NA), + [2] = PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm29, + ebi0_wrcdc, NA, NA, NA, NA), + [3] = PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm28, NA, NA, + NA, NA, NA), + [4] = PINGROUP(4, blsp_spi2, blsp_uart2, NA, qdss_stm23, qdss_gpio3, + NA, NA, NA, NA), + [5] = PINGROUP(5, blsp_spi2, blsp_uart2, NA, qdss_stm22, qdss_gpio2, + NA, NA, NA, NA), + [6] = PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, NA, + qdss_stm21, qdss_gpio1, NA, NA), + [7] = PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, NA, + qdss_stm20, qdss_gpio0, NA, NA), + [8] = PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, NA, + NA, NA, NA), + [9] = PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, NA, NA, NA, + NA, NA), + [10] = PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, + ext_dbg, NA, NA, NA, NA), + [11] = PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, + ext_dbg, gcc_gp3, NA, NA, NA), + [12] = PINGROUP(12, pri_mi2s, NA, qdss_stm19, qdss_gpio12, NA, NA, NA, + NA, NA), + [13] = PINGROUP(13, pri_mi2s, NA, qdss_stm18, qdss_gpio13, NA, NA, NA, + NA, NA), + [14] = PINGROUP(14, pri_mi2s, emac_gcc1, NA, NA, qdss_stm17, + qdss_gpio14, bimc_dte0, native_tsens, vsense_trigger), + [15] = PINGROUP(15, pri_mi2s, emac_gcc0, NA, NA, qdss_stm16, + qdss_gpio15, bimc_dte1, NA, NA), + [16] = PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, + qdss_cti, NA, NA, qdss_stm27, qdss_gpio8), + [17] = PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, + qdss_cti, NA, qdss_stm26, qdss_gpio9, NA), + [18] = PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, + gcc_gp1, qdss_stm25, qdss_gpio10, NA, NA), + [19] = PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, + jitter_bist, gcc_gp2, NA, 
qdss_stm24, qdss_gpio11), + [20] = PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, + qdss_stm3, NA, NA, NA, NA), + [21] = PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, NA, + qdss_stm2, NA, NA, NA), + [22] = PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti, + blsp_uart4, pll_bist, NA, qdss_stm1), + [23] = PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1, + blsp_uart4, qdss_stm0, NA, NA), + [24] = PINGROUP(24, adsp_ext, NA, NA, NA, NA, NA, NA, NA, NA), + [25] = PINGROUP(25, adsp_ext, NA, NA, NA, NA, NA, NA, NA, NA), + [26] = PINGROUP(26, NA, NA, NA, native_char, NA, NA, NA, NA, NA), + [27] = PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [28] = PINGROUP(28, QLINK0_WMSS, NA, native_char3, NA, NA, NA, NA, NA, + NA), + [29] = PINGROUP(29, NA, NA, NA, native_char2, native_tsense, NA, NA, + NA, NA), + [30] = PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [31] = PINGROUP(31, nav_gpio, NA, NA, NA, NA, NA, NA, NA, NA), + [32] = PINGROUP(32, nav_gpio, pll_ref, NA, NA, NA, NA, NA, NA, NA), + [33] = PINGROUP(33, NA, pa_indicator, native_char0, NA, NA, NA, NA, NA, + NA), + [34] = PINGROUP(34, qlink0_en, NA, NA, NA, NA, NA, NA, NA, NA), + [35] = PINGROUP(35, qlink0_req, pll_test, NA, NA, NA, NA, NA, NA, NA), + [36] = PINGROUP(36, NA, NA, cri_trng, dbg_out, NA, NA, NA, NA, NA), + [37] = PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [38] = PINGROUP(38, NA, NA, prng_rosc, NA, NA, NA, NA, NA, NA), + [39] = PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [40] = PINGROUP(40, NA, NA, cri_trng0, NA, NA, NA, NA, NA, NA), + [41] = PINGROUP(41, NA, NA, cri_trng1, NA, NA, NA, NA, NA, NA), + [42] = PINGROUP(42, NA, qdss_gpio, native_char1, NA, NA, NA, NA, NA, + NA), + [43] = PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [44] = PINGROUP(44, coex_uart, spmi_coex, NA, qdss_stm11, NA, NA, NA, + NA, NA), + [45] = PINGROUP(45, coex_uart, spmi_coex, qdss_stm10, ddr_pxi0, NA, NA, + NA, NA, NA), + [46] = PINGROUP(46, m_voc, 
ddr_bist, ddr_pxi0, NA, NA, NA, NA, NA, NA), + [47] = PINGROUP(47, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA), + [48] = PINGROUP(48, m_voc, ddr_bist, NA, NA, NA, NA, NA, NA, NA), + [49] = PINGROUP(49, m_voc, ddr_bist, NA, NA, NA, NA, NA, NA, NA), + [50] = PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [51] = PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [52] = PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, NA, NA, + qdss_stm14, NA, NA), + [53] = PINGROUP(53, pci_e, NA, NA, qdss_stm13, NA, NA, NA, NA, NA), + [54] = PINGROUP(54, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA), + [55] = PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, NA, NA, NA, NA, NA, + NA), + [56] = PINGROUP(56, pcie_clkreq, NA, qdss_stm9, NA, NA, NA, NA, NA, NA), + [57] = PINGROUP(57, NA, qdss_stm15, NA, NA, NA, NA, NA, NA, NA), + [58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [59] = PINGROUP(59, qdss_cti, m_voc, bimc_dte0, NA, NA, NA, NA, NA, NA), + [60] = PINGROUP(60, qdss_cti, NA, m_voc, NA, NA, NA, NA, NA, NA), + [61] = PINGROUP(61, mgpi_clk, qdss_stm12, qdss_gpio, bimc_dte1, NA, NA, + NA, NA, NA), + [62] = PINGROUP(62, i2s_mclk, audio_ref, blsp_spi1, blsp_spi2, + blsp_spi3, blsp_spi4, ldo_update, qdss_stm8, NA), + [63] = PINGROUP(63, blsp_uart2, NA, qdss_stm7, qdss_gpio4, atest_char, + NA, NA, NA, NA), + [64] = PINGROUP(64, blsp_uart2, qdss_stm6, qdss_gpio5, atest_char3, NA, + NA, NA, NA, NA), + [65] = PINGROUP(65, blsp_uart2, blsp_i2c2, NA, qdss_stm5, qdss_gpio6, + atest_char2, NA, NA, NA), + [66] = PINGROUP(66, blsp_uart2, blsp_i2c2, qdss_stm4, qdss_gpio7, + atest_char1, NA, NA, NA, NA), + [67] = PINGROUP(67, uim1_data, atest_char0, NA, NA, NA, NA, NA, NA, NA), + [68] = PINGROUP(68, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA), + [69] = PINGROUP(69, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA), + [70] = PINGROUP(70, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA), + [71] = PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3, + blsp_spi4, NA, NA, NA, NA), + [72] = 
PINGROUP(72, qlink1_en, NA, NA, NA, NA, NA, NA, NA, NA), + [73] = PINGROUP(73, qlink1_req, NA, NA, NA, NA, NA, NA, NA, NA), + [74] = PINGROUP(74, qlink1_wmss, NA, NA, NA, NA, NA, NA, NA, NA), + [75] = PINGROUP(75, COEX_UART2, NA, NA, NA, NA, NA, NA, NA, NA), + [76] = PINGROUP(76, COEX_UART2, nav_gpio, NA, NA, NA, NA, NA, NA, NA), + [77] = PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [78] = PINGROUP(78, spmi_vgi, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA), + [79] = PINGROUP(79, spmi_vgi, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA), + [80] = PINGROUP(80, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA), + [81] = PINGROUP(81, NA, blsp_spi1, NA, gcc_plltest, NA, NA, NA, NA, NA), + [82] = PINGROUP(82, NA, blsp_spi1, NA, blsp_i2c1, gcc_plltest, NA, NA, + NA, NA), + [83] = PINGROUP(83, NA, blsp_spi1, NA, blsp_i2c1, NA, NA, NA, NA, NA), + [84] = PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [85] = PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [86] = PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [87] = PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [88] = PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [89] = PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [90] = PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [91] = PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [92] = PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [93] = PINGROUP(93, NA, NA, usb2phy_ac, NA, NA, NA, NA, NA, NA), + [94] = PINGROUP(94, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA), + [95] = PINGROUP(95, qdss_cti, qdss_cti, emac_PPS1, NA, NA, NA, NA, NA, + NA), + [96] = PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [97] = PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [98] = PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [99] = PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [100] = PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [101] = PINGROUP(101, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [102] = PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, 
NA), + [103] = PINGROUP(103, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [104] = PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [105] = PINGROUP(105, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [106] = PINGROUP(106, emac_PPS0, NA, NA, NA, NA, NA, NA, NA, NA), + [107] = PINGROUP(107, NA, NA, NA, NA, NA, NA, NA, NA, NA), + [108] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0), + [109] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6), + [110] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3), + [111] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0), + [112] = UFS_RESET(ufs_reset, 0x0), +}; + +static const struct msm_pinctrl_soc_data sdxprairie_pinctrl = { + .pins = sdxprairie_pins, + .npins = ARRAY_SIZE(sdxprairie_pins), + .functions = sdxprairie_functions, + .nfunctions = ARRAY_SIZE(sdxprairie_functions), + .groups = sdxprairie_groups, + .ngroups = ARRAY_SIZE(sdxprairie_groups), + .ngpios = 108, +}; + +static int sdxprairie_pinctrl_probe(struct platform_device *pdev) +{ + return msm_pinctrl_probe(pdev, &sdxprairie_pinctrl); +} + +static const struct of_device_id sdxprairie_pinctrl_of_match[] = { + { .compatible = "qcom,sdxprairie-pinctrl", }, + { }, +}; + +static struct platform_driver sdxprairie_pinctrl_driver = { + .driver = { + .name = "sdxprairie-pinctrl", + .owner = THIS_MODULE, + .of_match_table = sdxprairie_pinctrl_of_match, + }, + .probe = sdxprairie_pinctrl_probe, + .remove = msm_pinctrl_remove, +}; + +static int __init sdxprairie_pinctrl_init(void) +{ + return platform_driver_register(&sdxprairie_pinctrl_driver); +} +arch_initcall(sdxprairie_pinctrl_init); + +static void __exit sdxprairie_pinctrl_exit(void) +{ + platform_driver_unregister(&sdxprairie_pinctrl_driver); +} +module_exit(sdxprairie_pinctrl_exit); + +MODULE_DESCRIPTION("QTI sdxprairie pinctrl driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, sdxprairie_pinctrl_of_match); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c index 
fc37f55c6568b2d3a06ce9ab2b0ef6f4e3634c8e..33c16f295d318e3566679c7872862bb0a49467ea 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c @@ -53,6 +53,9 @@ .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \ .intr_status_reg = base + 0xc + REG_SIZE * id, \ .intr_target_reg = base + 0x8 + REG_SIZE * id, \ + .dir_conn_reg = (base == EAST) ? base + 0xb7000 : \ + ((base == WEST) ? base + 0xbb000 : \ + ((base == NORTH) ? base + 0xbc000 : base + 0xbe000)), \ .mux_bit = 2, \ .pull_bit = 0, \ .drv_bit = 6, \ @@ -67,6 +70,7 @@ .intr_polarity_bit = 1, \ .intr_detection_bit = 2, \ .intr_detection_width = 2, \ + .dir_conn_en_bit = 8, \ } #define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \ @@ -1845,6 +1849,89 @@ static const struct msm_pingroup sm8150_groups[] = { [178] = UFS_RESET(ufs_reset, 0xdb6004), }; +static struct msm_dir_conn sm8150_dir_conn[] = { + {3, 511}, + {5, 512}, + {8, 513}, + {9, 514}, + {10, 615}, + {12, 619}, + {24, 517}, + {26, 518}, + {27, 521}, + {28, 522}, + {30, 519}, + {36, 523}, + {37, 524}, + {38, 510}, + {39, 633}, /* GPIO 39 mapped to 640 SPI as well */ + {41, 527}, + {42, 528}, + {46, 530}, + {47, 529}, + {48, 531}, + {49, 533}, + {50, 532}, + {51, 631}, /* GPIO 51 mapped to SPI 638 as well */ + {53, 534}, + {54, 535}, + {55, 536}, + {56, 537}, + {58, 538}, + {60, 540}, + {61, 541}, + {68, 542}, + {70, 543}, + {76, 551}, + {77, 546}, + {81, 544}, + {83, 545}, + {86, 547}, + {87, 564}, + {88, 632}, /* GPIO 88 mapped to SPI 639 as well */ + {90, 549}, + {91, 550}, + {93, 555}, + {95, 552}, + {96, 553}, + {97, 554}, + {101, 520}, + {103, 557}, + {104, 558}, + {108, 559}, + {112, 560}, + {113, 561}, + {114, 562}, + {117, 565}, + {118, 616}, + {119, 567}, + {120, 568}, + {121, 569}, + {122, 570}, + {123, 571}, + {124, 572}, + {125, 573}, + {129, 609}, + {132, 620}, + {133, 563}, + {134, 516}, + {136, 612}, + {142, 618}, + {144, 630}, /* GPIO 144 mapped to SPI 637 as well */ + {147, 617}, + {150, 622}, + {152, 623}, + 
{153, 624}, + {0, 216}, + {0, 215}, + {0, 214}, + {0, 213}, + {0, 212}, + {0, 211}, + {0, 210}, + {0, 209}, +}; + static const struct msm_pinctrl_soc_data sm8150_pinctrl = { .pins = sm8150_pins, .npins = ARRAY_SIZE(sm8150_pins), @@ -1853,6 +1940,9 @@ static const struct msm_pinctrl_soc_data sm8150_pinctrl = { .groups = sm8150_groups, .ngroups = ARRAY_SIZE(sm8150_groups), .ngpios = 175, + .dir_conn = sm8150_dir_conn, + .n_dir_conns = ARRAY_SIZE(sm8150_dir_conn), + .dir_conn_irq_base = 216, }; static int sm8150_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index b05d419ca4c53b1eeef65bbd042b313213e4c45a..3bec5a41ca194845e658ee13c42b0269d50e7cd6 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014, 2016-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -516,7 +516,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin, pad->pullup = arg; break; case PMIC_GPIO_CONF_STRENGTH: - if (arg > PMIC_GPIO_STRENGTH_LOW) + if (arg > PMIC_GPIO_STRENGTH_HIGH) return -EINVAL; pad->strength = arg; break; diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index f53e32a9d8fcef232ec9ce825e58edd0684c4007..2bb96e9f580b88cc14f49e3d7d13507c1fa2b5bc 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -365,7 +365,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev, banks |= BIT(0); break; case PM8XXX_QCOM_DRIVE_STRENGH: - if (arg > PMIC_GPIO_STRENGTH_LOW) { + if (arg > PM8921_GPIO_STRENGTH_LOW) { dev_err(pctrl->dev, "invalid drive strength\n"); return -EINVAL; } diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c index 200e1f4f6db92bcb1b78b96ec04ebd7f0c1b86b6..711333fb2c6e60b51275a1af7ce4f56be2fc08c0 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c @@ -1,7 +1,7 @@ /* * R8A7796 processor support - PFC hardware block. * - * Copyright (C) 2016 Renesas Electronics Corp. + * Copyright (C) 2016-2017 Renesas Electronics Corp. * * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c * @@ -477,7 +477,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28 #define MOD_SEL1_26 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1) #define MOD_SEL1_25_24 FM(SEL_SSP1_1_0) FM(SEL_SSP1_1_1) FM(SEL_SSP1_1_2) FM(SEL_SSP1_1_3) #define MOD_SEL1_23_22_21 FM(SEL_SSP1_0_0) FM(SEL_SSP1_0_1) FM(SEL_SSP1_0_2) FM(SEL_SSP1_0_3) FM(SEL_SSP1_0_4) F_(0, 0) F_(0, 0) F_(0, 0) -#define MOD_SEL1_20 FM(SEL_SSI_0) FM(SEL_SSI_1) +#define MOD_SEL1_20 FM(SEL_SSI1_0) FM(SEL_SSI1_1) #define MOD_SEL1_19 FM(SEL_SPEED_PULSE_0) FM(SEL_SPEED_PULSE_1) #define MOD_SEL1_18_17 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) FM(SEL_SIMCARD_2) FM(SEL_SIMCARD_3) #define MOD_SEL1_16 FM(SEL_SDHI2_0) FM(SEL_SDHI2_1) @@ -1224,7 +1224,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_11_8, HSCK0), PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3), PINMUX_IPSR_MSEL(IP13_11_8, AUDIO_CLKB_A, SEL_ADG_B_0), - PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP13_11_8, SSI_SDATA1_B, SEL_SSI1_1), PINMUX_IPSR_MSEL(IP13_11_8, TS_SCK0_D, SEL_TSIF0_3), 
PINMUX_IPSR_MSEL(IP13_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_11_8, RIF0_CLK_C, SEL_DRIF0_2), @@ -1232,14 +1232,14 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_15_12, HRX0), PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP13_15_12, SSI_SDATA2_B, SEL_SSI2_1), PINMUX_IPSR_MSEL(IP13_15_12, TS_SDEN0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_15_12, STP_ISEN_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_15_12, RIF0_D0_C, SEL_DRIF0_2), PINMUX_IPSR_GPSR(IP13_19_16, HTX0), PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP13_19_16, SSI_SDATA9_B, SEL_SSI9_1), PINMUX_IPSR_MSEL(IP13_19_16, TS_SDAT0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_19_16, STP_ISD_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_C, SEL_DRIF0_2), @@ -1247,7 +1247,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_23_20, HCTS0_N), PINMUX_IPSR_MSEL(IP13_23_20, RX2_B, SEL_SCIF2_1), PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP13_23_20, SSI_SCK9_A, SEL_SSI9_0), PINMUX_IPSR_MSEL(IP13_23_20, TS_SPSYNC0_D, SEL_TSIF0_3), PINMUX_IPSR_MSEL(IP13_23_20, STP_ISSYNC_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_C, SEL_DRIF0_2), @@ -1256,7 +1256,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP13_27_24, HRTS0_N), PINMUX_IPSR_MSEL(IP13_27_24, TX2_B, SEL_SCIF2_1), PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3), - PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP13_27_24, SSI_WS9_A, SEL_SSI9_0), PINMUX_IPSR_MSEL(IP13_27_24, STP_IVCXO27_0_D, SEL_SSP1_0_3), PINMUX_IPSR_MSEL(IP13_27_24, BPFCLK_A, SEL_FM_0), PINMUX_IPSR_GPSR(IP13_27_24, AUDIO_CLKOUT2_A), @@ -1271,7 +1271,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP14_3_0, RX5_A, SEL_SCIF5_0), 
PINMUX_IPSR_MSEL(IP14_3_0, NFWP_N_A, SEL_NDF_0), PINMUX_IPSR_MSEL(IP14_3_0, AUDIO_CLKA_C, SEL_ADG_A_2), - PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP14_3_0, SSI_SCK2_A, SEL_SSI2_0), PINMUX_IPSR_MSEL(IP14_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2), PINMUX_IPSR_GPSR(IP14_3_0, AUDIO_CLKOUT3_A), PINMUX_IPSR_MSEL(IP14_3_0, TCLK1_B, SEL_TIMER_TMU_1), @@ -1280,7 +1280,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP14_7_4, TX5_A, SEL_SCIF5_0), PINMUX_IPSR_MSEL(IP14_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3), PINMUX_IPSR_MSEL(IP14_7_4, AUDIO_CLKC_A, SEL_ADG_C_0), - PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP14_7_4, SSI_WS2_A, SEL_SSI2_0), PINMUX_IPSR_MSEL(IP14_7_4, STP_OPWM_0_D, SEL_SSP1_0_3), PINMUX_IPSR_GPSR(IP14_7_4, AUDIO_CLKOUT_D), PINMUX_IPSR_MSEL(IP14_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1), @@ -1308,10 +1308,10 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5), /* IPSR15 */ - PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP15_3_0, SSI_SDATA1_A, SEL_SSI1_0), - PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0), - PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI2_0), + PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI1_1), PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349), PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0), @@ -1397,11 +1397,11 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP16_27_24, RIF1_D1_A, SEL_DRIF1_0), PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D1_A, SEL_DRIF3_0), - PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP16_31_28, SSI_SDATA9_A, SEL_SSI9_0), PINMUX_IPSR_MSEL(IP16_31_28, HSCK2_B, SEL_HSCIF2_1), PINMUX_IPSR_MSEL(IP16_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2), PINMUX_IPSR_MSEL(IP16_31_28, HSCK1_A, SEL_HSCIF1_0), - PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS1_B, SEL_SSI1_1), PINMUX_IPSR_GPSR(IP16_31_28, SCK1), 
PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0), PINMUX_IPSR_MSEL(IP16_31_28, SCK5_A, SEL_SCIF5_0), @@ -1433,7 +1433,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_19_16, USB1_PWEN), PINMUX_IPSR_MSEL(IP17_19_16, SIM0_CLK_C, SEL_SIMCARD_2), - PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP17_19_16, SSI_SCK1_A, SEL_SSI1_0), PINMUX_IPSR_MSEL(IP17_19_16, TS_SCK0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP17_19_16, STP_ISCLK_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP17_19_16, FMCLK_B, SEL_FM_1), @@ -1443,7 +1443,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_23_20, USB1_OVC), PINMUX_IPSR_MSEL(IP17_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2), - PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI_0), + PINMUX_IPSR_MSEL(IP17_23_20, SSI_WS1_A, SEL_SSI1_0), PINMUX_IPSR_MSEL(IP17_23_20, TS_SDAT0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP17_23_20, STP_ISD_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP17_23_20, FMIN_B, SEL_FM_1), @@ -1453,7 +1453,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_27_24, USB30_PWEN), PINMUX_IPSR_GPSR(IP17_27_24, AUDIO_CLKOUT_B), - PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP17_27_24, SSI_SCK2_B, SEL_SSI2_1), PINMUX_IPSR_MSEL(IP17_27_24, TS_SDEN1_D, SEL_TSIF1_3), PINMUX_IPSR_MSEL(IP17_27_24, STP_ISEN_1_D, SEL_SSP1_1_3), PINMUX_IPSR_MSEL(IP17_27_24, STP_OPWM_0_E, SEL_SSP1_0_4), @@ -1465,7 +1465,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP17_31_28, USB30_OVC), PINMUX_IPSR_GPSR(IP17_31_28, AUDIO_CLKOUT1_B), - PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP17_31_28, SSI_WS2_B, SEL_SSI2_1), PINMUX_IPSR_MSEL(IP17_31_28, TS_SPSYNC1_D, SEL_TSIF1_3), PINMUX_IPSR_MSEL(IP17_31_28, STP_ISSYNC_1_D, SEL_SSP1_1_3), PINMUX_IPSR_MSEL(IP17_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4), @@ -1476,7 +1476,7 @@ static const u16 pinmux_data[] = { /* IPSR18 */ PINMUX_IPSR_GPSR(IP18_3_0, GP6_30), PINMUX_IPSR_GPSR(IP18_3_0, AUDIO_CLKOUT2_B), - 
PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP18_3_0, SSI_SCK9_B, SEL_SSI9_1), PINMUX_IPSR_MSEL(IP18_3_0, TS_SDEN0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP18_3_0, STP_ISEN_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP18_3_0, RIF2_D0_B, SEL_DRIF2_1), @@ -1486,7 +1486,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP18_7_4, GP6_31), PINMUX_IPSR_GPSR(IP18_7_4, AUDIO_CLKOUT3_B), - PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI_1), + PINMUX_IPSR_MSEL(IP18_7_4, SSI_WS9_B, SEL_SSI9_1), PINMUX_IPSR_MSEL(IP18_7_4, TS_SPSYNC0_E, SEL_TSIF0_4), PINMUX_IPSR_MSEL(IP18_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4), PINMUX_IPSR_MSEL(IP18_7_4, RIF2_D1_B, SEL_DRIF2_1), diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 1baf720faf690abda2b769a338b6bd73782738c8..87e9747d229afe330a8e1b0b671b86c413fd0bd1 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -54,7 +54,6 @@ static int ec_response_timed_out(void) static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { - struct ec_host_request *request; struct ec_host_response response; u8 sum; int ret = 0; @@ -65,8 +64,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, /* Write buffer */ cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); - request = (struct ec_host_request *)ec->dout; - /* Here we go */ sum = EC_COMMAND_PROTOCOL_3; cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum); diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index 1a020b8e47bd101a7a6dee3f3d5b37cfdd559fab..2b4780b90632185bd6cdddbb48a36e8b99bf15fd 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -709,6 +709,20 @@ static uint32_t gsi_get_max_channels(enum gsi_ver ver) GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; break; + case GSI_VER_2_2: + reg = gsi_readl(gsi_ctx->base + + 
GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + case GSI_VER_2_5: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; } GSIDBG("max channels %d\n", reg); @@ -752,6 +766,20 @@ static uint32_t gsi_get_max_event_rings(enum gsi_ver ver) GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; break; + case GSI_VER_2_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + case GSI_VER_2_5: + reg = gsi_readl(gsi_ctx->base + + GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; } GSIDBG("max event rings %d\n", reg); @@ -2199,6 +2227,10 @@ int gsi_reset_channel(unsigned long chan_hdl) BUG(); } + /* Hardware issue fixed from GSI 2.0 and no need for the WA */ + if (gsi_ctx->per.ver >= GSI_VER_2_0) + reset_done = true; + /* workaround: reset GSI producers again */ if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) { usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP); diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h index 2067ac922a747836beb4284d5984e4771e6bd344..377e39ad22ab350d1e211da267ee9182f5619f88 100644 --- a/drivers/platform/msm/gsi/gsi_reg.h +++ b/drivers/platform/msm/gsi/gsi_reg.h @@ -1548,6 +1548,38 @@ #define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 #define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 
+/* v2.2 */ +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 + #define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \ 
(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n)) #define GSI_EE_n_GSI_SW_VERSION_RMSK 0xffffffff diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 6551d99a0fe2aa49e67fdf38823f52e9fa1e12be..e514979115d09a5f48a7152e55f4c50715879736 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -306,6 +306,46 @@ u8 *ipa_pad_to_32(u8 *dest) return dest; } +int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, + struct sg_table *in_sgt_ptr) +{ + unsigned int nents; + + if (in_sgt_ptr != NULL) { + *out_ch_ptr = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (*out_ch_ptr == NULL) + return -ENOMEM; + + nents = in_sgt_ptr->nents; + + (*out_ch_ptr)->sgl = + kcalloc(nents, sizeof(struct scatterlist), + GFP_KERNEL); + if ((*out_ch_ptr)->sgl == NULL) { + kfree(*out_ch_ptr); + *out_ch_ptr = NULL; + return -ENOMEM; + } + + memcpy((*out_ch_ptr)->sgl, in_sgt_ptr->sgl, + nents*sizeof((*out_ch_ptr)->sgl)); + (*out_ch_ptr)->nents = nents; + (*out_ch_ptr)->orig_nents = in_sgt_ptr->orig_nents; + } + return 0; +} + +int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr) +{ + if (*out_sgt_ptr != NULL) { + kfree((*out_sgt_ptr)->sgl); + (*out_sgt_ptr)->sgl = NULL; + kfree(*out_sgt_ptr); + *out_sgt_ptr = NULL; + } + return 0; +} + /** * ipa_clear_endpoint_delay() - Clear ep_delay. 
* @clnt_hdl: [in] IPA client handle @@ -2757,6 +2797,13 @@ const char *ipa_get_version_string(enum ipa_hw_type ver) break; case IPA_HW_v4_1: str = "4.1"; + break; + case IPA_HW_v4_2: + str = "4.2"; + break; + case IPA_HW_v4_5: + str = "4.5"; + break; default: str = "Invalid version"; break; @@ -2812,6 +2859,8 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) case IPA_HW_v3_5_1: case IPA_HW_v4_0: case IPA_HW_v4_1: + case IPA_HW_v4_2: + case IPA_HW_v4_5: result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl, ipa_plat_drv_match); break; @@ -3071,12 +3120,12 @@ int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp, * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes */ int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, - int ipa_ep_idx_dl) + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params) { int ret; IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul, - ipa_ep_idx_dl); + ipa_ep_idx_dl, params); return ret; } diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index 98ce53b195912f7fc02209f70e2e74d1f2e6a052..f0541e62d0406a7c772700e1978eb90098fec4e9 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -389,7 +389,7 @@ struct ipa_api_controller { struct ipa_ntn_conn_out_params *); int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul, - int ipa_ep_idx_dl); + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params); struct device *(*ipa_get_pdev)(void); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c index 64dcc11163bc11a03b1d53d35a9e32be5ce1d8c4..c74d3b1ba1362983e6d2cec450d03ef9d9f113a0 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018 The Linux Foundation. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -71,6 +71,7 @@ struct ipa_uc_offload_ctx { ipa_notify_cb notify; struct completion ntn_completion; u32 pm_hdl; + struct ipa_ntn_conn_in_params conn; }; static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE]; @@ -393,6 +394,51 @@ static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event, } } +static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest, + struct ipa_ntn_setup_info *source) +{ + int result; + + IPA_UC_OFFLOAD_DBG("Allocating smmu info\n"); + + memcpy(dest, source, sizeof(struct ipa_ntn_setup_info)); + + dest->data_buff_list = + kcalloc(dest->num_buffers, sizeof(struct ntn_buff_smmu_map), + GFP_KERNEL); + if (dest->data_buff_list == NULL) { + IPA_UC_OFFLOAD_ERR("failed to alloc smmu info\n"); + return -ENOMEM; + } + + memcpy(dest->data_buff_list, source->data_buff_list, + sizeof(struct ntn_buff_smmu_map) * dest->num_buffers); + + result = ipa_smmu_store_sgt(&dest->buff_pool_base_sgt, + source->buff_pool_base_sgt); + if (result) { + kfree(dest->data_buff_list); + return result; + } + + result = ipa_smmu_store_sgt(&dest->ring_base_sgt, + source->ring_base_sgt); + if (result) { + kfree(dest->data_buff_list); + ipa_smmu_free_sgt(&dest->buff_pool_base_sgt); + return result; + } + + return 0; +} + +static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params) +{ + kfree(params->data_buff_list); + ipa_smmu_free_sgt(¶ms->buff_pool_base_sgt); + ipa_smmu_free_sgt(¶ms->ring_base_sgt); +} + int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp, struct ipa_ntn_conn_out_params *outp, struct ipa_uc_offload_ctx *ntn_ctx) @@ -400,6 +446,11 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp, int result = 0; enum ipa_uc_offload_state prev_state; + if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) { + 
IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n"); + return -EINVAL; + } + prev_state = ntn_ctx->state; if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT || inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) { @@ -453,7 +504,21 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp, goto fail; } - return 0; + if (ntn_ctx->conn.dl.smmu_enabled) { + result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl, + &inp->dl); + if (result) { + IPA_UC_OFFLOAD_ERR("alloc failure on TX\n"); + goto fail; + } + result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul, + &inp->ul); + if (result) { + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl); + IPA_UC_OFFLOAD_ERR("alloc failure on RX\n"); + goto fail; + } + } fail: if (!ipa_pm_is_used()) @@ -548,6 +613,11 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx) int ipa_ep_idx_ul, ipa_ep_idx_dl; int ret = 0; + if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) { + IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n"); + return -EINVAL; + } + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED; if (ipa_pm_is_used()) { @@ -575,12 +645,16 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx) ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD); ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS); - ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl); + ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl, + &ntn_ctx->conn); if (ret) { IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n", ret); return -EFAULT; } + if (ntn_ctx->conn.dl.smmu_enabled) + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl); + ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.ul); return ret; } diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c index a6f713b271c853bec92ce9ae7a08b6fd7130fd03..b937964fb459fe0aac8c9ebc70ff89c934eb35ff 100644 --- 
a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -1148,7 +1148,8 @@ static int ipa3_usb_smmu_map_xdci_channel( if (ipa3_usb_ctx->smmu_reg_map.cnt == 0) { ipa3_usb_ctx->smmu_reg_map.addr = gevntcount_r; result = ipa3_smmu_map_peer_reg( - ipa3_usb_ctx->smmu_reg_map.addr, true); + ipa3_usb_ctx->smmu_reg_map.addr, true, + IPA_SMMU_CB_AP); if (result) { IPA_USB_ERR("failed to map USB regs %d\n", result); @@ -1171,7 +1172,8 @@ static int ipa3_usb_smmu_map_xdci_channel( if (ipa3_usb_ctx->smmu_reg_map.cnt == 1) { result = ipa3_smmu_map_peer_reg( - ipa3_usb_ctx->smmu_reg_map.addr, false); + ipa3_usb_ctx->smmu_reg_map.addr, false, + IPA_SMMU_CB_AP); if (result) { IPA_USB_ERR("failed to unmap USB regs %d\n", result); @@ -1183,14 +1185,16 @@ static int ipa3_usb_smmu_map_xdci_channel( result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova, - params->xfer_ring_len, map, params->sgt_xfer_rings); + params->xfer_ring_len, map, params->sgt_xfer_rings, + IPA_SMMU_CB_AP); if (result) { IPA_USB_ERR("failed to map Xfer ring %d\n", result); return result; } result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova, - params->data_buff_base_len, map, params->sgt_data_buff); + params->data_buff_base_len, map, params->sgt_data_buff, + IPA_SMMU_CB_AP); if (result) { IPA_USB_ERR("failed to map TRBs buff %d\n", result); return result; @@ -1199,43 +1203,6 @@ static int ipa3_usb_smmu_map_xdci_channel( return 0; } -static int ipa3_usb_smmu_store_sgt(struct sg_table **out_ch_ptr, - struct sg_table *in_sgt_ptr) -{ - unsigned int nents; - - if (in_sgt_ptr != NULL) { - *out_ch_ptr = kzalloc(sizeof(struct sg_table), GFP_KERNEL); - if (*out_ch_ptr == NULL) - return -ENOMEM; - - nents = in_sgt_ptr->nents; - - (*out_ch_ptr)->sgl = - kcalloc(nents, sizeof(struct scatterlist), - GFP_KERNEL); - if ((*out_ch_ptr)->sgl == NULL) - return -ENOMEM; - - memcpy((*out_ch_ptr)->sgl, in_sgt_ptr->sgl, - nents*sizeof((*out_ch_ptr)->sgl)); - 
(*out_ch_ptr)->nents = nents; - (*out_ch_ptr)->orig_nents = in_sgt_ptr->orig_nents; - } - return 0; -} - -static int ipa3_usb_smmu_free_sgt(struct sg_table **out_sgt_ptr) -{ - if (*out_sgt_ptr != NULL) { - kfree((*out_sgt_ptr)->sgl); - (*out_sgt_ptr)->sgl = NULL; - kfree(*out_sgt_ptr); - *out_sgt_ptr = NULL; - } - return 0; -} - static int ipa3_usb_request_xdci_channel( struct ipa_usb_xdci_chan_params *params, enum ipa_usb_direction dir, @@ -1321,18 +1288,17 @@ static int ipa3_usb_request_xdci_channel( xdci_ch_params = &ipa3_usb_ctx->ttype_ctx[ttype].dl_ch_params; *xdci_ch_params = *params; - result = ipa3_usb_smmu_store_sgt( + result = ipa_smmu_store_sgt( &xdci_ch_params->sgt_xfer_rings, params->sgt_xfer_rings); - if (result) { - ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings); + if (result) return result; - } - result = ipa3_usb_smmu_store_sgt( + + result = ipa_smmu_store_sgt( &xdci_ch_params->sgt_data_buff, params->sgt_data_buff); if (result) { - ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_data_buff); + ipa_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings); return result; } chan_params.keep_ipa_awake = params->keep_ipa_awake; @@ -1437,9 +1403,9 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl, result = ipa3_usb_smmu_map_xdci_channel(xdci_ch_params, false); if (xdci_ch_params->sgt_xfer_rings != NULL) - ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings); + ipa_smmu_free_sgt(&xdci_ch_params->sgt_xfer_rings); if (xdci_ch_params->sgt_data_buff != NULL) - ipa3_usb_smmu_free_sgt(&xdci_ch_params->sgt_data_buff); + ipa_smmu_free_sgt(&xdci_ch_params->sgt_data_buff); /* Change ipa_usb state to INITIALIZED */ if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype)) @@ -2181,6 +2147,18 @@ static void ipa_usb_debugfs_init(void){} static void ipa_usb_debugfs_remove(void){} #endif /* CONFIG_DEBUG_FS */ +static int ipa_usb_set_lock_unlock(bool is_lock) +{ + IPA_USB_DBG("entry\n"); + if (is_lock) + mutex_lock(&ipa3_usb_ctx->general_mutex); + else + 
mutex_unlock(&ipa3_usb_ctx->general_mutex); + IPA_USB_DBG("exit\n"); + + return 0; +} + int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params, @@ -2244,6 +2222,16 @@ int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params, goto connect_fail; } + /* + * Register for xdci lock/unlock callback with ipa core driver. + * As per use case, only register for IPA_CONS end point for now. + * If needed we can include the same for IPA_PROD ep. + * For IPA_USB_DIAG/DPL config there will not be any UL ep. + */ + if (connect_params->teth_prot != IPA_USB_DIAG) + ipa3_register_lock_unlock_callback(&ipa_usb_set_lock_unlock, + ul_out_params->clnt_hdl); + IPA_USB_DBG_LOW("exit\n"); mutex_unlock(&ipa3_usb_ctx->general_mutex); return 0; @@ -2321,6 +2309,15 @@ static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl, } } + /* + * Deregister for xdci lock/unlock callback from ipa core driver. + * As per use case, only deregister for IPA_CONS end point for now. + * If needed we can include the same for IPA_PROD ep. + * For IPA_USB_DIAG/DPL config there will not be any UL config. 
+ */ + if (!IPA3_USB_IS_TTYPE_DPL(ttype)) + ipa3_deregister_lock_unlock_callback(ul_clnt_hdl); + /* Change state to STOPPED */ if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype)) IPA_USB_ERR("failed to change state to stopped\n"); diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index b37a127c3efe3e368b7aa70fdb989b23579a7268..92f1ab65b70fc2692cd6e11c4f743544b39ea43b 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -408,7 +408,8 @@ int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, ipa_notify_cb notify, void *priv, u8 hdr_len, struct ipa_ntn_conn_out_params *outp); -int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); u8 *ipa_write_64(u64 w, u8 *dest); u8 *ipa_write_32(u32 w, u8 *dest); u8 *ipa_write_16(u16 hw, u8 *dest); @@ -434,4 +435,8 @@ int ipa_start_gsi_channel(u32 clnt_hdl); bool ipa_pm_is_used(void); +int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, + struct sg_table *in_sgt_ptr); +int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr); + #endif /* _IPA_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h index 0bc4b768e847c0d7af8f09b400d42492fe54ec91..3caac8c0cbad966c3ffffe3ab193f1dd88a85bc6 100644 --- a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h +++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,7 +19,9 @@ int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, ipa_notify_cb notify, void *priv, u8 hdr_len, struct ipa_ntn_conn_out_params *outp); -int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); + +int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), void *user_data); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 5cbcbaa9db54dae1e5d4338bbf8d85b2a5fd4806..92120c009a2cee7da687a0b789494a844268e7e8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -506,6 +506,19 @@ struct iommu_domain *ipa3_get_wlan_smmu_domain(void) return NULL; } +struct iommu_domain *ipa3_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type) +{ + + if (cb_type == IPA_SMMU_CB_WLAN && smmu_cb[IPA_SMMU_CB_WLAN].valid) + return smmu_cb[IPA_SMMU_CB_WLAN].iommu; + + if (smmu_cb[cb_type].valid) + return smmu_cb[cb_type].mapping->domain; + + IPAERR("CB#%d not valid\n", cb_type); + + return NULL; +} struct device *ipa3_get_dma_dev(void) { @@ -2109,6 +2122,9 @@ static void ipa3_q6_avoid_holb(void) ep_holb.tmr_val = 0; ep_holb.en = 1; + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) + ipa3_cal_ep_holb_scale_base_val(ep_holb.tmr_val, &ep_holb); + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { if (IPA_CLIENT_IS_Q6_CONS(client_idx)) { ep_idx = ipa3_get_ep_mapping(client_idx); @@ -2213,6 +2229,15 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip, return -EINVAL; } + /* + * SRAM memory not allocated to hash tables. Cleaning the of hash table + * operation not supported. 
+ */ + if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Clean hashable rules not supported\n"); + return retval; + } + /* Up to filtering pipes we have filtering tables */ desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc), GFP_KERNEL); @@ -2331,6 +2356,15 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip, return -EINVAL; } + /* + * SRAM memory not allocated to hash tables. Cleaning the of hash table + * operation not supported. + */ + if (rlt == IPA_RULE_HASHABLE && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Clean hashable rules not supported\n"); + return retval; + } + if (ip == IPA_IP_v4) { modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo); modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi); @@ -2404,7 +2438,7 @@ static int ipa3_q6_clean_q6_tables(void) struct ipa3_desc *desc; struct ipahal_imm_cmd_pyld *cmd_pyld = NULL; struct ipahal_imm_cmd_register_write reg_write_cmd = {0}; - int retval; + int retval = 0; struct ipahal_reg_fltrt_hash_flush flush; struct ipahal_reg_valmask valmask; @@ -2445,6 +2479,12 @@ static int ipa3_q6_clean_q6_tables(void) return -EFAULT; } + /* + * SRAM memory not allocated to hash tables. Cleaning the of hash table + * operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) + return retval; /* Flush rules cache */ desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL); if (!desc) @@ -2591,6 +2631,8 @@ void ipa3_q6_pre_shutdown_cleanup(void) */ ipa3_q6_pipe_delay(false); + ipa3_set_usb_prod_pipe_delay(); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); IPADBG_LOW("Exit with success\n"); } @@ -2841,10 +2883,21 @@ int _ipa_init_rt4_v3(void) return rc; } - v4_cmd.hash_rules_addr = mem.phys_base; - v4_cmd.hash_rules_size = mem.size; - v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + - IPA_MEM_PART(v4_rt_hash_ofst); + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. 
+ */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v4_cmd.hash_rules_addr = 0; + v4_cmd.hash_rules_size = 0; + v4_cmd.hash_local_addr = 0; + } else { + v4_cmd.hash_rules_addr = mem.phys_base; + v4_cmd.hash_rules_size = mem.size; + v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_hash_ofst); + } + v4_cmd.nhash_rules_addr = mem.phys_base; v4_cmd.nhash_rules_size = mem.size; v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + @@ -2904,10 +2957,21 @@ int _ipa_init_rt6_v3(void) return rc; } - v6_cmd.hash_rules_addr = mem.phys_base; - v6_cmd.hash_rules_size = mem.size; - v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + - IPA_MEM_PART(v6_rt_hash_ofst); + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v6_cmd.hash_rules_addr = 0; + v6_cmd.hash_rules_size = 0; + v6_cmd.hash_local_addr = 0; + } else { + v6_cmd.hash_rules_addr = mem.phys_base; + v6_cmd.hash_rules_size = mem.size; + v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_hash_ofst); + } + v6_cmd.nhash_rules_addr = mem.phys_base; v6_cmd.nhash_rules_size = mem.size; v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + @@ -2961,10 +3025,21 @@ int _ipa_init_flt4_v3(void) return rc; } - v4_cmd.hash_rules_addr = mem.phys_base; - v4_cmd.hash_rules_size = mem.size; - v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + - IPA_MEM_PART(v4_flt_hash_ofst); + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. 
+ */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v4_cmd.hash_rules_addr = 0; + v4_cmd.hash_rules_size = 0; + v4_cmd.hash_local_addr = 0; + } else { + v4_cmd.hash_rules_addr = mem.phys_base; + v4_cmd.hash_rules_size = mem.size; + v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_hash_ofst); + } + v4_cmd.nhash_rules_addr = mem.phys_base; v4_cmd.nhash_rules_size = mem.size; v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + @@ -3018,10 +3093,21 @@ int _ipa_init_flt6_v3(void) return rc; } - v6_cmd.hash_rules_addr = mem.phys_base; - v6_cmd.hash_rules_size = mem.size; - v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + - IPA_MEM_PART(v6_flt_hash_ofst); + /* + * SRAM memory not allocated to hash tables. Initializing/Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) { + v6_cmd.hash_rules_addr = 0; + v6_cmd.hash_rules_size = 0; + v6_cmd.hash_local_addr = 0; + } else { + v6_cmd.hash_rules_addr = mem.phys_base; + v6_cmd.hash_rules_size = mem.size; + v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_hash_ofst); + } + v6_cmd.nhash_rules_addr = mem.phys_base; v6_cmd.nhash_rules_size = mem.size; v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes + @@ -3158,20 +3244,21 @@ static int ipa3_setup_apps_pipes(void) ipa3_ctx->ctrl->ipa_init_flt6(); IPADBG("V6 FLT initialized\n"); - if (ipa3_setup_flt_hash_tuple()) { - IPAERR(":fail to configure flt hash tuple\n"); - result = -EPERM; - goto fail_flt_hash_tuple; - } - IPADBG("flt hash tuple is configured\n"); + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + if (ipa3_setup_flt_hash_tuple()) { + IPAERR(":fail to configure flt hash tuple\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("flt hash tuple is configured\n"); - if (ipa3_setup_rt_hash_tuple()) { - IPAERR(":fail to configure rt hash tuple\n"); - result = -EPERM; - goto fail_flt_hash_tuple; + if 
(ipa3_setup_rt_hash_tuple()) { + IPAERR(":fail to configure rt hash tuple\n"); + result = -EPERM; + goto fail_flt_hash_tuple; + } + IPADBG("rt hash tuple is configured\n"); } - IPADBG("rt hash tuple is configured\n"); - if (ipa3_setup_exception_path()) { IPAERR(":fail to setup excp path\n"); result = -EPERM; @@ -4374,6 +4461,12 @@ static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type) case IPA_HW_v4_1: gsi_ver = GSI_VER_2_0; break; + case IPA_HW_v4_2: + gsi_ver = GSI_VER_2_2; + break; + case IPA_HW_v4_5: + gsi_ver = GSI_VER_2_5; + break; default: IPAERR("No GSI version for ipa type %d\n", ipa_hw_type); WARN_ON(1); @@ -4433,7 +4526,6 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, /* move proxy vote for modem on ipa3_post_init */ if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0) ipa3_proxy_clk_vote(); - /* SMMU was already attached if used, safe to do allocations */ if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio, ipa3_ctx->pdev)) { @@ -4557,8 +4649,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, } /* - * IPAv3.5 and above requires to disable prefetch for USB in order - * to allow MBIM to work. + * Disable prefetch for USB or MHI at IPAv3.5/IPA.3.5.1 + * This is to allow MBIM to work. */ if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && @@ -4811,18 +4903,20 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf, if (count && (dbg_buff[count - 1] == '\n')) dbg_buff[count - 1] = '\0'; + /* + * This logic enforeces MHI mode based on userspace input. + * Note that MHI mode could be already determined due + * to previous logic. 
+ */ if (!strcasecmp(dbg_buff, "MHI")) { ipa3_ctx->ipa_config_is_mhi = true; - pr_info( - "IPA is loading with MHI configuration\n"); - } else if (!strcmp(dbg_buff, "1")) { - pr_info( - "IPA is loading with non MHI configuration\n"); - } else { + } else if (strcmp(dbg_buff, "1")) { IPAERR("got invalid string %s not loading FW\n", dbg_buff); return count; } + pr_info("IPA is loading with %sMHI configuration\n", + ipa3_ctx->ipa_config_is_mhi ? "" : "non "); } queue_work(ipa3_ctx->transport_power_mgmt_wq, @@ -5002,6 +5096,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge; ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt; ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2; + ipa3_ctx->ipa_fltrt_not_hashable = resource_p->ipa_fltrt_not_hashable; ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask; ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size; ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size; @@ -5551,6 +5646,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, ipa_drv_res->ipa_tz_unlock_reg = NULL; ipa_drv_res->mhi_evid_limits[0] = IPA_MHI_GSI_EVENT_RING_ID_START; ipa_drv_res->mhi_evid_limits[1] = IPA_MHI_GSI_EVENT_RING_ID_END; + ipa_drv_res->ipa_fltrt_not_hashable = false; /* Get IPA HW Version */ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", @@ -5566,6 +5662,11 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, return -ENODEV; } + if (ipa_drv_res->ipa_hw_type >= IPA_HW_MAX) { + IPAERR(":IPA version is greater than the MAX\n"); + return -ENODEV; + } + /* Get IPA HW mode */ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode", &ipa_drv_res->ipa3_hw_mode); @@ -5624,6 +5725,13 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, ipa_drv_res->ipa_wdi2 ? 
"True" : "False"); + ipa_drv_res->ipa_fltrt_not_hashable = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-fltrt-not-hashable"); + IPADBG(": IPA filter/route rule hashable = %s\n", + ipa_drv_res->ipa_fltrt_not_hashable + ? "True" : "False"); + ipa_drv_res->use_64_bit_dma_mask = of_property_read_bool(pdev->dev.of_node, "qcom,use-64-bit-dma-mask"); @@ -6199,7 +6307,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) iova_p, &pa_p, size_p); ipa3_iommu_map(cb->mapping->domain, iova_p, pa_p, size_p, - IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + IOMMU_READ | IOMMU_WRITE); smmu_info.present[IPA_SMMU_CB_AP] = true; ipa3_ctx->pdev = dev; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index afc30bf7209cebd8ba6a3339c7538fd3e480bbcd..637d0f8b4c48ad070206f73196141da87ebe3285 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -521,15 +521,23 @@ static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params) return true; } -int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map) +int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map, + enum ipa_smmu_cb_type cb_type) { struct iommu_domain *smmu_domain; int res; - if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) + if (cb_type >= IPA_SMMU_CB_MAX) { + IPAERR("invalid cb_type\n"); + return -EINVAL; + } + + if (ipa3_ctx->s1_bypass_arr[cb_type]) { + IPADBG("CB# %d is set to s1 bypass\n", cb_type); return 0; + } - smmu_domain = ipa3_get_smmu_domain(); + smmu_domain = ipa3_get_smmu_domain_by_type(cb_type); if (!smmu_domain) { IPAERR("invalid smmu domain\n"); return -EINVAL; @@ -553,7 +561,8 @@ int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map) return 0; } -int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt) +int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt, + enum ipa_smmu_cb_type cb_type) { struct iommu_domain *smmu_domain; 
int res; @@ -565,10 +574,17 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt) int i; struct page *page; - if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) + if (cb_type >= IPA_SMMU_CB_MAX) { + IPAERR("invalid cb_type\n"); + return -EINVAL; + } + + if (ipa3_ctx->s1_bypass_arr[cb_type]) { + IPADBG("CB# %d is set to s1 bypass\n", cb_type); return 0; + } - smmu_domain = ipa3_get_smmu_domain(); + smmu_domain = ipa3_get_smmu_domain_by_type(cb_type); if (!smmu_domain) { IPAERR("invalid smmu domain\n"); return -EINVAL; @@ -624,6 +640,69 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt) return 0; } +void ipa3_register_lock_unlock_callback(int (*client_cb)(bool is_lock), + u32 ipa_ep_idx) +{ + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (!ep->valid) { + IPAERR("Invalid EP\n"); + return; + } + + if (client_cb == NULL) { + IPAERR("Bad Param"); + return; + } + + ep->client_lock_unlock = client_cb; + IPADBG("exit\n"); +} + +void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx) +{ + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (!ep->valid) { + IPAERR("Invalid EP\n"); + return; + } + + if (ep->client_lock_unlock == NULL) { + IPAERR("client_lock_unlock is already NULL"); + return; + } + + ep->client_lock_unlock = NULL; + IPADBG("exit\n"); +} + +static void client_lock_unlock_cb(u32 ipa_ep_idx, bool is_lock) +{ + struct ipa3_ep_context *ep; + + IPADBG("entry\n"); + + ep = &ipa3_ctx->ep[ipa_ep_idx]; + + if (!ep->valid) { + IPAERR("Invalid EP\n"); + return; + } + + if (ep->client_lock_unlock) + ep->client_lock_unlock(is_lock); + + IPADBG("exit\n"); +} int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, struct ipa_req_chan_out_params *out_params) @@ -1249,6 +1328,46 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id, return result; } +/* + * Set USB PROD pipe delay for MBIM/RMNET config + 
* Clocks, should be voted before calling this API + * locks should be taken before calling this API + */ + +void ipa3_set_usb_prod_pipe_delay(void) +{ + int result; + int pipe_idx; + struct ipa3_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_ctrl; + + memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_ctrl.ipa_ep_delay = true; + + + pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD); + + if (pipe_idx == IPA_EP_NOT_ALLOCATED) { + IPAERR("client (%d) not valid\n", IPA_CLIENT_USB_PROD); + return; + } + + ep = &ipa3_ctx->ep[pipe_idx]; + + /* Setting delay on USB_PROD with skip_ep_cfg */ + client_lock_unlock_cb(pipe_idx, true); + if (ep->valid && ep->skip_ep_cfg) { + ep->ep_delay_set = ep_ctrl.ipa_ep_delay; + result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl); + if (result) + IPAERR("client (ep: %d) failed result=%d\n", + pipe_idx, result); + else + IPADBG("client (ep: %d) success\n", pipe_idx); + } + client_lock_unlock_cb(pipe_idx, false); +} + void ipa3_xdci_ep_delay_rm(u32 clnt_hdl) { struct ipa3_ep_context *ep; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 00b812f6f2e0f791a8df0a0b57225d7032bb12c2..cf0fa50ebb926c633c0d15815895482a67ec518f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -372,6 +372,8 @@ static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count, list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) { + if (entry->cookie != IPA_HDR_COOKIE) + continue; nbytes = scnprintf( dbg_buff, IPA_MAX_MSG_LEN, @@ -552,6 +554,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) if (attrib->tc_eq_present) pr_err("tc:%d ", attrib->tc_eq); + if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) { + IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128); + return -EPERM; + } + for (i = 0; i < 
attrib->num_offset_meq_128; i++) { for (j = 0; j < 16; j++) { addr[j] = attrib->offset_meq_128[i].value[j]; @@ -563,6 +571,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) mask, addr); } + if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) { + IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32); + return -EPERM; + } + for (i = 0; i < attrib->num_offset_meq_32; i++) pr_err( "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ", @@ -570,6 +584,12 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) attrib->offset_meq_32[i].mask, attrib->offset_meq_32[i].value); + if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) { + IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32); + return -EPERM; + } + for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) pr_err( "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ", @@ -584,6 +604,14 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) attrib->metadata_meq32.mask, attrib->metadata_meq32.value); + if (attrib->num_ihl_offset_range_16 > + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) { + IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS, + attrib->num_ihl_offset_range_16); + return -EPERM; + } + for (i = 0; i < attrib->num_ihl_offset_range_16; i++) pr_err( "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ", @@ -776,7 +804,11 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf, pr_err("rule_id:%u prio:%u retain_hdr:%u ", rules[rl].id, rules[rl].priority, rules[rl].retain_hdr); - ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } } pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl); @@ -807,7 +839,11 @@ static ssize_t ipa3_read_rt_hw(struct file *file, char __user 
*ubuf, pr_err("rule_id:%u prio:%u retain_hdr:%u\n", rules[rl].id, rules[rl].priority, rules[rl].retain_hdr); - ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + res = ipa3_attrib_dump_eq(&rules[rl].eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } } pr_err("\n"); } @@ -881,6 +917,7 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count, u32 rt_tbl_idx; u32 bitmap; bool eq; + int res = 0; mutex_lock(&ipa3_ctx->lock); @@ -890,6 +927,8 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count, tbl = &ipa3_ctx->flt_tbl[j][ip]; i = 0; list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->cookie != IPA_FLT_COOKIE) + continue; if (entry->rule.eq_attrib_type) { rt_tbl_idx = entry->rule.rt_tbl_idx; bitmap = entry->rule.eq_attrib.rule_eq_bitmap; @@ -915,18 +954,23 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count, pr_err("pdn index %d, set metadata %d ", entry->rule.pdn_idx, entry->rule.set_metadata); - if (eq) - ipa3_attrib_dump_eq( - &entry->rule.eq_attrib); - else + if (eq) { + res = ipa3_attrib_dump_eq( + &entry->rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } else ipa3_attrib_dump( &entry->rule.attrib, ip); i++; } } +bail: mutex_unlock(&ipa3_ctx->lock); - return 0; + return res; } static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf, @@ -979,7 +1023,11 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf, pr_err("pdn: %u, set_metadata: %u ", rules[rl].rule.pdn_idx, rules[rl].rule.set_metadata); - ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } } pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n", @@ -1006,7 +1054,11 @@ static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf, pr_err("pdn: %u, set_metadata: %u ", 
rules[rl].rule.pdn_idx, rules[rl].rule.set_metadata); - ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + res = ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } } pr_err("\n"); } @@ -2234,6 +2286,13 @@ void ipa3_debugfs_init(void) goto fail; } + file = debugfs_create_u32("clk_rate", IPA_READ_ONLY_MODE, + dent, &ipa3_ctx->curr_ipa_clk_rate); + if (!file) { + IPAERR("could not create clk_rate file\n"); + goto fail; + } + ipa_debugfs_init_stats(dent); return; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 569ab9d5edd2e68cc9fff39e163f750cc4852d66..6649017f0848de4d17c06ce5ff88c0563613dc27 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -1064,6 +1064,13 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) goto fail_gen2; } + result = gsi_start_channel(ep->gsi_chan_hdl); + if (result != GSI_STATUS_SUCCESS) { + IPAERR("gsi_start_channel failed res=%d ep=%d.\n", result, + ipa_ep_idx); + goto fail_gen2; + } + if (!ep->keep_ipa_awake) IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); @@ -1113,11 +1120,6 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl) IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl)); ipa3_disable_data_path(clnt_hdl); - if (ep->napi_enabled) { - do { - usleep_range(95, 105); - } while (atomic_read(&ep->sys->curr_polling_state)); - } if (IPA_CLIENT_IS_PROD(ep->client)) { do { @@ -1131,9 +1133,6 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl) } while (1); } - if (IPA_CLIENT_IS_CONS(ep->client)) - cancel_delayed_work_sync(&ep->sys->replenish_rx_work); - flush_workqueue(ep->sys->wq); /* channel stop might fail on timeout if IPA is busy */ for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) { result = ipa3_stop_gsi_channel(clnt_hdl); @@ -1141,7 +1140,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl) break; if (result != -GSI_STATUS_AGAIN && - result != 
-GSI_STATUS_TIMED_OUT) + result != -GSI_STATUS_TIMED_OUT) break; } @@ -1150,6 +1149,17 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl) ipa_assert(); return result; } + + if (ep->napi_enabled) { + do { + usleep_range(95, 105); + } while (atomic_read(&ep->sys->curr_polling_state)); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) + cancel_delayed_work_sync(&ep->sys->replenish_rx_work); + flush_workqueue(ep->sys->wq); + result = ipa3_reset_gsi_channel(clnt_hdl); if (result != GSI_STATUS_SUCCESS) { IPAERR("Failed to reset chan: %d.\n", result); @@ -3797,15 +3807,11 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, goto fail_write_channel_scratch; } - result = gsi_start_channel(ep->gsi_chan_hdl); - if (result != GSI_STATUS_SUCCESS) - goto fail_start_channel; if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL); return 0; -fail_start_channel: fail_write_channel_scratch: if (gsi_dealloc_channel(ep->gsi_chan_hdl) != GSI_STATUS_SUCCESS) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index a49260638c8eda5a5195c6f45ae0157b88ca4ee4..0bc77b55be17eb7785dff8140f2d91788cb0b093 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -61,8 +61,10 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip, gen_params.rule = (const struct ipa_flt_rule *)&entry->rule; res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf); - if (res) + if (res) { IPAERR_RL("failed to generate flt h/w rule\n"); + return res; + } return 0; } @@ -559,27 +561,36 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip) goto fail_size_valid; } - /* flushing ipa internal hashable flt rules cache */ - memset(&flush, 0, sizeof(flush)); - if (ip == IPA_IP_v4) - flush.v4_flt = true; - else - flush.v6_flt = true; - ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); - reg_write_cmd.skip_pipeline_clear = false; - 
reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; - reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH); - reg_write_cmd.value = valmask.val; - reg_write_cmd.value_mask = valmask.mask; - cmd_pyld[0] = ipahal_construct_imm_cmd( - IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, false); - if (!cmd_pyld[0]) { - IPAERR("fail construct register_write imm cmd: IP %d\n", ip); - rc = -EFAULT; - goto fail_reg_write_construct; + /* + * SRAM memory not allocated to hash tables. Sending + * command to hash tables(filer/routing) operation not supported. + */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + /* flushing ipa internal hashable flt rules cache */ + memset(&flush, 0, sizeof(flush)); + if (ip == IPA_IP_v4) + flush.v4_flt = true; + else + flush.v6_flt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst( + IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld[0] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, + false); + if (!cmd_pyld[0]) { + IPAERR( + "fail construct register_write imm cmd: IP %d\n", ip); + rc = -EFAULT; + goto fail_reg_write_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + ++num_cmd; } - ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); - ++num_cmd; hdr_idx = 0; for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { @@ -622,25 +633,33 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip) ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); ++num_cmd; - mem_cmd.is_read = false; - mem_cmd.skip_pipeline_clear = false; - mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; - mem_cmd.size = tbl_hdr_width; - mem_cmd.system_addr = alloc_params.hash_hdr.phys_base + - hdr_idx * tbl_hdr_width; - mem_cmd.local_addr = lcl_hash_hdr + - hdr_idx * tbl_hdr_width; - cmd_pyld[num_cmd] 
= ipahal_construct_imm_cmd( - IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); - if (!cmd_pyld[num_cmd]) { - IPAERR("fail construct dma_shared_mem cmd: IP = %d\n", - ip); - rc = -ENOMEM; - goto fail_imm_cmd_construct; + /* + * SRAM memory not allocated to hash tables. Sending command + * to hash tables(filer/routing) operation not supported. + */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = tbl_hdr_width; + mem_cmd.system_addr = alloc_params.hash_hdr.phys_base + + hdr_idx * tbl_hdr_width; + mem_cmd.local_addr = lcl_hash_hdr + + hdr_idx * tbl_hdr_width; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, + &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct dma_shared_mem cmd: IP = %d\n", + ip); + rc = -ENOMEM; + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], + cmd_pyld[num_cmd]); + ++num_cmd; } - ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); - ++num_cmd; - ++hdr_idx; } @@ -1014,41 +1033,12 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } + if (__ipa_validate_flt_rule(&frule->rule, &rt_tbl, ip)) + goto error; + if (entry->rt_tbl) entry->rt_tbl->ref_cnt--; - if (frule->rule.action != IPA_PASS_TO_EXCEPTION) { - if (!frule->rule.eq_attrib_type) { - if (!frule->rule.rt_tbl_hdl) { - IPAERR_RL("invalid RT tbl\n"); - goto error; - } - - rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl); - if (rt_tbl == NULL) { - IPAERR_RL("RT tbl not found\n"); - goto error; - } - - if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { - IPAERR_RL("RT table cookie is invalid\n"); - goto error; - } - } else { - if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ? 
- IPA_MEM_PART(v4_modem_rt_index_hi) : - IPA_MEM_PART(v6_modem_rt_index_hi))) { - IPAERR_RL("invalid RT tbl\n"); - goto error; - } - } - } else { - if (frule->rule.rt_tbl_idx > 0) { - IPAERR_RL("invalid RT tbl\n"); - goto error; - } - } - entry->rule = frule->rule; entry->rt_tbl = rt_tbl; if (entry->rt_tbl) @@ -1124,12 +1114,17 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) mutex_lock(&ipa3_ctx->lock); for (i = 0; i < rules->num_rules; i++) { - if (!rules->global) + if (!rules->global) { + /* if hashing not supported, all table entry + * are non-hash tables + */ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].flt_rule_hdl); - else + } else result = -1; if (result) { @@ -1231,6 +1226,9 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules) */ for (i = 0; i < rules->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; result = __ipa_add_flt_rule_after(tbl, &rules->rules[i].rule, &rules->rules[i].flt_rule_hdl, @@ -1318,6 +1316,9 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) mutex_lock(&ipa3_ctx->lock); for (i = 0; i < hdls->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + hdls->rules[i].rule.hashable = false; if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) { IPAERR_RL("failed to mdfy flt rule %i\n", i); hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED; @@ -1562,6 +1563,16 @@ int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type, IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%pK num_entry=0x%pK\n", pipe_idx, ip_type, hashable, entry, num_entry); + /* + * SRAM memory not allocated to hash tables. 
Reading of hash table + * rules operation not supported + */ + if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Reading hashable rules not supported\n"); + *num_entry = 0; + return 0; + } + if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX || !entry || !num_entry) { IPAERR("Invalid params\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 8a39857d0b675892929ac1b36fb83737dd459873..02da317c05a32f72feed9ffab9523999eff93f40 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -118,6 +118,14 @@ } \ } while (0) +/* round addresses for closes page per SMMU requirements */ +#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \ + do { \ + (iova_p) = rounddown((iova), PAGE_SIZE); \ + (pa_p) = rounddown((pa), PAGE_SIZE); \ + (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \ + } while (0) + #define WLAN_AMPDU_TX_EP 15 #define WLAN_PROD_TX_EP 19 #define WLAN1_CONS_RX_EP 14 @@ -556,6 +564,8 @@ struct ipa3_status_stats { * @qmi_request_sent: Indicates whether QMI request to enable clear data path * request is sent or not. 
* @napi_enabled: when true, IPA call client callback to start polling + * @client_lock_unlock: callback function to take mutex lock/unlock for USB + * clients */ struct ipa3_ep_context { int valid; @@ -588,6 +598,8 @@ struct ipa3_ep_context { u32 eot_in_poll_err; bool ep_delay_set; + int (*client_lock_unlock)(bool is_lock); + /* sys MUST be the last element of this struct */ struct ipa3_sys_context *sys; }; @@ -1255,6 +1267,7 @@ struct ipa3_char_device_context { * @logbuf: ipc log buffer for high priority messages * @logbuf_low: ipc log buffer for low priority messages * @ipa_wdi2: using wdi-2.0 + * @ipa_fltrt_not_hashable: filter/route rules not hashable * @use_64_bit_dma_mask: using 64bits dma mask * @ipa_bus_hdl: msm driver handle for the data path bus * @ctrl: holds the core specific operations based on @@ -1354,6 +1367,7 @@ struct ipa3_context { bool use_ipa_teth_bridge; bool modem_cfg_emb_pipe_flt; bool ipa_wdi2; + bool ipa_fltrt_not_hashable; bool use_64_bit_dma_mask; /* featurize if memory footprint becomes a concern */ struct ipa3_stats stats; @@ -1430,6 +1444,7 @@ struct ipa3_plat_drv_res { u32 ee; bool modem_cfg_emb_pipe_flt; bool ipa_wdi2; + bool ipa_fltrt_not_hashable; bool use_64_bit_dma_mask; bool use_bw_vote; u32 wan_rx_ring_size; @@ -1724,6 +1739,9 @@ int ipa3_xdci_connect(u32 clnt_hdl); int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id); void ipa3_xdci_ep_delay_rm(u32 clnt_hdl); +void ipa3_register_lock_unlock_callback(int (*client_cb)(bool), u32 ipa_ep_idx); +void ipa3_deregister_lock_unlock_callback(u32 ipa_ep_idx); +void ipa3_set_usb_prod_pipe_delay(void); int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool should_force_clear, u32 qmi_req_id, bool is_dpl); @@ -1761,6 +1779,9 @@ int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); +void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val, + struct ipa_ep_cfg_holb 
*ep_holb); + int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl, @@ -1934,7 +1955,8 @@ int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes); int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, ipa_notify_cb notify, void *priv, u8 hdr_len, struct ipa_ntn_conn_out_params *outp); -int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); int ipa3_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv); void ipa3_ntn_uc_dereg_rdyCB(void); int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, @@ -2312,6 +2334,8 @@ struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type); struct iommu_domain *ipa3_get_smmu_domain(void); struct iommu_domain *ipa3_get_uc_smmu_domain(void); struct iommu_domain *ipa3_get_wlan_smmu_domain(void); +struct iommu_domain *ipa3_get_smmu_domain_by_type + (enum ipa_smmu_cb_type cb_type); int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); int ipa3_ap_suspend(struct device *dev); @@ -2346,8 +2370,10 @@ const char *ipa_hw_error_str(enum ipa3_hw_errors err_type); int ipa_gsi_ch20_wa(void); int ipa3_rx_poll(u32 clnt_hdl, int budget); void ipa3_recycle_wan_skb(struct sk_buff *skb); -int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map); -int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt); +int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map, + enum ipa_smmu_cb_type cb_type); +int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt, + enum ipa_smmu_cb_type cb_type); void ipa3_reset_freeze_vote(void); int ipa3_ntn_init(void); int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c 
b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c index e05c0232f00be05c34af50d387619463842afa65..ef10f940fe28206e740f4a7ff93401adc9df33f4 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c @@ -331,6 +331,11 @@ static void __map_smmu_info(struct device *dev, } for (i = 0; i < num_mapping; i++) { + int prot = IOMMU_READ | IOMMU_WRITE; + u32 ipa_base = ipa3_ctx->ipa_wrapper_base + + ipa3_ctx->ctrl->ipa_reg_base_ofst; + u32 ipa_size = ipa3_ctx->ipa_wrapper_size; + imp_smmu_round_to_page(map_info[i].iova, map_info[i].pa, map_info[i].size, &iova_p, &pa_p, &size_p); @@ -340,11 +345,14 @@ static void __map_smmu_info(struct device *dev, (partition->base + partition->size) < (iova_p + size_p)); + /* for IPA uC MBOM we need to map with device type */ + if (pa_p - ipa_base < ipa_size) + prot |= IOMMU_MMIO; + IMP_DBG("mapping 0x%lx to 0x%pa size %d\n", iova_p, &pa_p, size_p); iommu_map(domain, - iova_p, pa_p, size_p, - IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + iova_p, pa_p, size_p, prot); } else { IMP_DBG("unmapping 0x%lx to 0x%pa size %d\n", iova_p, &pa_p, size_p); @@ -594,7 +602,7 @@ int imp_handle_vote_req(bool vote) IMP_DBG_LOW("vote %d\n", vote); mutex_lock(&imp_ctx->mutex); - if (imp_ctx->state != IMP_READY) { + if (imp_ctx->state != IMP_STARTED) { IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state); mutex_unlock(&imp_ctx->mutex); return -EPERM; @@ -619,6 +627,7 @@ int imp_handle_vote_req(bool vote) imp_ctx->lpm_disabled = false; } + mutex_unlock(&imp_ctx->mutex); return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c index 8f92e04e3caaf47ab7b3728fe70ddf7e9fca5763..adc153d24f157b5a357116395b05dc0ac0b7936b 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -1264,6 +1264,11 @@ int ipa3_table_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) goto bail; } + if (!ipa3_ctx->nat_mem.dev.is_dev_init) { + 
IPAERR_RL("NAT hasn't been initialized\n"); + return -EPERM; + } + for (cnt = 0; cnt < dma->entries; ++cnt) { result = ipa3_table_validate_table_dma_one(&dma->dma[cnt]); if (result) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 2c9710454412f874008d81c68c4d9501af160850..d2f0f9ced72853e051a487304bb9cc17c7b84b08 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -563,26 +563,35 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip) goto fail_size_valid; } - /* flushing ipa internal hashable rt rules cache */ - memset(&flush, 0, sizeof(flush)); - if (ip == IPA_IP_v4) - flush.v4_rt = true; - else - flush.v6_rt = true; - ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); - reg_write_cmd.skip_pipeline_clear = false; - reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; - reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH); - reg_write_cmd.value = valmask.val; - reg_write_cmd.value_mask = valmask.mask; - cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( - IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, false); - if (!cmd_pyld[num_cmd]) { - IPAERR("fail construct register_write imm cmd. IP %d\n", ip); - goto fail_size_valid; + /* + * SRAM memory not allocated to hash tables. Sending + * command to hash tables(filer/routing) operation not supported. 
+ */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + /* flushing ipa internal hashable rt rules cache */ + memset(&flush, 0, sizeof(flush)); + if (ip == IPA_IP_v4) + flush.v4_rt = true; + else + flush.v6_rt = true; + ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask); + reg_write_cmd.skip_pipeline_clear = false; + reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + reg_write_cmd.offset = ipahal_get_reg_ofst( + IPA_FILT_ROUT_HASH_FLUSH); + reg_write_cmd.value = valmask.val; + reg_write_cmd.value_mask = valmask.mask; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write_cmd, + false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct register_write imm cmd. IP %d\n", ip); + goto fail_size_valid; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; } - ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); - num_cmd++; mem_cmd.is_read = false; mem_cmd.skip_pipeline_clear = false; @@ -599,20 +608,27 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip) ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); num_cmd++; - mem_cmd.is_read = false; - mem_cmd.skip_pipeline_clear = false; - mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; - mem_cmd.size = alloc_params.hash_hdr.size; - mem_cmd.system_addr = alloc_params.hash_hdr.phys_base; - mem_cmd.local_addr = lcl_hash_hdr; - cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( - IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); - if (!cmd_pyld[num_cmd]) { - IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip); - goto fail_imm_cmd_construct; + /* + * SRAM memory not allocated to hash tables. Sending + * command to hash tables(filer/routing) operation not supported. 
+ */ + if (!ipa3_ctx->ipa_fltrt_not_hashable) { + mem_cmd.is_read = false; + mem_cmd.skip_pipeline_clear = false; + mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR; + mem_cmd.size = alloc_params.hash_hdr.size; + mem_cmd.system_addr = alloc_params.hash_hdr.phys_base; + mem_cmd.local_addr = lcl_hash_hdr; + cmd_pyld[num_cmd] = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false); + if (!cmd_pyld[num_cmd]) { + IPAERR( + "fail construct dma_shared_mem imm cmd. IP %d\n", ip); + goto fail_imm_cmd_construct; + } + ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); + num_cmd++; } - ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]); - num_cmd++; if (lcl_nhash) { if (num_cmd >= IPA_RT_MAX_NUM_OF_COMMIT_TABLES_CMD_DESC) { @@ -1112,6 +1128,9 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) mutex_lock(&ipa3_ctx->lock); for (i = 0; i < rules->num_rules; i++) { rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, &rules->rules[i].rule, rules->rules[i].at_rear, @@ -1157,6 +1176,9 @@ int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules) mutex_lock(&ipa3_ctx->lock); for (i = 0; i < rules->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, &rules->rules[i].rule, rules->rules[i].at_rear, @@ -1258,6 +1280,9 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) */ for (i = 0; i < rules->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + rules->rules[i].rule.hashable = false; if (__ipa_add_rt_rule_after(tbl, &rules->rules[i].rule, &rules->rules[i].rt_rule_hdl, @@ -1741,6 +1766,9 @@ int 
ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) mutex_lock(&ipa3_ctx->lock); for (i = 0; i < hdls->num_rules; i++) { + /* if hashing not supported, all tables are non-hash tables*/ + if (ipa3_ctx->ipa_fltrt_not_hashable) + hdls->rules[i].rule.hashable = false; if (__ipa_mdfy_rt_rule(&hdls->rules[i])) { IPAERR_RL("failed to mdfy rt rule %i\n", i); hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED; @@ -1841,6 +1869,16 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type, IPADBG_LOW("tbl_idx=%d ip_t=%d hash=%d entry=0x%pK num_entry=0x%pK\n", tbl_idx, ip_type, hashable, entry, num_entry); + /* + * SRAM memory not allocated to hash tables. Reading of hash table + * rules operation not supported + */ + if (hashable && ipa3_ctx->ipa_fltrt_not_hashable) { + IPADBG("Reading hashable rules not supported\n"); + *num_entry = 0; + return 0; + } + if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) { IPAERR("Invalid params\n"); return -EFAULT; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c index 14e7b713553255c5c00aec0407011a33bd7c25df..e66aeeabe7e6d2a03401289f190efb37edee3302 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c @@ -221,8 +221,11 @@ static int ipa3_uc_send_ntn_setup_pipe_cmd( IPADBG("ring_base_pa = 0x%pa\n", &ntn_info->ring_base_pa); + IPADBG("ring_base_iova = 0x%pa\n", + &ntn_info->ring_base_iova); IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size); IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa); + IPADBG("buff_pool_base_iova = 0x%pa\n", &ntn_info->buff_pool_base_iova); IPADBG("num_buffers = %d\n", ntn_info->num_buffers); IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size); IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa); @@ -248,8 +251,15 @@ static int ipa3_uc_send_ntn_setup_pipe_cmd( Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params; } - 
Ntn_params->ring_base_pa = ntn_info->ring_base_pa; - Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa; + if (ntn_info->smmu_enabled) { + Ntn_params->ring_base_pa = (u32)ntn_info->ring_base_iova; + Ntn_params->buff_pool_base_pa = + (u32)ntn_info->buff_pool_base_iova; + } else { + Ntn_params->ring_base_pa = ntn_info->ring_base_pa; + Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa; + } + Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size; Ntn_params->num_buffers = ntn_info->num_buffers; Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa; @@ -268,6 +278,128 @@ static int ipa3_uc_send_ntn_setup_pipe_cmd( return result; } +static int ipa3_smmu_map_uc_ntn_pipes(struct ipa_ntn_setup_info *params, + bool map) +{ + struct iommu_domain *smmu_domain; + int result; + int i; + u64 iova; + phys_addr_t pa; + u64 iova_p; + phys_addr_t pa_p; + u32 size_p; + + if (params->data_buff_size > PAGE_SIZE) { + IPAERR("invalid data buff size\n"); + return -EINVAL; + } + + result = ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa, + PAGE_SIZE), map, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s uC regs %d\n", + map ? "map" : "unmap", result); + goto fail; + } + + if (params->smmu_enabled) { + IPADBG("smmu is enabled on EMAC\n"); + result = ipa3_smmu_map_peer_buff((u64)params->ring_base_iova, + params->ntn_ring_size, map, params->ring_base_sgt, + IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s ntn ring %d\n", + map ? "map" : "unmap", result); + goto fail_map_ring; + } + result = ipa3_smmu_map_peer_buff( + (u64)params->buff_pool_base_iova, + params->num_buffers * 4, map, + params->buff_pool_base_sgt, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s pool buffs %d\n", + map ? 
"map" : "unmap", result); + goto fail_map_buffer_smmu_enabled; + } + } else { + IPADBG("smmu is disabled on EMAC\n"); + result = ipa3_smmu_map_peer_buff((u64)params->ring_base_pa, + params->ntn_ring_size, map, NULL, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s ntn ring %d\n", + map ? "map" : "unmap", result); + goto fail_map_ring; + } + result = ipa3_smmu_map_peer_buff(params->buff_pool_base_pa, + params->num_buffers * 4, map, NULL, IPA_SMMU_CB_UC); + if (result) { + IPAERR("failed to %s pool buffs %d\n", + map ? "map" : "unmap", result); + goto fail_map_buffer_smmu_disabled; + } + } + + if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) { + IPADBG("AP SMMU is set to s1 bypass\n"); + return 0; + } + + smmu_domain = ipa3_get_smmu_domain(); + if (!smmu_domain) { + IPAERR("invalid smmu domain\n"); + return -EINVAL; + } + + for (i = 0; i < params->num_buffers; i++) { + iova = (u64)params->data_buff_list[i].iova; + pa = (phys_addr_t)params->data_buff_list[i].pa; + IPA_SMMU_ROUND_TO_PAGE(iova, pa, params->data_buff_size, iova_p, + pa_p, size_p); + IPADBG("%s 0x%llx to 0x%pa size %d\n", map ? 
"mapping" : + "unmapping", iova_p, &pa_p, size_p); + if (map) { + result = ipa3_iommu_map(smmu_domain, iova_p, pa_p, + size_p, IOMMU_READ | IOMMU_WRITE); + if (result) + IPAERR("Fail to map 0x%llx\n", iova); + } else { + result = iommu_unmap(smmu_domain, iova_p, size_p); + if (result != params->data_buff_size) + IPAERR("Fail to unmap 0x%llx\n", iova); + } + if (result) { + if (params->smmu_enabled) + goto fail_map_data_buff_smmu_enabled; + else + goto fail_map_data_buff_smmu_disabled; + } + } + return 0; + +fail_map_data_buff_smmu_enabled: + ipa3_smmu_map_peer_buff((u64)params->buff_pool_base_iova, + params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC); + goto fail_map_buffer_smmu_enabled; +fail_map_data_buff_smmu_disabled: + ipa3_smmu_map_peer_buff(params->buff_pool_base_pa, + params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC); + goto fail_map_buffer_smmu_disabled; +fail_map_buffer_smmu_enabled: + ipa3_smmu_map_peer_buff((u64)params->ring_base_iova, + params->ntn_ring_size, !map, params->ring_base_sgt, + IPA_SMMU_CB_UC); + goto fail_map_ring; +fail_map_buffer_smmu_disabled: + ipa3_smmu_map_peer_buff((u64)params->ring_base_pa, + params->ntn_ring_size, !map, NULL, IPA_SMMU_CB_UC); +fail_map_ring: + ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa, + PAGE_SIZE), !map, IPA_SMMU_CB_UC); +fail: + return result; +} + /** * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes */ @@ -324,10 +456,16 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, goto fail; } + result = ipa3_smmu_map_uc_ntn_pipes(&in->ul, true); + if (result) { + IPAERR("failed to map SMMU for UL %d\n", result); + goto fail; + } + if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) { IPAERR("fail to send cmd to uc for ul pipe\n"); result = -EFAULT; - goto fail; + goto fail_smmu_map_ul; } ipa3_install_dflt_flt_rules(ipa_ep_idx_ul); outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX; @@ -346,13 +484,19 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, if 
(ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) { IPAERR("fail to setup dl pipe cfg\n"); result = -EFAULT; - goto fail; + goto fail_smmu_map_ul; + } + + result = ipa3_smmu_map_uc_ntn_pipes(&in->dl, true); + if (result) { + IPAERR("failed to map SMMU for DL %d\n", result); + goto fail_smmu_map_ul; } if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) { IPAERR("fail to send cmd to uc for dl pipe\n"); result = -EFAULT; - goto fail; + goto fail_smmu_map_dl; } outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX; ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; @@ -362,11 +506,17 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, IPAERR("Enable data path failed res=%d clnt=%d.\n", result, ipa_ep_idx_dl); result = -EFAULT; - goto fail; + goto fail_smmu_map_dl; } IPADBG("client %d (ep: %d) connected\n", in->dl.client, ipa_ep_idx_dl); + return 0; + +fail_smmu_map_dl: + ipa3_smmu_map_uc_ntn_pipes(&in->dl, false); +fail_smmu_map_ul: + ipa3_smmu_map_uc_ntn_pipes(&in->ul, false); fail: IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return result; @@ -377,7 +527,7 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, */ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, - int ipa_ep_idx_dl) + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params) { struct ipa_mem_buffer cmd; struct ipa3_ep_context *ep_ul, *ep_dl; @@ -442,6 +592,13 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, goto fail; } + /* unmap the DL pipe */ + result = ipa3_smmu_map_uc_ntn_pipes(¶ms->dl, false); + if (result) { + IPAERR("failed to unmap SMMU for DL %d\n", result); + goto fail; + } + /* teardown the UL pipe */ tear->params.ipa_pipe_number = ipa_ep_idx_ul; result = ipa3_uc_send_cmd((u32)(cmd.phys_base), @@ -453,6 +610,14 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, result = -EFAULT; goto fail; } + + /* unmap the UL pipe */ + result = ipa3_smmu_map_uc_ntn_pipes(¶ms->ul, false); + if (result) { + IPAERR("failed to unmap SMMU for UL %d\n", result); + goto fail; + } + 
ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul); memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context)); IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 609be4fa595e8f1a7e8d6e4c85d135f2d851ee78..6fac15b6d2032232d0465a1651d6e40c0485fe45 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -57,6 +57,7 @@ #define IPA_BCR_REG_VAL_v3_0 (0x00000001) #define IPA_BCR_REG_VAL_v3_5 (0x0000003B) #define IPA_BCR_REG_VAL_v4_0 (0x00000039) +#define IPA_BCR_REG_VAL_v4_2 (0x00000000) #define IPA_AGGR_GRAN_MIN (1) #define IPA_AGGR_GRAN_MAX (32) #define IPA_EOT_COAL_GRAN_MIN (1) @@ -96,6 +97,8 @@ #define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006 /* Packet Processing + no decipher + no uCP */ #define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017 +/* Packet Processing + no decipher + no uCP + HPS REP DMA Parser.*/ +#define IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP 0x00000806 /* COMP/DECOMP */ #define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020 /* Invalid sequencer type */ @@ -141,6 +144,9 @@ #define IPA_v4_0_SRC_GROUP_MAX (4) #define IPA_v4_0_DST_GROUP_MAX (4) +#define IPA_v4_2_GROUP_UL_DL (0) +#define IPA_v4_2_SRC_GROUP_MAX (1) +#define IPA_v4_2_DST_GROUP_MAX (1) #define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX enum ipa_rsrc_grp_type_src { @@ -210,6 +216,9 @@ enum ipa_ver { IPA_4_0, IPA_4_0_MHI, IPA_4_1, + IPA_4_2, + IPA_4_5, + IPA_4_5_MHI, IPA_VER_MAX, }; @@ -312,6 +321,19 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { {14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} }, }, + [IPA_4_2] = { + /* UL_DL other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {3, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {3, 3}, {0, 0}, {0, 0}, {0, 
0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {10, 10}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {1, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, }; @@ -368,6 +390,13 @@ static const struct rsrc_min_max ipa3_rsrc_dst_grp_config [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { {2, 63}, {1, 63}, {1, 2}, {0, 2}, {0, 0}, {0, 0} }, }, + [IPA_4_2] = { + /*UL/DL/DPL, other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, }; static const struct rsrc_min_max ipa3_rsrc_rx_grp_config @@ -407,6 +436,11 @@ static const struct rsrc_min_max ipa3_rsrc_rx_grp_config [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { {3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} }, }, + [IPA_4_2] = { + /* UL_DL, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {4, 4}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, }; @@ -440,6 +474,10 @@ static const u32 ipa3_rsrc_rx_grp_hps_weight_config /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, }, + [IPA_4_2] = { + /* UL_DL, other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 0, 0, 0, 0, 0 }, + }, }; enum ipa_ees { @@ -996,12 +1034,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping /* IPA_3_5_1 */ [IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = { true, IPA_v3_5_GROUP_UL_DL, true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, { 7, 1, 8, 16, IPA_EE_UC } }, [IPA_3_5_1][IPA_CLIENT_USB_PROD] = { true, IPA_v3_5_GROUP_UL_DL, true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, 
QMB_MASTER_SELECT_DDR, { 0, 0, 8, 16, IPA_EE_AP } }, [IPA_3_5_1][IPA_CLIENT_APPS_LAN_PROD] = { @@ -1011,7 +1049,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping { 8, 7, 8, 16, IPA_EE_AP } }, [IPA_3_5_1][IPA_CLIENT_APPS_WAN_PROD] = { true, IPA_v3_5_GROUP_UL_DL, true, - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, + IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP, QMB_MASTER_SELECT_DDR, { 2, 3, 16, 32, IPA_EE_AP } }, [IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD] = { @@ -1699,6 +1737,182 @@ static const struct ipa_ep_configuration ipa3_ep_mapping QMB_MASTER_SELECT_DDR, { 31, 31, 8, 8, IPA_EE_AP } }, + /* IPA_4_2 */ + [IPA_4_2][IPA_CLIENT_WLAN1_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 3, 7, 6, 7, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_USB_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 0, 5, 8, 9, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_APPS_LAN_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 2, 6, 8, 9, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_APPS_WAN_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_REP_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, + QMB_MASTER_SELECT_DDR, + { 1, 0, 8, 12, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_APPS_CMD_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY, + QMB_MASTER_SELECT_DDR, + { 6, 1, 20, 20, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_Q6_WAN_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 4, 0, 8, 12, IPA_EE_Q6 } }, + [IPA_4_2][IPA_CLIENT_Q6_CMD_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 5, 1, 20, 20, IPA_EE_Q6 } }, + [IPA_4_2][IPA_CLIENT_ETHERNET_PROD] = { + 
true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 0, 8, 10, IPA_EE_UC } }, + /* Only for test purpose */ + [IPA_4_2][IPA_CLIENT_TEST_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + {0, 5, 8, 9, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST1_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 0, 5, 8, 9, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST2_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 3, 7, 6, 7, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST3_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + {1, 0, 8, 12, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST4_PROD] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP, + QMB_MASTER_SELECT_DDR, + { 7, 10, 8, 10, IPA_EE_AP } }, + + + [IPA_4_2][IPA_CLIENT_WLAN1_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 8, 6, 9, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_USB_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 9, 6, 6, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_USB_DPL_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 4, 4, 4, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_APPS_LAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 8, 2, 6, 6, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_APPS_WAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 3, 6, 6, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_Q6_LAN_CONS] = { + true, 
IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 11, 3, 6, 6, IPA_EE_Q6 } }, + [IPA_4_2][IPA_CLIENT_Q6_WAN_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 10, 2, 6, 6, IPA_EE_Q6 } }, + [IPA_4_2][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 13, 4, 6, 6, IPA_EE_Q6 } }, + [IPA_4_2][IPA_CLIENT_ETHERNET_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + true, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 16, 1, 6, 6, IPA_EE_UC } }, + /* Only for test purpose */ + /* MBIM aggregation test pipes should have the same QMB as USB_CONS */ + [IPA_4_2][IPA_CLIENT_TEST_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 9, 6, 6, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST1_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 15, 9, 6, 6, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST2_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 12, 4, 4, 4, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST3_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 14, 8, 6, 9, IPA_EE_AP } }, + [IPA_4_2][IPA_CLIENT_TEST4_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 9, 3, 6, 6, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_2][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_2_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, + }; static struct ipa3_mem_partition ipa_4_1_mem_part = { @@ -1792,6 +2006,97 @@ static struct ipa3_mem_partition ipa_4_1_mem_part = { .stats_drop_size = 0x20, }; +static struct ipa3_mem_partition 
ipa_4_2_mem_part = { + .ofst_start = 0x280, + .nat_ofst = 0x0, + .nat_size = 0x0, + .v4_flt_hash_ofst = 0x288, + .v4_flt_hash_size = 0x0, + .v4_flt_hash_size_ddr = 0x0, + .v4_flt_nhash_ofst = 0x290, + .v4_flt_nhash_size = 0x78, + .v4_flt_nhash_size_ddr = 0x4000, + .v6_flt_hash_ofst = 0x310, + .v6_flt_hash_size = 0x0, + .v6_flt_hash_size_ddr = 0x0, + .v6_flt_nhash_ofst = 0x318, + .v6_flt_nhash_size = 0x78, + .v6_flt_nhash_size_ddr = 0x4000, + .v4_rt_num_index = 0xf, + .v4_modem_rt_index_lo = 0x0, + .v4_modem_rt_index_hi = 0x7, + .v4_apps_rt_index_lo = 0x8, + .v4_apps_rt_index_hi = 0xe, + .v4_rt_hash_ofst = 0x398, + .v4_rt_hash_size = 0x0, + .v4_rt_hash_size_ddr = 0x0, + .v4_rt_nhash_ofst = 0x3A0, + .v4_rt_nhash_size = 0x78, + .v4_rt_nhash_size_ddr = 0x4000, + .v6_rt_num_index = 0xf, + .v6_modem_rt_index_lo = 0x0, + .v6_modem_rt_index_hi = 0x7, + .v6_apps_rt_index_lo = 0x8, + .v6_apps_rt_index_hi = 0xe, + .v6_rt_hash_ofst = 0x420, + .v6_rt_hash_size = 0x0, + .v6_rt_hash_size_ddr = 0x0, + .v6_rt_nhash_ofst = 0x428, + .v6_rt_nhash_size = 0x78, + .v6_rt_nhash_size_ddr = 0x4000, + .modem_hdr_ofst = 0x4A8, + .modem_hdr_size = 0x140, + .apps_hdr_ofst = 0x5E8, + .apps_hdr_size = 0x0, + .apps_hdr_size_ddr = 0x800, + .modem_hdr_proc_ctx_ofst = 0x5F0, + .modem_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_ofst = 0x7F0, + .apps_hdr_proc_ctx_size = 0x200, + .apps_hdr_proc_ctx_size_ddr = 0x0, + .modem_comp_decomp_ofst = 0x0, + .modem_comp_decomp_size = 0x0, + .modem_ofst = 0xbf0, + .modem_size = 0x100c, + .apps_v4_flt_hash_ofst = 0x1bfc, + .apps_v4_flt_hash_size = 0x0, + .apps_v4_flt_nhash_ofst = 0x1bfc, + .apps_v4_flt_nhash_size = 0x0, + .apps_v6_flt_hash_ofst = 0x1bfc, + .apps_v6_flt_hash_size = 0x0, + .apps_v6_flt_nhash_ofst = 0x1bfc, + .apps_v6_flt_nhash_size = 0x0, + .uc_info_ofst = 0x80, + .uc_info_size = 0x200, + .end_ofst = 0x2000, + .apps_v4_rt_hash_ofst = 0x1bfc, + .apps_v4_rt_hash_size = 0x0, + .apps_v4_rt_nhash_ofst = 0x1bfc, + .apps_v4_rt_nhash_size = 0x0, + 
.apps_v6_rt_hash_ofst = 0x1bfc, + .apps_v6_rt_hash_size = 0x0, + .apps_v6_rt_nhash_ofst = 0x1bfc, + .apps_v6_rt_nhash_size = 0x0, + .uc_event_ring_ofst = 0x1c00, + .uc_event_ring_size = 0x400, + .pdn_config_ofst = 0x9F8, + .pdn_config_size = 0x50, + .stats_quota_ofst = 0xa50, + .stats_quota_size = 0x60, + .stats_tethering_ofst = 0xab0, + .stats_tethering_size = 0x140, + .stats_flt_v4_ofst = 0xbf0, + .stats_flt_v4_size = 0x0, + .stats_flt_v6_ofst = 0xbf0, + .stats_flt_v6_size = 0x0, + .stats_rt_v4_ofst = 0xbf0, + .stats_rt_v4_size = 0x0, + .stats_rt_v6_ofst = 0xbf0, + .stats_rt_v6_size = 0x0, + .stats_drop_ofst = 0xbf0, + .stats_drop_size = 0x0, +}; + /** * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an * IPA_RM resource @@ -2316,7 +2621,6 @@ void ipa3_cfg_qsb(void) int ipa3_init_hw(void) { u32 ipa_version = 0; - u32 val; struct ipahal_reg_counter_cfg cnt_cfg; /* Read IPA version and make sure we have access to the registers */ @@ -2327,23 +2631,25 @@ int ipa3_init_hw(void) switch (ipa3_ctx->ipa_hw_type) { case IPA_HW_v3_0: case IPA_HW_v3_1: - val = IPA_BCR_REG_VAL_v3_0; + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v3_0); break; case IPA_HW_v3_5: case IPA_HW_v3_5_1: - val = IPA_BCR_REG_VAL_v3_5; + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v3_5); break; case IPA_HW_v4_0: case IPA_HW_v4_1: - val = IPA_BCR_REG_VAL_v4_0; + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v4_0); + break; + case IPA_HW_v4_2: + ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL_v4_2); break; default: - IPAERR("unknown HW type in dts\n"); - return -EFAULT; + IPADBG("Do not update BCR - hw_type=%d\n", + ipa3_ctx->ipa_hw_type); + break; } - ipahal_write_reg(IPA_BCR, val); - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { struct ipahal_reg_clkon_cfg clkon_cfg; struct ipahal_reg_tx_cfg tx_cfg; @@ -2411,6 +2717,14 @@ u8 ipa3_get_hw_type_index(void) case IPA_HW_v4_1: hw_type_index = IPA_4_1; break; + case IPA_HW_v4_2: + hw_type_index = IPA_4_2; + break; + case IPA_HW_v4_5: + 
hw_type_index = IPA_4_5; + if (ipa3_ctx->ipa_config_is_mhi) + hw_type_index = IPA_4_5_MHI; + break; default: IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type); hw_type_index = IPA_3_0; @@ -3278,8 +3592,13 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) if (ep_mode->mode == IPA_DMA) type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY; else - type = - IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP; + /* In IPA4.2 only single pass only supported*/ + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) + type = + IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP; + else + type = + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP; IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type, clnt_hdl); @@ -3432,6 +3751,34 @@ int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) return 0; } +#define MAX_ALLOWED_BASE_VAL 0x1f +#define MAX_ALLOWED_SCALE_VAL 0x1f + +/** + * ipa3_cal_ep_holb_scale_base_val - calculate base and scale value from tmr_val + * + * In IPA4.2 HW version need configure base and scale value in HOL timer reg + * @tmr_val: [in] timer value for HOL timer + * @ipa_ep_cfg: [out] Fill IPA end-point configuration base and scale value + * and return + */ +void ipa3_cal_ep_holb_scale_base_val(u32 tmr_val, + struct ipa_ep_cfg_holb *ep_holb) +{ + u32 base_val, scale, scale_val = 1, base = 2; + + for (scale = 0; scale <= MAX_ALLOWED_SCALE_VAL; scale++) { + base_val = tmr_val/scale_val; + if (scale != 0) + scale_val *= base; + if (base_val <= MAX_ALLOWED_BASE_VAL) + break; + } + ep_holb->base_val = base_val; + ep_holb->scale = scale_val; + +} + /** * ipa3_cfg_ep_holb() - IPA end-point holb configuration * @@ -3467,9 +3814,15 @@ int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, ep_holb); - - ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl, - ep_holb); + if (ipa3_ctx->ipa_hw_type == IPA_HW_v4_2) { + 
ipa3_cal_ep_holb_scale_base_val(ep_holb->tmr_val, + &ipa3_ctx->ep[clnt_hdl].holb); + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + clnt_hdl, &ipa3_ctx->ep[clnt_hdl].holb); + } else { + ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, + clnt_hdl, ep_holb); + } IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl)); @@ -3780,7 +4133,12 @@ int ipa3_init_mem_partition(enum ipa_hw_type type) case IPA_HW_v4_1: ipa3_ctx->ctrl->mem_partition = &ipa_4_1_mem_part; break; - + case IPA_HW_v4_2: + ipa3_ctx->ctrl->mem_partition = &ipa_4_2_mem_part; + break; + case IPA_HW_v4_5: + ipa3_ctx->ctrl->mem_partition = &ipa_4_2_mem_part; + break; case IPA_HW_None: case IPA_HW_v1_0: case IPA_HW_v1_1: @@ -5041,6 +5399,66 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index, } } break; + case IPA_4_2: + if (src) { + switch (group_index) { + case IPA_v4_2_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v4_2_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + case IPA_4_5: + case IPA_4_5_MHI: + if (src) { + switch (group_index) { + case IPA_v4_2_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid source resource group,index #%d\n", + group_index); + break; + } + } else { + switch (group_index) { + case IPA_v4_2_GROUP_UL_DL: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, + n, val); + break; + default: + IPAERR( + " Invalid destination resource group,index #%d\n", + group_index); + break; + } + } + break; + default: IPAERR("invalid hw type\n"); WARN_ON(1); @@ -5150,6 +5568,19 @@ void 
ipa3_set_resorce_groups_min_max_limits(void) src_grp_idx_max = IPA_v4_0_SRC_GROUP_MAX; dst_grp_idx_max = IPA_v4_0_DST_GROUP_MAX; break; + case IPA_4_2: + src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v4_2_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_2_DST_GROUP_MAX; + break; + case IPA_4_5: + case IPA_4_5_MHI: + src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; + dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; + src_grp_idx_max = IPA_v4_2_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_2_DST_GROUP_MAX; + break; default: IPAERR("invalid hw type index\n"); WARN_ON(1); @@ -5203,7 +5634,9 @@ void ipa3_set_resorce_groups_min_max_limits(void) ipa3_configure_rx_hps_clients(1, false); } - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) + /* In IPA4.2 no support to HPS weight config*/ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 && + (ipa3_ctx->ipa_hw_type != IPA_HW_v4_2)) ipa3_configure_rx_hps_weight(); IPADBG("EXIT\n"); @@ -5616,10 +6049,12 @@ bool ipa3_is_msm_device(void) case IPA_HW_v3_0: case IPA_HW_v3_5: case IPA_HW_v4_0: + case IPA_HW_v4_5: return false; case IPA_HW_v3_1: case IPA_HW_v3_5_1: case IPA_HW_v4_1: + case IPA_HW_v4_2: return true; default: IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c index 26f071ec4fcef6ecefb578d8e9d0d188e059e43a..1c21ed21cc9472acc26e066ce36398a65bae62e1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -534,6 +534,51 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { [IPA_IS_FRAG] = 15, }, }, + + /* IPAv4.2 */ + [IPA_HW_v4_2] = { + false, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + 
IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule_ipav4, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule_ipav4, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] = 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + }, + }, + }; static int ipa_flt_generate_eq(enum ipa_ip_type ipt, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c index 48f6e7db24c38f22e0574cfc0169b52fafbc4990..44a2ce4c486f63262e8d407066cabeb5c315d1e8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -1158,6 +1158,21 @@ static void ipareg_construct_endp_init_hol_block_timer_n( IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK); } + +static void ipareg_construct_endp_init_hol_block_timer_n_v4_2( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipa_ep_cfg_holb *ep_holb = + (struct ipa_ep_cfg_holb *)fields; + + IPA_SETFIELD_IN_REG(*val, ep_holb->scale, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_SHFT_V_4_2, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_BMSK_V_4_2); + IPA_SETFIELD_IN_REG(*val, ep_holb->base_val, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_SHFT_V_4_2, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_BMSK_V_4_2); +} + static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg, const void *fields, u32 *val) { @@ -2256,6 
+2271,13 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v4_0][IPA_ENDP_YELLOW_RED_MARKER] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00000CC0, 0x70, 10, 23, 1}, + [IPA_HW_v4_2][IPA_IDLE_INDICATION_CFG] = { + ipareg_construct_idle_indication_cfg, ipareg_parse_dummy, + 0x00000240, 0, 0, 0, 0}, + [IPA_HW_v4_2][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = { + ipareg_construct_endp_init_hol_block_timer_n_v4_2, + ipareg_parse_dummy, + 0x00000830, 0x70, 8, 17, 1}, }; int ipahal_print_all_regs(void) @@ -2272,7 +2294,7 @@ int ipahal_print_all_regs(void) } for (i = 0; i < IPA_REG_MAX ; i++) { - if (!ipahal_reg_objs[IPA_HW_v4_0][i].en_print) + if (!ipahal_reg_objs[ipahal_ctx->hw_type][i].en_print) continue; j = ipahal_reg_objs[ipahal_ctx->hw_type][i].n_start; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h index a4371e38412adc01469729378d88a4d8b4149f85..2f7803ad5d6ef5b053dc53f8d2319aecf901c105 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h @@ -143,6 +143,11 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type); #define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff #define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_SHFT_V_4_2 0 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_BASE_VALUE_BMSK_V_4_2 0x1f +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_SHFT_V_4_2 0x8 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_SCALE_BMSK_V_4_2 0x1f00 + /* IPA_ENDP_INIT_DEAGGR_n register */ #define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000 #define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10 diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c index 4521e7ea719c01919b3a9bb9c94b227310c1d62f..ca7f5e9332dfc69954d7a1bdcc8eb3d3cddb85c7 100644 --- a/drivers/platform/msm/seemp_core/seemp_logk.c +++ 
b/drivers/platform/msm/seemp_core/seemp_logk.c @@ -22,10 +22,6 @@ #include "seemp_logk.h" #include "seemp_ringbuf.h" -#ifndef VM_RESERVED -#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP) -#endif - #define MASK_BUFFER_SIZE 256 #define FOUR_MB 4 #define YEAR_BASE 1900 @@ -279,6 +275,9 @@ static ssize_t seemp_logk_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { + if (seemp_logk_kernel_begin == NULL) + seemp_logk_attach(); + return seemp_logk_usr_record(buf, count); } @@ -555,7 +554,7 @@ static int seemp_logk_mmap(struct file *filp, return -EIO; } - vma->vm_flags |= VM_RESERVED | VM_SHARED; + vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP) | VM_SHARED; vptr = (char *) slogk_dev->ring; ret = 0; @@ -748,7 +747,6 @@ __init int seemp_logk_init(void) goto class_destroy_fail; } - seemp_logk_attach(); mutex_init(&slogk_dev->lock); init_waitqueue_head(&slogk_dev->readers_wq); init_waitqueue_head(&slogk_dev->writers_wq); diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c index 08e4fd9ee6074717a35fdbe9f113e1e3b430a31f..9621d6dd88c6f6219486b2dc6ec169cc1d91dff1 100644 --- a/drivers/power/supply/ltc2941-battery-gauge.c +++ b/drivers/power/supply/ltc2941-battery-gauge.c @@ -316,15 +316,15 @@ static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val) if (info->id == LTC2942_ID) { reg = LTC2942_REG_TEMPERATURE_MSB; - value = 60000; /* Full-scale is 600 Kelvin */ + value = 6000; /* Full-scale is 600 Kelvin */ } else { reg = LTC2943_REG_TEMPERATURE_MSB; - value = 51000; /* Full-scale is 510 Kelvin */ + value = 5100; /* Full-scale is 510 Kelvin */ } ret = ltc294x_read_regs(info->client, reg, &datar[0], 2); value *= (datar[0] << 8) | datar[1]; - /* Convert to centidegrees */ - *val = value / 0xFFFF - 27215; + /* Convert to tenths of degree Celsius */ + *val = value / 0xFFFF - 2722; return ret; } diff --git a/drivers/power/supply/max17042_battery.c 
b/drivers/power/supply/max17042_battery.c index 5b556a13f517f8e7859182fbb6c9ccfe422dd609..9c7eaaeda343ca5e0817416cb5778be4be55caa6 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -1021,6 +1021,7 @@ static int max17042_probe(struct i2c_client *client, i2c_set_clientdata(client, chip); psy_cfg.drv_data = chip; + psy_cfg.of_node = dev->of_node; /* When current is not measured, * CURRENT_NOW and CURRENT_AVG properties should be invisible. */ diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 2e1975ee6761b3e5e545bee6e5aade49974ee5ce..616b450119ebf602e8297013c75e0bef27e4e576 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -368,6 +368,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(batt_full_current), POWER_SUPPLY_ATTR(recharge_soc), POWER_SUPPLY_ATTR(hvdcp_opti_allowed), + POWER_SUPPLY_ATTR(smb_en_mode), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig index 7e174c270a3160305889f9fcf7447621d0c1e5db..db0f56e3c6aa68657a647b3c6fdd1a8857462f46 100644 --- a/drivers/power/supply/qcom/Kconfig +++ b/drivers/power/supply/qcom/Kconfig @@ -19,6 +19,15 @@ config QPNP_FG_GEN4 reported through a BMS power supply property and also sends uevents when the capacity is updated. +config QPNP_QG + bool "QPNP Qgauge driver" + depends on MFD_SPMI_PMIC + help + Say Y here to enable the Qualcomm Technologies, Inc. QGauge driver + which uses the periodic sampling of the battery voltage and current + to determine the battery state-of-charge (SOC) and supports other + battery management features. 
+ config SMB1351_USB_CHARGER tristate "smb1351 usb charger (with VBUS detection)" depends on I2C diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile index c76febf4efc23ee0ab54ecabdc7b67139d737049..22c326e5ebff3f167bdf3be5c254fc79061eda87 100644 --- a/drivers/power/supply/qcom/Makefile +++ b/drivers/power/supply/qcom/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_QPNP_FG_GEN3) += qpnp-fg-gen3.o fg-memif.o fg-util.o obj-$(CONFIG_QPNP_FG_GEN4) += qpnp-fg-gen4.o fg-memif.o fg-util.o fg-alg.o pmic-voter.o +obj-$(CONFIG_QPNP_QG) += qpnp-qg.o pmic-voter.o qg-util.o qg-soc.o qg-sdam.o qg-battery-profile.o qg-profile-lib.o fg-alg.o obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o obj-$(CONFIG_SMB1355_SLAVE_CHARGER) += smb1355-charger.o pmic-voter.o obj-$(CONFIG_QPNP_SMB2) += step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c index 1433373ff110fa25303941b4da4edc62cb80eb32..f72ade2f2ecea6caa1eed128ab8b4aea41feb820 100644 --- a/drivers/power/supply/qcom/battery.c +++ b/drivers/power/supply/qcom/battery.c @@ -218,7 +218,8 @@ static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr, return count; } -static CLASS_ATTR_RW(slave_pct); +static struct class_attribute class_attr_slave_pct = + __ATTR(parallel_pct, 0644, slave_pct_show, slave_pct_store); /************************ * RESTRICTED CHARGIGNG * diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c index 099e39c0eb4368a7e1c34e819cf2d96f09989df3..726623a3de6c6a33beb8b7d51b81a22e39037170 100644 --- a/drivers/power/supply/qcom/fg-alg.c +++ b/drivers/power/supply/qcom/fg-alg.c @@ -391,13 +391,15 @@ static int cap_learning_process_full_data(struct cap_learning *cl) */ static int cap_learning_begin(struct cap_learning *cl, u32 batt_soc) { - int rc, cc_soc_sw, batt_soc_msb; + int rc, cc_soc_sw, batt_soc_msb, batt_soc_pct; 
batt_soc_msb = batt_soc >> 24; - if (DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW) > - cl->dt.start_soc) { - pr_debug("Battery SOC %d is high!, not starting\n", - batt_soc_msb); + batt_soc_pct = DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW); + + if (batt_soc_pct > cl->dt.max_start_soc || + batt_soc_pct < cl->dt.min_start_soc) { + pr_debug("Battery SOC %d is high/low, not starting\n", + batt_soc_pct); return -EINVAL; } diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h index 41d278a386ac3dd5e17d4a8dcd0035d14c04d783..0eba2bdc5a7c679e2be060a5b3f94e6618383bb9 100644 --- a/drivers/power/supply/qcom/fg-alg.h +++ b/drivers/power/supply/qcom/fg-alg.h @@ -30,7 +30,8 @@ struct cycle_counter { }; struct cl_params { - int start_soc; + int min_start_soc; + int max_start_soc; int max_temp; int min_temp; int max_cap_inc; diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index bebfa5a25545fe8ad91c3af78af7d2673c4e50c3..06df23611eb23fc65d394f5dfad6f484f6697e46 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -417,7 +417,6 @@ struct fg_dev { int last_msoc; int last_recharge_volt_mv; bool profile_available; - bool profile_loaded; enum prof_load_status profile_load_status; bool battery_missing; bool fg_restarting; diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c index 5e3c2e1e29a92ab80c8f865faea5cb3fc3c9a81d..049639e0548a7c181e7e6b9ae064591967a6d9a4 100644 --- a/drivers/power/supply/qcom/fg-memif.c +++ b/drivers/power/supply/qcom/fg-memif.c @@ -30,8 +30,11 @@ static int fg_set_address(struct fg_dev *fg, u16 address) int rc; buffer[0] = address & 0xFF; - /* MSB has to be written zero */ - buffer[1] = 0; + buffer[1] = address >> 8; + + /* MSB has to be written zero for GEN3 FG */ + if (fg->version == GEN3_FG) + buffer[1] = 0; rc = fg_write(fg, MEM_IF_ADDR_LSB(fg), buffer, 2); if (rc < 0) { @@ -103,27 +106,52 @@ static int 
fg_run_iacs_clear_sequence(struct fg_dev *fg) usleep_range(35, 40); while (1) { - val = 0; - rc = fg_write(fg, MEM_IF_ADDR_MSB(fg), &val, 1); - if (rc < 0) { - pr_err("failed to write 0x%04x, rc=%d\n", - MEM_IF_ADDR_MSB(fg), rc); - return rc; - } + if (fg->version == GEN4_FG) { + val = 0x4; + rc = fg_write(fg, MEM_IF_ADDR_MSB(fg), &val, 1); + if (rc < 0) { + pr_err("failed to write 0x%04x, rc=%d\n", + MEM_IF_ADDR_MSB(fg), rc); + return rc; + } - val = 0; - rc = fg_write(fg, MEM_IF_WR_DATA3(fg), &val, 1); - if (rc < 0) { - pr_err("failed to write 0x%04x, rc=%d\n", - MEM_IF_WR_DATA3(fg), rc); - return rc; - } + val = 0; + rc = fg_write(fg, MEM_IF_WR_DATA1(fg), &val, 1); + if (rc < 0) { + pr_err("failed to write 0x%04x, rc=%d\n", + MEM_IF_WR_DATA1(fg), rc); + return rc; + } - rc = fg_read(fg, MEM_IF_RD_DATA3(fg), &val, 1); - if (rc < 0) { - pr_err("failed to read 0x%04x, rc=%d\n", - MEM_IF_RD_DATA3(fg), rc); - return rc; + rc = fg_read(fg, MEM_IF_RD_DATA1(fg), &val, 1); + if (rc < 0) { + pr_err("failed to read 0x%04x, rc=%d\n", + MEM_IF_RD_DATA1(fg), rc); + return rc; + } + } else { /* GEN3 FG */ + val = 0; + rc = fg_write(fg, MEM_IF_ADDR_MSB(fg), &val, 1); + if (rc < 0) { + pr_err("failed to write 0x%04x, rc=%d\n", + MEM_IF_ADDR_MSB(fg), rc); + return rc; + } + + val = 0; + rc = fg_write(fg, MEM_IF_WR_DATA3(fg), &val, 1); + if (rc < 0) { + pr_err("failed to write 0x%04x, rc=%d\n", + MEM_IF_WR_DATA3(fg), rc); + return rc; + } + + rc = fg_read(fg, MEM_IF_RD_DATA3(fg), &val, 1); + if (rc < 0) { + pr_err("failed to read 0x%04x, rc=%d\n", + MEM_IF_RD_DATA3(fg), rc); + return rc; + } } /* Delay for IMA hardware to clear */ @@ -313,7 +341,7 @@ static int __fg_interleaved_mem_write(struct fg_dev *fg, u16 address, int offset, u8 *val, int len) { int rc = 0, i; - u8 *ptr = val, byte_enable = 0, num_bytes = 0; + u8 *ptr = val, byte_enable = 0, num_bytes = 0, dummy_byte = 0; fg_dbg(fg, FG_SRAM_WRITE, "length %d addr=%02X offset=%d\n", len, address, offset); @@ -343,14 +371,12 @@ 
static int __fg_interleaved_mem_write(struct fg_dev *fg, u16 address, } /* - * The last-byte WR_DATA3 starts the write transaction. - * Write a dummy value to WR_DATA3 if it does not have + * The last-byte WR_DATA3/1 starts the write transaction. + * Write a dummy value to WR_DATA3/1 if it does not have * valid data. This dummy data is not written to the - * SRAM as byte_en for WR_DATA3 is not set. + * SRAM as byte_en for WR_DATA3/1 is not set. */ - if (!(byte_enable & BIT(3))) { - u8 dummy_byte = 0x0; - + if (fg->version == GEN3_FG && !(byte_enable & BIT(3))) { rc = fg_write(fg, MEM_IF_WR_DATA3(fg), &dummy_byte, 1); if (rc < 0) { @@ -358,6 +384,14 @@ static int __fg_interleaved_mem_write(struct fg_dev *fg, u16 address, rc); return rc; } + } else if (fg->version == GEN4_FG && !(byte_enable & BIT(1))) { + rc = fg_write(fg, MEM_IF_WR_DATA1(fg), &dummy_byte, + 1); + if (rc < 0) { + pr_err("failed to write dummy-data to WR_DATA1 rc=%d\n", + rc); + return rc; + } } /* check for error condition */ @@ -542,7 +576,8 @@ static int fg_interleaved_mem_config(struct fg_dev *fg, u8 *val, } /* configure for the read/write, single/burst mode */ - burst_mode = fg->use_ima_single_mode ? false : ((offset + len) > 4); + burst_mode = fg->use_ima_single_mode ? 
false : + (offset + len) > fg->sram.num_bytes_per_word; rc = fg_config_access_mode(fg, access, burst_mode); if (rc < 0) { pr_err("failed to set memory access rc = %d\n", rc); @@ -588,11 +623,17 @@ int fg_interleaved_mem_read(struct fg_dev *fg, u16 address, u8 offset, u8 start_beat_count, end_beat_count, count = 0; bool retry = false; - if (offset > 3) { - pr_err("offset too large %d\n", offset); - return -EINVAL; + if (fg->version == GEN4_FG) { + if (offset > 1) { + pr_err("offset too large %d\n", offset); + return -EINVAL; + } + } else { + if (offset > 3) { + pr_err("offset too large %d\n", offset); + return -EINVAL; + } } - retry: if (count >= RETRY_COUNT) { pr_err("Tried %d times\n", RETRY_COUNT); @@ -673,11 +714,17 @@ int fg_interleaved_mem_write(struct fg_dev *fg, u16 address, u8 offset, u8 start_beat_count, end_beat_count, count = 0; bool retry = false; - if (offset > 3) { - pr_err("offset too large %d\n", offset); - return -EINVAL; + if (fg->version == GEN4_FG) { + if (offset > 1) { + pr_err("offset too large %d\n", offset); + return -EINVAL; + } + } else { + if (offset > 3) { + pr_err("offset too large %d\n", offset); + return -EINVAL; + } } - retry: if (count >= RETRY_COUNT) { pr_err("Tried %d times\n", RETRY_COUNT); diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h index 355471d7f7e3511d00da8bf53fe162fbdf6b0fce..aa30626d9427c92dd733c3ff13d9e9e0281d9527 100644 --- a/drivers/power/supply/qcom/fg-reg.h +++ b/drivers/power/supply/qcom/fg-reg.h @@ -326,8 +326,10 @@ #define MEM_IF_ADDR_LSB(chip) ((chip->mem_if_base) + 0x61) #define MEM_IF_ADDR_MSB(chip) ((chip->mem_if_base) + 0x62) #define MEM_IF_WR_DATA0(chip) ((chip->mem_if_base) + 0x63) +#define MEM_IF_WR_DATA1(chip) ((chip->mem_if_base) + 0x64) #define MEM_IF_WR_DATA3(chip) ((chip->mem_if_base) + 0x66) #define MEM_IF_RD_DATA0(chip) ((chip->mem_if_base) + 0x67) +#define MEM_IF_RD_DATA1(chip) ((chip->mem_if_base) + 0x68) #define MEM_IF_RD_DATA3(chip) ((chip->mem_if_base) + 
0x6A) #define MEM_IF_DMA_STS(chip) ((chip->mem_if_base) + 0x70) diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index 69baa56b3bfbf2799ec2c59630e971cad53c59be..7a0e4686ef5175d01759bc601b7eeafe6c44c6d3 100644 --- a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -886,19 +886,26 @@ int fg_get_msoc(struct fg_dev *fg, int *msoc) #define SKIP_BATT_TYPE "Skipped loading battery" const char *fg_get_battery_type(struct fg_dev *fg) { - if (fg->battery_missing || - fg->profile_load_status == PROFILE_MISSING) + switch (fg->profile_load_status) { + case PROFILE_MISSING: + return DEFAULT_BATT_TYPE; + case PROFILE_SKIPPED: + return SKIP_BATT_TYPE; + case PROFILE_LOADED: + if (fg->bp.batt_type_str) + return fg->bp.batt_type_str; + break; + case PROFILE_NOT_LOADED: return MISSING_BATT_TYPE; + default: + break; + }; - if (fg->profile_load_status == PROFILE_SKIPPED) - return SKIP_BATT_TYPE; + if (fg->battery_missing) + return MISSING_BATT_TYPE; - if (fg->bp.batt_type_str) { - if (fg->profile_loaded) - return fg->bp.batt_type_str; - else if (fg->profile_available) - return LOADING_BATT_TYPE; - } + if (fg->profile_available) + return LOADING_BATT_TYPE; return DEFAULT_BATT_TYPE; } diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c new file mode 100644 index 0000000000000000000000000000000000000000..c1f10977a508b5ecf5ebd4220823f4397e27a904 --- /dev/null +++ b/drivers/power/supply/qcom/qg-battery-profile.c @@ -0,0 +1,524 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "QG-K: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qg-battery-profile.h" +#include "qg-profile-lib.h" +#include "qg-defs.h" + +struct qg_battery_data { + /* battery-data class node */ + dev_t dev_no; + struct class *battery_class; + struct device *battery_device; + struct cdev battery_cdev; + + /* profile */ + struct device_node *profile_node; + struct profile_table_data profile[TABLE_MAX]; +}; + +struct tables { + int table_index; + char *table_name; +}; + +static struct tables table[] = { + {TABLE_SOC_OCV1, "qcom,pc-temp-v1-lut"}, + {TABLE_SOC_OCV2, "qcom,pc-temp-v2-lut"}, + {TABLE_FCC1, "qcom,fcc1-temp-lut"}, + {TABLE_FCC2, "qcom,fcc2-temp-lut"}, + {TABLE_Z1, "qcom,pc-temp-z1-lut"}, + {TABLE_Z2, "qcom,pc-temp-z2-lut"}, + {TABLE_Z3, "qcom,pc-temp-z3-lut"}, + {TABLE_Z4, "qcom,pc-temp-z4-lut"}, + {TABLE_Z5, "qcom,pc-temp-z5-lut"}, + {TABLE_Z6, "qcom,pc-temp-z6-lut"}, + {TABLE_Y1, "qcom,pc-temp-y1-lut"}, + {TABLE_Y2, "qcom,pc-temp-y2-lut"}, + {TABLE_Y3, "qcom,pc-temp-y3-lut"}, + {TABLE_Y4, "qcom,pc-temp-y4-lut"}, + {TABLE_Y5, "qcom,pc-temp-y5-lut"}, + {TABLE_Y6, "qcom,pc-temp-y6-lut"}, +}; + +static struct qg_battery_data *the_battery; + +static int qg_battery_data_open(struct inode *inode, struct file *file) +{ + struct qg_battery_data *battery = container_of(inode->i_cdev, + struct qg_battery_data, battery_cdev); + + file->private_data = battery; + + return 0; +} + +static long qg_battery_data_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct qg_battery_data *battery = file->private_data; + struct battery_params __user *bp_user = + (struct battery_params __user *)arg; + 
struct battery_params bp; + int rc = 0, soc, ocv_uv, fcc_mah, var, slope; + + if (!battery->profile_node) { + pr_err("Battery data not set!\n"); + return -EINVAL; + } + + if (!bp_user) { + pr_err("Invalid battery-params user pointer\n"); + return -EINVAL; + } + + if (copy_from_user(&bp, bp_user, sizeof(bp))) { + pr_err("Failed in copy_from_user\n"); + return -EFAULT; + } + + switch (cmd) { + case BPIOCXSOC: + if (bp.table_index != TABLE_SOC_OCV1 && + bp.table_index != TABLE_SOC_OCV2) { + pr_err("Invalid table index %d for SOC-OCV lookup\n", + bp.table_index); + rc = -EINVAL; + } else { + /* OCV is passed as deci-uV - 10^-4 V */ + soc = interpolate_soc(&battery->profile[bp.table_index], + bp.batt_temp, UV_TO_DECIUV(bp.ocv_uv)); + soc = CAP(QG_MIN_SOC, QG_MAX_SOC, soc); + rc = put_user(soc, &bp_user->soc); + if (rc < 0) { + pr_err("BPIOCXSOC: Failed rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXSOC: lut=%s ocv=%d batt_temp=%d soc=%d\n", + battery->profile[bp.table_index].name, + bp.ocv_uv, bp.batt_temp, soc); + } + break; + case BPIOCXOCV: + if (bp.table_index != TABLE_SOC_OCV1 && + bp.table_index != TABLE_SOC_OCV2) { + pr_err("Invalid table index %d for SOC-OCV lookup\n", + bp.table_index); + rc = -EINVAL; + } else { + ocv_uv = interpolate_var( + &battery->profile[bp.table_index], + bp.batt_temp, bp.soc); + ocv_uv = DECIUV_TO_UV(ocv_uv); + ocv_uv = CAP(QG_MIN_OCV_UV, QG_MAX_OCV_UV, ocv_uv); + rc = put_user(ocv_uv, &bp_user->ocv_uv); + if (rc < 0) { + pr_err("BPIOCXOCV: Failed rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXOCV: lut=%s ocv=%d batt_temp=%d soc=%d\n", + battery->profile[bp.table_index].name, + ocv_uv, bp.batt_temp, bp.soc); + } + break; + case BPIOCXFCC: + if (bp.table_index != TABLE_FCC1 && + bp.table_index != TABLE_FCC2) { + pr_err("Invalid table index %d for FCC lookup\n", + bp.table_index); + rc = -EINVAL; + } else { + fcc_mah = interpolate_single_row_lut( + &battery->profile[bp.table_index], + bp.batt_temp, DEGC_SCALE); + fcc_mah = 
CAP(QG_MIN_FCC_MAH, QG_MAX_FCC_MAH, fcc_mah); + rc = put_user(fcc_mah, &bp_user->fcc_mah); + if (rc) { + pr_err("BPIOCXFCC: Failed rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXFCC: lut=%s batt_temp=%d fcc_mah=%d\n", + battery->profile[bp.table_index].name, + bp.batt_temp, fcc_mah); + } + break; + case BPIOCXVAR: + if (bp.table_index < TABLE_Z1 || bp.table_index >= TABLE_MAX) { + pr_err("Invalid table index %d for VAR lookup\n", + bp.table_index); + rc = -EINVAL; + } else { + var = interpolate_var(&battery->profile[bp.table_index], + bp.batt_temp, bp.soc); + var = CAP(QG_MIN_VAR, QG_MAX_VAR, var); + rc = put_user(var, &bp_user->var); + if (rc < 0) { + pr_err("BPIOCXVAR: Failed rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXVAR: lut=%s var=%d batt_temp=%d soc=%d\n", + battery->profile[bp.table_index].name, + var, bp.batt_temp, bp.soc); + } + break; + case BPIOCXSLOPE: + if (bp.table_index != TABLE_SOC_OCV1 && + bp.table_index != TABLE_SOC_OCV2) { + pr_err("Invalid table index %d for Slope lookup\n", + bp.table_index); + rc = -EINVAL; + } else { + slope = interpolate_slope( + &battery->profile[bp.table_index], + bp.batt_temp, bp.soc); + slope = CAP(QG_MIN_SLOPE, QG_MAX_SLOPE, slope); + rc = put_user(slope, &bp_user->slope); + if (rc) { + pr_err("BPIOCXSLOPE: Failed rc=%d\n", rc); + goto ret_err; + } + pr_debug("BPIOCXSLOPE: lut=%s soc=%d batt_temp=%d slope=%d\n", + battery->profile[bp.table_index].name, + bp.soc, bp.batt_temp, slope); + } + break; + default: + pr_err("IOCTL %d not supported\n", cmd); + rc = -EINVAL; + } +ret_err: + return rc; +} + +static int qg_battery_data_release(struct inode *inode, struct file *file) +{ + pr_debug("battery_data device closed\n"); + + return 0; +} + +static const struct file_operations qg_battery_data_fops = { + .owner = THIS_MODULE, + .open = qg_battery_data_open, + .unlocked_ioctl = qg_battery_data_ioctl, + .compat_ioctl = qg_battery_data_ioctl, + .release = qg_battery_data_release, +}; + +static int 
get_length(struct device_node *node, + int *length, char *prop_name, bool ignore_null) +{ + struct property *prop; + + prop = of_find_property(node, prop_name, NULL); + if (!prop) { + if (ignore_null) { + *length = 1; + return 0; + } + pr_err("Failed to find %s property\n", prop_name); + return -ENODATA; + } else if (!prop->value) { + pr_err("Failed to find value for %s property\n", prop_name); + return -ENODATA; + } + + *length = prop->length / sizeof(u32); + + return 0; +} + +static int qg_parse_battery_profile(struct qg_battery_data *battery) +{ + int i, j, k, rows = 0, cols = 0, lut_length = 0, rc = 0; + struct device_node *node; + struct property *prop; + const __be32 *data; + + for (i = 0; i < TABLE_MAX; i++) { + node = of_find_node_by_name(battery->profile_node, + table[i].table_name); + if (!node) { + pr_err("%s table not found\n", table[i].table_name); + rc = -ENODEV; + goto cleanup; + } + + rc = get_length(node, &cols, "qcom,lut-col-legend", false); + if (rc < 0) { + pr_err("Failed to get col-length for %s table rc=%d\n", + table[i].table_name, rc); + goto cleanup; + } + + rc = get_length(node, &rows, "qcom,lut-row-legend", true); + if (rc < 0) { + pr_err("Failed to get row-length for %s table rc=%d\n", + table[i].table_name, rc); + goto cleanup; + } + + rc = get_length(node, &lut_length, "qcom,lut-data", false); + if (rc < 0) { + pr_err("Failed to get lut-length for %s table rc=%d\n", + table[i].table_name, rc); + goto cleanup; + } + + if (lut_length != cols * rows) { + pr_err("Invalid lut-length for %s table\n", + table[i].table_name); + rc = -EINVAL; + goto cleanup; + } + + battery->profile[i].name = kzalloc(strlen(table[i].table_name) + + 1, GFP_KERNEL); + if (!battery->profile[i].name) { + rc = -ENOMEM; + goto cleanup; + } + + strlcpy(battery->profile[i].name, table[i].table_name, + strlen(table[i].table_name) + 1); + battery->profile[i].rows = rows; + battery->profile[i].cols = cols; + + if (rows != 1) { + battery->profile[i].row_entries = kcalloc(rows, 
+ sizeof(*battery->profile[i].row_entries), + GFP_KERNEL); + if (!battery->profile[i].row_entries) { + rc = -ENOMEM; + goto cleanup; + } + } + + battery->profile[i].col_entries = kcalloc(cols, + sizeof(*battery->profile[i].col_entries), + GFP_KERNEL); + if (!battery->profile[i].col_entries) { + rc = -ENOMEM; + goto cleanup; + } + + battery->profile[i].data = kcalloc(rows, + sizeof(*battery->profile[i].data), GFP_KERNEL); + if (!battery->profile[i].data) { + rc = -ENOMEM; + goto cleanup; + } + + for (j = 0; j < rows; j++) { + battery->profile[i].data[j] = kcalloc(cols, + sizeof(**battery->profile[i].data), + GFP_KERNEL); + if (!battery->profile[i].data[j]) { + rc = -ENOMEM; + goto cleanup; + } + } + + /* read profile data */ + rc = of_property_read_u32_array(node, "qcom,lut-col-legend", + battery->profile[i].col_entries, cols); + if (rc < 0) { + pr_err("Failed to read cols values for table %s rc=%d\n", + table[i].table_name, rc); + goto cleanup; + } + + if (rows != 1) { + rc = of_property_read_u32_array(node, + "qcom,lut-row-legend", + battery->profile[i].row_entries, rows); + if (rc < 0) { + pr_err("Failed to read row values for table %s rc=%d\n", + table[i].table_name, rc); + goto cleanup; + } + } + + prop = of_find_property(node, "qcom,lut-data", NULL); + if (!prop) { + pr_err("Failed to find lut-data\n"); + rc = -EINVAL; + goto cleanup; + } + data = prop->value; + for (j = 0; j < rows; j++) { + for (k = 0; k < cols; k++) + battery->profile[i].data[j][k] = + be32_to_cpup(data++); + } + + pr_debug("Profile table %s parsed rows=%d cols=%d\n", + battery->profile[i].name, battery->profile[i].rows, + battery->profile[i].cols); + } + + return 0; + +cleanup: + for (; i >= 0; i--) { + kfree(battery->profile[i].name); + kfree(battery->profile[i].row_entries); + kfree(battery->profile[i].col_entries); + for (j = 0; j < battery->profile[i].rows; j++) { + if (battery->profile[i].data) + kfree(battery->profile[i].data[j]); + } + kfree(battery->profile[i].data); + } + return 
rc; +} + +int lookup_soc_ocv(u32 *soc, u32 ocv_uv, int batt_temp, bool charging) +{ + u8 table_index = charging ? TABLE_SOC_OCV1 : TABLE_SOC_OCV2; + + if (!the_battery || !the_battery->profile_node) { + pr_err("Battery profile not loaded\n"); + return -ENODEV; + } + + *soc = interpolate_soc(&the_battery->profile[table_index], + batt_temp, UV_TO_DECIUV(ocv_uv)); + + *soc = CAP(0, 100, DIV_ROUND_CLOSEST(*soc, 100)); + + return 0; +} + +int qg_get_nominal_capacity(u32 *nom_cap_uah, int batt_temp, bool charging) +{ + u8 table_index = charging ? TABLE_FCC1 : TABLE_FCC2; + u32 fcc_mah; + + if (!the_battery || !the_battery->profile_node) { + pr_err("Battery profile not loaded\n"); + return -ENODEV; + } + + fcc_mah = interpolate_single_row_lut( + &the_battery->profile[table_index], + batt_temp, DEGC_SCALE); + fcc_mah = CAP(QG_MIN_FCC_MAH, QG_MAX_FCC_MAH, fcc_mah); + + *nom_cap_uah = fcc_mah * 1000; + + return 0; +} + +int qg_batterydata_init(struct device_node *profile_node) +{ + int rc = 0; + struct qg_battery_data *battery; + + battery = kzalloc(sizeof(*battery), GFP_KERNEL); + if (!battery) + return -ENOMEM; + + battery->profile_node = profile_node; + + /* char device to access battery-profile data */ + rc = alloc_chrdev_region(&battery->dev_no, 0, 1, "qg_battery"); + if (rc < 0) { + pr_err("Failed to allocate chrdev rc=%d\n", rc); + goto free_battery; + } + + cdev_init(&battery->battery_cdev, &qg_battery_data_fops); + rc = cdev_add(&battery->battery_cdev, battery->dev_no, 1); + if (rc) { + pr_err("Failed to add battery_cdev rc=%d\n", rc); + goto unregister_chrdev; + } + + battery->battery_class = class_create(THIS_MODULE, "qg_battery"); + if (IS_ERR_OR_NULL(battery->battery_class)) { + pr_err("Failed to create qg-battery class\n"); + rc = -ENODEV; + goto delete_cdev; + } + + battery->battery_device = device_create(battery->battery_class, + NULL, battery->dev_no, + NULL, "qg_battery"); + if (IS_ERR_OR_NULL(battery->battery_device)) { + pr_err("Failed to create 
battery_device device\n"); + rc = -ENODEV; + goto delete_cdev; + } + + /* parse the battery profile */ + rc = qg_parse_battery_profile(battery); + if (rc < 0) { + pr_err("Failed to parse battery profile rc=%d\n", rc); + goto destroy_device; + } + + the_battery = battery; + + pr_info("QG Battery-profile loaded, '/dev/qg_battery' created!\n"); + + return 0; + +destroy_device: + device_destroy(battery->battery_class, battery->dev_no); +delete_cdev: + cdev_del(&battery->battery_cdev); +unregister_chrdev: + unregister_chrdev_region(battery->dev_no, 1); +free_battery: + kfree(battery); + return rc; +} + +void qg_batterydata_exit(void) +{ + int i, j; + + if (the_battery) { + /* unregister the device node */ + device_destroy(the_battery->battery_class, the_battery->dev_no); + cdev_del(&the_battery->battery_cdev); + unregister_chrdev_region(the_battery->dev_no, 1); + + /* delete all the battery profile memory */ + for (i = 0; i < TABLE_MAX; i++) { + kfree(the_battery->profile[i].name); + kfree(the_battery->profile[i].row_entries); + kfree(the_battery->profile[i].col_entries); + for (j = 0; j < the_battery->profile[i].rows; j++) { + if (the_battery->profile[i].data) + kfree(the_battery->profile[i].data[j]); + } + kfree(the_battery->profile[i].data); + } + } + + kfree(the_battery); + the_battery = NULL; +} diff --git a/drivers/power/supply/qcom/qg-battery-profile.h b/drivers/power/supply/qcom/qg-battery-profile.h new file mode 100644 index 0000000000000000000000000000000000000000..1b0627776ff0ba244d2bd5fdf20c55e9221f00a7 --- /dev/null +++ b/drivers/power/supply/qcom/qg-battery-profile.h @@ -0,0 +1,20 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __QG_BATTERY_PROFILE_H__ +#define __QG_BATTERY_PROFILE_H__ + +int qg_batterydata_init(struct device_node *node); +void qg_batterydata_exit(void); +int lookup_soc_ocv(u32 *soc, u32 ocv_uv, int batt_temp, bool charging); +int qg_get_nominal_capacity(u32 *nom_cap_uah, int batt_temp, bool charging); + +#endif /* __QG_BATTERY_PROFILE_H__ */ diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h new file mode 100644 index 0000000000000000000000000000000000000000..1591aa15a79ab552ec9d52afafbedc475e1eab00 --- /dev/null +++ b/drivers/power/supply/qcom/qg-core.h @@ -0,0 +1,171 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __QG_CORE_H__ +#define __QG_CORE_H__ + +#include +#include "fg-alg.h" + +struct qg_batt_props { + const char *batt_type_str; + int float_volt_uv; + int vbatt_full_mv; + int fastchg_curr_ma; + int qg_profile_version; +}; + +struct qg_irq_info { + const char *name; + const irq_handler_t handler; + const bool wake; + int irq; +}; + +struct qg_dt { + int vbatt_empty_mv; + int vbatt_empty_cold_mv; + int vbatt_low_mv; + int vbatt_low_cold_mv; + int vbatt_cutoff_mv; + int iterm_ma; + int s2_fifo_length; + int s2_vbat_low_fifo_length; + int s2_acc_length; + int s2_acc_intvl_ms; + int ocv_timer_expiry_min; + int ocv_tol_threshold_uv; + int s3_entry_fifo_length; + int s3_entry_ibat_ua; + int s3_exit_ibat_ua; + int delta_soc; + int rbat_conn_mohm; + int ignore_shutdown_soc_secs; + int cold_temp_threshold; + bool hold_soc_while_full; + bool linearize_soc; + bool cl_disable; + bool cl_feedback_on; +}; + +struct qpnp_qg { + struct device *dev; + struct pmic_revid_data *pmic_rev_id; + struct regmap *regmap; + struct qpnp_vadc_chip *vadc_dev; + struct power_supply *qg_psy; + struct class *qg_class; + struct device *qg_device; + struct cdev qg_cdev; + dev_t dev_no; + struct work_struct udata_work; + struct work_struct scale_soc_work; + struct work_struct qg_status_change_work; + struct notifier_block nb; + struct mutex bus_lock; + struct mutex data_lock; + struct mutex soc_lock; + wait_queue_head_t qg_wait_q; + struct votable *awake_votable; + struct votable *vbatt_irq_disable_votable; + struct votable *fifo_irq_disable_votable; + struct votable *good_ocv_irq_disable_votable; + u32 qg_base; + + /* local data variables */ + u32 batt_id_ohm; + struct qg_kernel_data kdata; + struct qg_user_data udata; + struct power_supply *batt_psy; + struct power_supply *usb_psy; + struct power_supply *parallel_psy; + + /* status variable */ + u32 *debug_mask; + bool qg_device_open; + bool profile_loaded; + bool battery_missing; + bool data_ready; + bool suspend_data; + bool vbat_low; 
+ bool charge_done; + bool parallel_enabled; + bool usb_present; + bool charge_full; + int charge_status; + int charge_type; + int next_wakeup_ms; + u32 wa_flags; + u32 seq_no; + u32 charge_counter_uah; + ktime_t last_user_update_time; + ktime_t last_fifo_update_time; + struct iio_channel *batt_therm_chan; + struct iio_channel *batt_id_chan; + + /* soc params */ + int catch_up_soc; + int maint_soc; + int msoc; + int pon_soc; + int batt_soc; + int cc_soc; + struct alarm alarm_timer; + u32 sdam_data[SDAM_MAX]; + + /* DT */ + struct qg_dt dt; + struct qg_batt_props bp; + /* capacity learning */ + struct cap_learning *cl; + /* charge counter */ + struct cycle_counter *counter; +}; + +enum ocv_type { + S7_PON_OCV, + S3_GOOD_OCV, + S3_LAST_OCV, + SDAM_PON_OCV, +}; + +enum debug_mask { + QG_DEBUG_PON = BIT(0), + QG_DEBUG_PROFILE = BIT(1), + QG_DEBUG_DEVICE = BIT(2), + QG_DEBUG_STATUS = BIT(3), + QG_DEBUG_FIFO = BIT(4), + QG_DEBUG_IRQ = BIT(5), + QG_DEBUG_SOC = BIT(6), + QG_DEBUG_PM = BIT(7), + QG_DEBUG_BUS_READ = BIT(8), + QG_DEBUG_BUS_WRITE = BIT(9), + QG_DEBUG_ALG_CL = BIT(10), +}; + +enum qg_irq { + QG_BATT_MISSING_IRQ, + QG_VBATT_LOW_IRQ, + QG_VBATT_EMPTY_IRQ, + QG_FIFO_UPDATE_DONE_IRQ, + QG_GOOD_OCV_IRQ, + QG_FSM_STAT_CHG_IRQ, + QG_EVENT_IRQ, + QG_MAX_IRQ, +}; + +enum qg_wa_flags { + QG_VBAT_LOW_WA = BIT(0), + QG_RECHARGE_SOC_WA = BIT(1), +}; + + +#endif /* __QG_CORE_H__ */ diff --git a/drivers/power/supply/qcom/qg-defs.h b/drivers/power/supply/qcom/qg-defs.h new file mode 100644 index 0000000000000000000000000000000000000000..2061208ad55ccf49d8a8754a528c94ce5f58a943 --- /dev/null +++ b/drivers/power/supply/qcom/qg-defs.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QG_DEFS_H__ +#define __QG_DEFS_H__ + +#define qg_dbg(chip, reason, fmt, ...) \ + do { \ + if (*chip->debug_mask & (reason)) \ + pr_info(fmt, ##__VA_ARGS__); \ + else \ + pr_debug(fmt, ##__VA_ARGS__); \ + } while (0) + +#define is_between(left, right, value) \ + (((left) >= (right) && (left) >= (value) \ + && (value) >= (right)) \ + || ((left) <= (right) && (left) <= (value) \ + && (value) <= (right))) + +#define UDATA_READY_VOTER "UDATA_READY_VOTER" +#define FIFO_DONE_VOTER "FIFO_DONE_VOTER" +#define FIFO_RT_DONE_VOTER "FIFO_RT_DONE_VOTER" +#define SUSPEND_DATA_VOTER "SUSPEND_DATA_VOTER" +#define GOOD_OCV_VOTER "GOOD_OCV_VOTER" +#define PROFILE_IRQ_DISABLE "NO_PROFILE_IRQ_DISABLE" +#define QG_INIT_STATE_IRQ_DISABLE "QG_INIT_STATE_IRQ_DISABLE" + +#define V_RAW_TO_UV(V_RAW) div_u64(194637ULL * (u64)V_RAW, 1000) +#define I_RAW_TO_UA(I_RAW) div_s64(152588LL * (s64)I_RAW, 1000) +#define FIFO_V_RESET_VAL 0x8000 +#define FIFO_I_RESET_VAL 0x8000 + +#define DEGC_SCALE 10 +#define UV_TO_DECIUV(a) (a / 100) +#define DECIUV_TO_UV(a) (a * 100) + +#define CAP(min, max, value) \ + ((min > value) ? min : ((value > max) ? max : value)) + +#define QG_SOC_FULL 10000 +#define BATT_SOC_32BIT GENMASK(31, 0) + +#endif /* __QG_DEFS_H__ */ diff --git a/drivers/power/supply/qcom/qg-profile-lib.c b/drivers/power/supply/qcom/qg-profile-lib.c new file mode 100644 index 0000000000000000000000000000000000000000..2af997ecac0e2514dc24e1a4773497957d1ab4d2 --- /dev/null +++ b/drivers/power/supply/qcom/qg-profile-lib.c @@ -0,0 +1,311 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include "qg-profile-lib.h" +#include "qg-defs.h" + +static int linear_interpolate(int y0, int x0, int y1, int x1, int x) +{ + if (y0 == y1 || x == x0) + return y0; + if (x1 == x0 || x == x1) + return y1; + + return y0 + ((y1 - y0) * (x - x0) / (x1 - x0)); +} + +int interpolate_single_row_lut(struct profile_table_data *lut, + int x, int scale) +{ + int i, result; + int cols = lut->cols; + + if (x < lut->col_entries[0] * scale) { + pr_debug("x %d less than known range return y = %d lut = %s\n", + x, lut->data[0][0], lut->name); + return lut->data[0][0]; + } + + if (x > lut->col_entries[cols-1] * scale) { + pr_debug("x %d more than known range return y = %d lut = %s\n", + x, lut->data[0][cols-1], lut->name); + return lut->data[0][cols-1]; + } + + for (i = 0; i < cols; i++) { + if (x <= lut->col_entries[i] * scale) + break; + } + + if (x == lut->col_entries[i] * scale) { + result = lut->data[0][i]; + } else { + result = linear_interpolate( + lut->data[0][i-1], + lut->col_entries[i-1] * scale, + lut->data[0][i], + lut->col_entries[i] * scale, + x); + } + + return result; +} + +int interpolate_soc(struct profile_table_data *lut, + int batt_temp, int ocv) +{ + int i, j, soc_high, soc_low, soc; + int rows = lut->rows; + int cols = lut->cols; + + if (batt_temp < lut->col_entries[0] * DEGC_SCALE) { + pr_debug("batt_temp %d < known temp range\n", batt_temp); + batt_temp = lut->col_entries[0] * DEGC_SCALE; + } + + if (batt_temp > lut->col_entries[cols - 1] * DEGC_SCALE) { + 
pr_debug("batt_temp %d > known temp range\n", batt_temp); + batt_temp = lut->col_entries[cols - 1] * DEGC_SCALE; + } + + for (j = 0; j < cols; j++) + if (batt_temp <= lut->col_entries[j] * DEGC_SCALE) + break; + + if (batt_temp == lut->col_entries[j] * DEGC_SCALE) { + /* found an exact match for temp in the table */ + if (ocv >= lut->data[0][j]) + return lut->row_entries[0]; + if (ocv <= lut->data[rows - 1][j]) + return lut->row_entries[rows - 1]; + for (i = 0; i < rows; i++) { + if (ocv >= lut->data[i][j]) { + if (ocv == lut->data[i][j]) + return lut->row_entries[i]; + soc = linear_interpolate( + lut->row_entries[i], + lut->data[i][j], + lut->row_entries[i - 1], + lut->data[i - 1][j], + ocv); + return soc; + } + } + } + + /* batt_temp is within temperature for column j-1 and j */ + if (ocv >= lut->data[0][j]) + return lut->row_entries[0]; + if (ocv <= lut->data[rows - 1][j - 1]) + return lut->row_entries[rows - 1]; + + soc_low = soc_high = 0; + for (i = 0; i < rows-1; i++) { + if (soc_high == 0 && is_between(lut->data[i][j], + lut->data[i+1][j], ocv)) { + soc_high = linear_interpolate( + lut->row_entries[i], + lut->data[i][j], + lut->row_entries[i + 1], + lut->data[i+1][j], + ocv); + } + + if (soc_low == 0 && is_between(lut->data[i][j-1], + lut->data[i+1][j-1], ocv)) { + soc_low = linear_interpolate( + lut->row_entries[i], + lut->data[i][j-1], + lut->row_entries[i + 1], + lut->data[i+1][j-1], + ocv); + } + + if (soc_high && soc_low) { + soc = linear_interpolate( + soc_low, + lut->col_entries[j-1] * DEGC_SCALE, + soc_high, + lut->col_entries[j] * DEGC_SCALE, + batt_temp); + return soc; + } + } + + if (soc_high) + return soc_high; + + if (soc_low) + return soc_low; + + pr_debug("%d ocv wasn't found for temp %d in the LUT %s returning 100%%\n", + ocv, batt_temp, lut->name); + return 10000; +} + +int interpolate_var(struct profile_table_data *lut, + int batt_temp, int soc) +{ + int i, var1, var2, var, rows, cols; + int row1 = 0; + int row2 = 0; + + rows = lut->rows; + 
cols = lut->cols; + if (soc > lut->row_entries[0]) { + pr_debug("soc %d greater than known soc ranges for %s lut\n", + soc, lut->name); + row1 = 0; + row2 = 0; + } else if (soc < lut->row_entries[rows - 1]) { + pr_debug("soc %d less than known soc ranges for %s lut\n", + soc, lut->name); + row1 = rows - 1; + row2 = rows - 1; + } else { + for (i = 0; i < rows; i++) { + if (soc == lut->row_entries[i]) { + row1 = i; + row2 = i; + break; + } + if (soc > lut->row_entries[i]) { + row1 = i - 1; + row2 = i; + break; + } + } + } + + if (batt_temp < lut->col_entries[0] * DEGC_SCALE) + batt_temp = lut->col_entries[0] * DEGC_SCALE; + if (batt_temp > lut->col_entries[cols - 1] * DEGC_SCALE) + batt_temp = lut->col_entries[cols - 1] * DEGC_SCALE; + + for (i = 0; i < cols; i++) + if (batt_temp <= lut->col_entries[i] * DEGC_SCALE) + break; + + if (batt_temp == lut->col_entries[i] * DEGC_SCALE) { + var = linear_interpolate( + lut->data[row1][i], + lut->row_entries[row1], + lut->data[row2][i], + lut->row_entries[row2], + soc); + return var; + } + + var1 = linear_interpolate( + lut->data[row1][i - 1], + lut->col_entries[i - 1] * DEGC_SCALE, + lut->data[row1][i], + lut->col_entries[i] * DEGC_SCALE, + batt_temp); + + var2 = linear_interpolate( + lut->data[row2][i - 1], + lut->col_entries[i - 1] * DEGC_SCALE, + lut->data[row2][i], + lut->col_entries[i] * DEGC_SCALE, + batt_temp); + + var = linear_interpolate( + var1, + lut->row_entries[row1], + var2, + lut->row_entries[row2], + soc); + + return var; +} + +int interpolate_slope(struct profile_table_data *lut, + int batt_temp, int soc) +{ + int i, ocvrow1, ocvrow2, rows, cols; + int row1 = 0; + int row2 = 0; + int slope; + + rows = lut->rows; + cols = lut->cols; + if (soc >= lut->row_entries[0]) { + pr_debug("soc %d >= max soc range - use the slope at soc=%d for lut %s\n", + soc, lut->row_entries[0], lut->name); + row1 = 0; + row2 = 1; + } else if (soc <= lut->row_entries[rows - 1]) { + pr_debug("soc %d is <= min soc range - use the slope 
at soc=%d for lut %s\n", + soc, lut->row_entries[rows - 1], lut->name); + row1 = rows - 2; + row2 = rows - 1; + } else { + for (i = 0; i < rows; i++) { + if (soc >= lut->row_entries[i]) { + row1 = i - 1; + row2 = i; + break; + } + } + } + + if (batt_temp < lut->col_entries[0] * DEGC_SCALE) + batt_temp = lut->col_entries[0] * DEGC_SCALE; + if (batt_temp > lut->col_entries[cols - 1] * DEGC_SCALE) + batt_temp = lut->col_entries[cols - 1] * DEGC_SCALE; + + for (i = 0; i < cols; i++) { + if (batt_temp <= lut->col_entries[i] * DEGC_SCALE) + break; + } + + if (batt_temp == lut->col_entries[i] * DEGC_SCALE) { + slope = (lut->data[row1][i] - lut->data[row2][i]); + if (slope <= 0) { + pr_warn_ratelimited("Slope=%d for soc=%d, using 1\n", + slope, soc); + slope = 1; + } + slope *= 10000; + slope /= (lut->row_entries[row1] - + lut->row_entries[row2]); + return slope; + } + ocvrow1 = linear_interpolate( + lut->data[row1][i - 1], + lut->col_entries[i - 1] * DEGC_SCALE, + lut->data[row1][i], + lut->col_entries[i] * DEGC_SCALE, + batt_temp); + + ocvrow2 = linear_interpolate( + lut->data[row2][i - 1], + lut->col_entries[i - 1] * DEGC_SCALE, + lut->data[row2][i], + lut->col_entries[i] * DEGC_SCALE, + batt_temp); + + slope = (ocvrow1 - ocvrow2); + if (slope <= 0) { + pr_warn_ratelimited("Slope=%d for soc=%d, using 1\n", + slope, soc); + slope = 1; + } + slope *= 10000; + slope /= (lut->row_entries[row1] - lut->row_entries[row2]); + + return slope; +} diff --git a/drivers/power/supply/qcom/qg-profile-lib.h b/drivers/power/supply/qcom/qg-profile-lib.h new file mode 100644 index 0000000000000000000000000000000000000000..eb7263dd7395b85791d95c04056d621f8c0d2c54 --- /dev/null +++ b/drivers/power/supply/qcom/qg-profile-lib.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __QG_PROFILE_LIB_H__
#define __QG_PROFILE_LIB_H__

/*
 * 2-D battery-profile lookup table. Rows are indexed by one quantity
 * (e.g. SOC) and columns by another (e.g. temperature, in degC scaled by
 * DEGC_SCALE at the call sites).
 */
struct profile_table_data {
	char *name;		/* table name, used only in log messages */
	int rows;		/* number of entries in row_entries[] */
	int cols;		/* number of entries in col_entries[] */
	int *row_entries;	/* row axis values */
	int *col_entries;	/* column axis values */
	int **data;		/* data[row][col] */
};

/* Look up y for x in a one-row table; clamps outside the known range. */
int interpolate_single_row_lut(struct profile_table_data *lut,
				int x, int scale);
/* Convert an OCV reading into SOC for a given battery temperature. */
int interpolate_soc(struct profile_table_data *lut,
				int batt_temp, int ocv);
/* Bilinear lookup of a table value at (soc, batt_temp). */
int interpolate_var(struct profile_table_data *lut,
				int batt_temp, int soc);
/* Slope d(value)/d(SOC) around (soc, batt_temp), scaled by 10000. */
int interpolate_slope(struct profile_table_data *lut,
				int batt_temp, int soc);

#endif /*__QG_PROFILE_LIB_H__ */
 */

#ifndef __QG_REG_H__
#define __QG_REG_H__

/*
 * QGauge peripheral register offsets (relative to the QG base address)
 * and their field masks/bits.
 */

#define PERPH_TYPE_REG				0x04
#define QG_TYPE					0x0D

#define QG_STATUS1_REG				0x08
#define BATTERY_PRESENT_BIT			BIT(0)

#define QG_STATUS2_REG				0x09
#define GOOD_OCV_BIT				BIT(1)

#define QG_STATUS3_REG				0x0A
#define COUNT_FIFO_RT_MASK			GENMASK(3, 0)

#define QG_INT_RT_STS_REG			0x10
#define FIFO_UPDATE_DONE_RT_STS_BIT		BIT(3)
#define VBAT_LOW_INT_RT_STS_BIT			BIT(1)

#define QG_INT_LATCHED_STS_REG			0x18
#define FIFO_UPDATE_DONE_INT_LAT_STS_BIT	BIT(3)

#define QG_DATA_CTL1_REG			0x41
#define MASTER_HOLD_OR_CLR_BIT			BIT(0)

#define QG_MODE_CTL1_REG			0x43
#define PARALLEL_IBAT_SENSE_EN_BIT		BIT(7)

#define QG_VBAT_EMPTY_THRESHOLD_REG		0x4B
#define QG_VBAT_LOW_THRESHOLD_REG		0x4C

#define QG_S2_NORMAL_MEAS_CTL2_REG		0x51
#define FIFO_LENGTH_MASK			GENMASK(5, 3)
#define FIFO_LENGTH_SHIFT			3
#define NUM_OF_ACCUM_MASK			GENMASK(2, 0)

#define QG_S2_NORMAL_MEAS_CTL3_REG		0x52

#define QG_S3_SLEEP_OCV_MEAS_CTL4_REG		0x59
#define S3_SLEEP_OCV_TIMER_MASK			GENMASK(2, 0)

#define QG_S3_SLEEP_OCV_TREND_CTL2_REG		0x5C
#define TREND_TOL_MASK				GENMASK(5, 0)

#define QG_S3_SLEEP_OCV_IBAT_CTL1_REG		0x5D
#define SLEEP_IBAT_QUALIFIED_LENGTH_MASK	GENMASK(2, 0)

#define QG_S3_ENTRY_IBAT_THRESHOLD_REG		0x5E
#define QG_S3_EXIT_IBAT_THRESHOLD_REG		0x5F

/* OCV sample registers (2-byte V/I pairs) */
#define QG_S7_PON_OCV_V_DATA0_REG		0x70
#define QG_S7_PON_OCV_I_DATA0_REG		0x72
#define QG_S3_GOOD_OCV_V_DATA0_REG		0x74
#define QG_S3_GOOD_OCV_I_DATA0_REG		0x76

#define QG_V_ACCUM_DATA0_RT_REG			0x88
#define QG_I_ACCUM_DATA0_RT_REG			0x8B
#define QG_ACCUM_CNT_RT_REG			0x8E

#define QG_V_FIFO0_DATA0_REG			0x90
#define QG_I_FIFO0_DATA0_REG			0xA0

#define QG_SOC_MONOTONIC_REG			0xBF

#define QG_LAST_ADC_V_DATA0_REG			0xC0
#define QG_LAST_ADC_I_DATA0_REG			0xC2

#define QG_LAST_S3_SLEEP_V_DATA0_REG		0xCC

/* SDAM offsets */
#define QG_SDAM_VALID_OFFSET			0x46	/* 1-byte valid flag */
#define QG_SDAM_SOC_OFFSET			0x47	/* 1-byte SOC */
#define QG_SDAM_TEMP_OFFSET			0x48	/* 2-byte battery temp */
#define QG_SDAM_RBAT_OFFSET			0x4A	/* 2-byte Rbat (mOhm) */
#define QG_SDAM_OCV_OFFSET			0x4C	/* 4-byte OCV (uV) */
#define QG_SDAM_IBAT_OFFSET			0x50	/* 4-byte Ibat (uA) */
#define QG_SDAM_TIME_OFFSET			0x54	/* 4-byte time (sec) */
#define QG_SDAM_CYCLE_COUNT_OFFSET		0x58
#define QG_SDAM_LEARNED_CAPACITY_OFFSET		0x68
#define QG_SDAM_PON_OCV_OFFSET			0x7C

#endif
+ */ + +#define pr_fmt(fmt) "QG-K: %s: " fmt, __func__ + +#include +#include +#include +#include +#include "qg-sdam.h" +#include "qg-reg.h" + +static struct qg_sdam *the_chip; + +struct qg_sdam_info { + char *name; + u32 offset; + u32 length; +}; + +static struct qg_sdam_info sdam_info[] = { + [SDAM_VALID] = { + .name = "VALID", + .offset = QG_SDAM_VALID_OFFSET, + .length = 1, + }, + [SDAM_SOC] = { + .name = "SOC", + .offset = QG_SDAM_SOC_OFFSET, + .length = 1, + }, + [SDAM_TEMP] = { + .name = "BATT_TEMP", + .offset = QG_SDAM_TEMP_OFFSET, + .length = 2, + }, + [SDAM_RBAT_MOHM] = { + .name = "RBAT_MOHM", + .offset = QG_SDAM_RBAT_OFFSET, + .length = 2, + }, + [SDAM_OCV_UV] = { + .name = "OCV_UV", + .offset = QG_SDAM_OCV_OFFSET, + .length = 4, + }, + [SDAM_IBAT_UA] = { + .name = "IBAT_UA", + .offset = QG_SDAM_IBAT_OFFSET, + .length = 4, + }, + [SDAM_TIME_SEC] = { + .name = "TIME_SEC", + .offset = QG_SDAM_TIME_OFFSET, + .length = 4, + }, + [SDAM_PON_OCV_UV] = { + .name = "SDAM_PON_OCV", + .offset = QG_SDAM_PON_OCV_OFFSET, + .length = 2, + }, +}; + +int qg_sdam_write(u8 param, u32 data) +{ + int rc; + struct qg_sdam *chip = the_chip; + u32 offset; + size_t length; + + if (!chip) { + pr_err("Invalid sdam-chip pointer\n"); + return -EINVAL; + } + + if (param >= SDAM_MAX) { + pr_err("Invalid SDAM param %d\n", param); + return -EINVAL; + } + + offset = chip->sdam_base + sdam_info[param].offset; + length = sdam_info[param].length; + rc = regmap_bulk_write(chip->regmap, offset, (u8 *)&data, length); + if (rc < 0) + pr_err("Failed to write offset=%0x4x param=%d value=%d\n", + offset, param, data); + else + pr_debug("QG SDAM write param=%s value=%d\n", + sdam_info[param].name, data); + + return rc; +} + +int qg_sdam_read(u8 param, u32 *data) +{ + int rc; + struct qg_sdam *chip = the_chip; + u32 offset; + size_t length; + + if (!chip) { + pr_err("Invalid sdam-chip pointer\n"); + return -EINVAL; + } + + if (param >= SDAM_MAX) { + pr_err("Invalid SDAM param %d\n", param); + return 
-EINVAL; + } + + offset = chip->sdam_base + sdam_info[param].offset; + length = sdam_info[param].length; + rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, length); + if (rc < 0) + pr_err("Failed to read offset=%0x4x param=%d\n", + offset, param); + else + pr_debug("QG SDAM read param=%s value=%d\n", + sdam_info[param].name, *data); + + return rc; +} + +int qg_sdam_multibyte_write(u32 offset, u8 *data, u32 length) +{ + int rc, i; + struct qg_sdam *chip = the_chip; + + if (!chip) { + pr_err("Invalid sdam-chip pointer\n"); + return -EINVAL; + } + + offset = chip->sdam_base + offset; + rc = regmap_bulk_write(chip->regmap, offset, data, (size_t)length); + if (rc < 0) { + pr_err("Failed to write offset=%0x4x value=%d\n", + offset, *data); + } else { + for (i = 0; i < length; i++) + pr_debug("QG SDAM write offset=%0x4x value=%d\n", + offset++, data[i]); + } + + return rc; +} + +int qg_sdam_multibyte_read(u32 offset, u8 *data, u32 length) +{ + int rc, i; + struct qg_sdam *chip = the_chip; + + if (!chip) { + pr_err("Invalid sdam-chip pointer\n"); + return -EINVAL; + } + + offset = chip->sdam_base + offset; + rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, (size_t)length); + if (rc < 0) { + pr_err("Failed to read offset=%0x4x\n", offset); + } else { + for (i = 0; i < length; i++) + pr_debug("QG SDAM read offset=%0x4x value=%d\n", + offset++, data[i]); + } + + return rc; +} + +int qg_sdam_read_all(u32 *sdam_data) +{ + int i, rc; + struct qg_sdam *chip = the_chip; + + if (!chip) { + pr_err("Invalid sdam-chip pointer\n"); + return -EINVAL; + } + + for (i = 0; i < SDAM_MAX; i++) { + rc = qg_sdam_read(i, &sdam_data[i]); + if (rc < 0) { + pr_err("Failed to read SDAM param=%s rc=%d\n", + sdam_info[i].name, rc); + return rc; + } + } + + return 0; +} + +int qg_sdam_write_all(u32 *sdam_data) +{ + int i, rc; + struct qg_sdam *chip = the_chip; + + if (!chip) { + pr_err("Invalid sdam-chip pointer\n"); + return -EINVAL; + } + + for (i = 0; i < SDAM_MAX; i++) { + rc = 
qg_sdam_write(i, sdam_data[i]); + if (rc < 0) { + pr_err("Failed to write SDAM param=%s rc=%d\n", + sdam_info[i].name, rc); + return rc; + } + } + + return 0; +} + +int qg_sdam_init(struct device *dev) +{ + int rc; + u32 base = 0, type = 0; + struct qg_sdam *chip; + struct device_node *child, *node = dev->of_node; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return 0; + + chip->regmap = dev_get_regmap(dev->parent, NULL); + if (!chip->regmap) { + pr_err("Parent regmap is unavailable\n"); + return -ENXIO; + } + + /* get the SDAM base address */ + for_each_available_child_of_node(node, child) { + rc = of_property_read_u32(child, "reg", &base); + if (rc < 0) { + pr_err("Failed to read base address rc=%d\n", rc); + return rc; + } + + rc = regmap_read(chip->regmap, base + PERPH_TYPE_REG, &type); + if (rc < 0) { + pr_err("Failed to read type rc=%d\n", rc); + return rc; + } + + switch (type) { + case SDAM_TYPE: + chip->sdam_base = base; + break; + default: + break; + } + } + if (!chip->sdam_base) { + pr_err("QG SDAM node not defined\n"); + return -EINVAL; + } + + the_chip = chip; + + return 0; +} diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h new file mode 100644 index 0000000000000000000000000000000000000000..10e684f8ec403d70e23c1c75caa8541af438e24d --- /dev/null +++ b/drivers/power/supply/qcom/qg-sdam.h @@ -0,0 +1,43 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 */

#ifndef __QG_SDAM_H__
#define __QG_SDAM_H__

/* PERPH_TYPE_REG value identifying an SDAM peripheral */
#define SDAM_TYPE			0x2E

/* Parameters persisted in SDAM; indexes into sdam_info[] in qg-sdam.c */
enum qg_sdam_param {
	SDAM_VALID,
	SDAM_SOC,
	SDAM_TEMP,
	SDAM_RBAT_MOHM,
	SDAM_OCV_UV,
	SDAM_IBAT_UA,
	SDAM_TIME_SEC,
	SDAM_PON_OCV_UV,
	SDAM_MAX,
};

struct qg_sdam {
	struct regmap	*regmap;	/* parent device regmap */
	u16		sdam_base;	/* SDAM peripheral base address */
};

/* Locate the SDAM peripheral and set up the module-level instance. */
int qg_sdam_init(struct device *dev);
/* Single-parameter accessors; 'param' is an enum qg_sdam_param. */
int qg_sdam_write(u8 param, u32 data);
int qg_sdam_read(u8 param, u32 *data);
/* Bulk accessors over all SDAM_MAX parameters. */
int qg_sdam_write_all(u32 *sdam_data);
int qg_sdam_read_all(u32 *sdam_data);
/* Raw byte access at an offset relative to the SDAM base. */
int qg_sdam_multibyte_write(u32 offset, u8 *sdam_data, u32 length);
int qg_sdam_multibyte_read(u32 offset, u8 *sdam_data, u32 length);

#endif
+ */ + +#define pr_fmt(fmt) "QG-K: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include "fg-alg.h" +#include "qg-sdam.h" +#include "qg-core.h" +#include "qg-reg.h" +#include "qg-util.h" +#include "qg-defs.h" + +#define DEFAULT_UPDATE_TIME_MS 64000 +#define SOC_SCALE_HYST_MS 2000 + +static int qg_delta_soc_interval_ms = 20000; +module_param_named( + soc_interval_ms, qg_delta_soc_interval_ms, int, 0600 +); + +static int qg_delta_soc_cold_interval_ms = 4000; +module_param_named( + soc_cold_interval_ms, qg_delta_soc_cold_interval_ms, int, 0600 +); + +static void get_next_update_time(struct qpnp_qg *chip) +{ + int soc_points = 0, batt_temp = 0; + int min_delta_soc_interval_ms = qg_delta_soc_interval_ms; + int rc = 0, rt_time_ms = 0, full_time_ms = DEFAULT_UPDATE_TIME_MS; + + get_fifo_done_time(chip, false, &full_time_ms); + get_fifo_done_time(chip, true, &rt_time_ms); + + full_time_ms = CAP(0, DEFAULT_UPDATE_TIME_MS, + full_time_ms - rt_time_ms); + + soc_points = abs(chip->msoc - chip->catch_up_soc); + if (chip->maint_soc > 0) + soc_points = max(abs(chip->msoc - chip->maint_soc), soc_points); + soc_points /= chip->dt.delta_soc; + + /* Lower the delta soc interval by half at cold */ + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc < 0) + pr_err("Failed to read battery temperature rc=%d\n", rc); + else if (batt_temp < chip->dt.cold_temp_threshold) + min_delta_soc_interval_ms = qg_delta_soc_cold_interval_ms; + + if (!min_delta_soc_interval_ms) + min_delta_soc_interval_ms = 1000; /* 1 second */ + + chip->next_wakeup_ms = (full_time_ms / (soc_points + 1)) + - SOC_SCALE_HYST_MS; + chip->next_wakeup_ms = max(chip->next_wakeup_ms, + min_delta_soc_interval_ms); + + qg_dbg(chip, QG_DEBUG_SOC, "fifo_full_time=%d secs fifo_real_time=%d secs soc_scale_points=%d\n", + full_time_ms / 1000, rt_time_ms / 1000, soc_points); +} + +static bool is_scaling_required(struct qpnp_qg *chip) +{ + if (!chip->profile_loaded) + return false; + + if (chip->maint_soc 
> 0 && + (abs(chip->maint_soc - chip->msoc) >= chip->dt.delta_soc)) + return true; + + if ((abs(chip->catch_up_soc - chip->msoc) < chip->dt.delta_soc) && + chip->catch_up_soc != 0 && chip->catch_up_soc != 100) + return false; + + if (chip->catch_up_soc == chip->msoc) + /* SOC has not changed */ + return false; + + + if (chip->catch_up_soc > chip->msoc && !is_usb_present(chip)) + /* USB is not present and SOC has increased */ + return false; + + return true; +} + +static void update_msoc(struct qpnp_qg *chip) +{ + int rc = 0, batt_temp = 0, batt_soc_32bit = 0; + bool usb_present = is_usb_present(chip); + + if (chip->catch_up_soc > chip->msoc) { + /* SOC increased */ + if (usb_present) /* Increment if USB is present */ + chip->msoc += chip->dt.delta_soc; + } else if (chip->catch_up_soc < chip->msoc) { + /* SOC dropped */ + chip->msoc -= chip->dt.delta_soc; + } + chip->msoc = CAP(0, 100, chip->msoc); + + if (chip->maint_soc > 0 && chip->msoc < chip->maint_soc) { + chip->maint_soc -= chip->dt.delta_soc; + chip->maint_soc = CAP(0, 100, chip->maint_soc); + } + + /* maint_soc dropped below msoc, skip using it */ + if (chip->maint_soc <= chip->msoc) + chip->maint_soc = -EINVAL; + + /* update the SOC register */ + rc = qg_write_monotonic_soc(chip, chip->msoc); + if (rc < 0) + pr_err("Failed to update MSOC register rc=%d\n", rc); + + /* update SDAM with the new MSOC */ + chip->sdam_data[SDAM_SOC] = chip->msoc; + rc = qg_sdam_write(SDAM_SOC, chip->msoc); + if (rc < 0) + pr_err("Failed to update SDAM with MSOC rc=%d\n", rc); + + if (!chip->dt.cl_disable && chip->cl->active) { + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc < 0) { + pr_err("Failed to read BATT_TEMP rc=%d\n", rc); + } else { + batt_soc_32bit = div64_u64( + chip->batt_soc * BATT_SOC_32BIT, + QG_SOC_FULL); + cap_learning_update(chip->cl, batt_temp, batt_soc_32bit, + chip->charge_status, chip->charge_done, + usb_present, false); + } + } + + cycle_count_update(chip->counter, + DIV_ROUND_CLOSEST(chip->msoc * 
255, 100), + chip->charge_status, chip->charge_done, + usb_present); + + qg_dbg(chip, QG_DEBUG_SOC, + "SOC scale: Update maint_soc=%d msoc=%d catch_up_soc=%d delta_soc=%d\n", + chip->maint_soc, chip->msoc, + chip->catch_up_soc, chip->dt.delta_soc); +} + +static void scale_soc_stop(struct qpnp_qg *chip) +{ + chip->next_wakeup_ms = 0; + alarm_cancel(&chip->alarm_timer); + + qg_dbg(chip, QG_DEBUG_SOC, + "SOC scale stopped: msoc=%d catch_up_soc=%d\n", + chip->msoc, chip->catch_up_soc); +} + +static void scale_soc_work(struct work_struct *work) +{ + struct qpnp_qg *chip = container_of(work, + struct qpnp_qg, scale_soc_work); + + mutex_lock(&chip->soc_lock); + + if (!is_scaling_required(chip)) { + scale_soc_stop(chip); + goto done; + } + + update_msoc(chip); + + if (is_scaling_required(chip)) { + alarm_start_relative(&chip->alarm_timer, + ms_to_ktime(chip->next_wakeup_ms)); + } else { + scale_soc_stop(chip); + goto done_psy; + } + + qg_dbg(chip, QG_DEBUG_SOC, + "SOC scale: Work msoc=%d catch_up_soc=%d delta_soc=%d next_wakeup=%d sec\n", + chip->msoc, chip->catch_up_soc, chip->dt.delta_soc, + chip->next_wakeup_ms / 1000); + +done_psy: + power_supply_changed(chip->qg_psy); +done: + pm_relax(chip->dev); + mutex_unlock(&chip->soc_lock); +} + +static enum alarmtimer_restart + qpnp_msoc_timer(struct alarm *alarm, ktime_t now) +{ + struct qpnp_qg *chip = container_of(alarm, + struct qpnp_qg, alarm_timer); + + /* timer callback runs in atomic context, cannot use voter */ + pm_stay_awake(chip->dev); + schedule_work(&chip->scale_soc_work); + + return ALARMTIMER_NORESTART; +} + +int qg_scale_soc(struct qpnp_qg *chip, bool force_soc) +{ + int rc = 0; + + mutex_lock(&chip->soc_lock); + + qg_dbg(chip, QG_DEBUG_SOC, + "SOC scale: Start msoc=%d catch_up_soc=%d delta_soc=%d\n", + chip->msoc, chip->catch_up_soc, chip->dt.delta_soc); + + if (force_soc) { + chip->msoc = chip->catch_up_soc; + rc = qg_write_monotonic_soc(chip, chip->msoc); + if (rc < 0) + pr_err("Failed to update MSOC 
register rc=%d\n", rc); + + qg_dbg(chip, QG_DEBUG_SOC, + "SOC scale: Forced msoc=%d\n", chip->msoc); + goto done_psy; + } + + if (!is_scaling_required(chip)) { + scale_soc_stop(chip); + goto done; + } + + update_msoc(chip); + + if (is_scaling_required(chip)) { + get_next_update_time(chip); + alarm_start_relative(&chip->alarm_timer, + ms_to_ktime(chip->next_wakeup_ms)); + } else { + scale_soc_stop(chip); + goto done_psy; + } + + qg_dbg(chip, QG_DEBUG_SOC, + "SOC scale: msoc=%d catch_up_soc=%d delta_soc=%d next_wakeup=%d sec\n", + chip->msoc, chip->catch_up_soc, chip->dt.delta_soc, + chip->next_wakeup_ms / 1000); + +done_psy: + power_supply_changed(chip->qg_psy); +done: + mutex_unlock(&chip->soc_lock); + return rc; +} + +int qg_soc_init(struct qpnp_qg *chip) +{ + if (alarmtimer_get_rtcdev()) { + alarm_init(&chip->alarm_timer, ALARM_BOOTTIME, + qpnp_msoc_timer); + } else { + pr_err("Failed to get soc alarm-timer\n"); + return -EINVAL; + } + INIT_WORK(&chip->scale_soc_work, scale_soc_work); + + return 0; +} + +void qg_soc_exit(struct qpnp_qg *chip) +{ + alarm_cancel(&chip->alarm_timer); +} diff --git a/drivers/power/supply/qcom/qg-soc.h b/drivers/power/supply/qcom/qg-soc.h new file mode 100644 index 0000000000000000000000000000000000000000..3b4eb6031c1a5a14faa45d919218056aeeeda2b6 --- /dev/null +++ b/drivers/power/supply/qcom/qg-soc.h @@ -0,0 +1,20 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 */

#ifndef __QG_SOC_H__
#define __QG_SOC_H__

/* Start/continue SOC scaling; force_soc jumps msoc straight to target. */
int qg_scale_soc(struct qpnp_qg *chip, bool force_soc);
/* Initialize the scaling alarm-timer and worker. */
int qg_soc_init(struct qpnp_qg *chip);
/* Cancel any pending scaling alarm. */
void qg_soc_exit(struct qpnp_qg *chip);

#endif /* __QG_SOC_H__ */
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qg-sdam.h" +#include "qg-core.h" +#include "qg-reg.h" +#include "qg-defs.h" +#include "qg-util.h" + +static inline bool is_sticky_register(u32 addr) +{ + if ((addr & 0xFF) == QG_STATUS2_REG) + return true; + + return false; +} + +int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len) +{ + int rc, i; + u32 dummy = 0; + + rc = regmap_bulk_read(chip->regmap, addr, val, len); + if (rc < 0) { + pr_err("Failed regmap_read for address %04x rc=%d\n", addr, rc); + return rc; + } + + if (is_sticky_register(addr)) { + /* write to the sticky register to clear it */ + rc = regmap_write(chip->regmap, addr, dummy); + if (rc < 0) { + pr_err("Failed regmap_write for %04x rc=%d\n", + addr, rc); + return rc; + } + } + + if (*chip->debug_mask & QG_DEBUG_BUS_READ) { + pr_info("length %d addr=%04x\n", len, addr); + for (i = 0; i < len; i++) + pr_info("val[%d]: %02x\n", i, val[i]); + } + + return 0; +} + +int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len) +{ + int rc, i; + + mutex_lock(&chip->bus_lock); + + if (len > 1) + rc = regmap_bulk_write(chip->regmap, addr, val, len); + else + rc = regmap_write(chip->regmap, addr, *val); + + if (rc < 0) { + pr_err("Failed regmap_write for address %04x rc=%d\n", + addr, rc); + goto out; + } + + if (*chip->debug_mask & QG_DEBUG_BUS_WRITE) { + pr_info("length %d addr=%04x\n", len, addr); + for (i = 0; i < len; i++) + pr_info("val[%d]: %02x\n", i, val[i]); + } +out: + mutex_unlock(&chip->bus_lock); + return rc; +} + +int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val) +{ + int rc; + + mutex_lock(&chip->bus_lock); + + rc = regmap_update_bits(chip->regmap, addr, mask, val); + if (rc < 0) { + pr_err("Failed regmap_update_bits for address %04x rc=%d\n", + addr, rc); + goto out; + } + + if (*chip->debug_mask & QG_DEBUG_BUS_WRITE) + pr_info("addr=%04x mask: %02x val: %02x\n", addr, mask, val); + +out: + 
mutex_unlock(&chip->bus_lock); + return rc; +} + +int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt) +{ + int rc; + u8 reg = 0; + u32 addr; + + addr = rt ? QG_STATUS3_REG : QG_S2_NORMAL_MEAS_CTL2_REG; + rc = qg_read(chip, chip->qg_base + addr, ®, 1); + if (rc < 0) { + pr_err("Failed to read FIFO length rc=%d\n", rc); + return rc; + } + + if (rt) { + *fifo_length = reg & COUNT_FIFO_RT_MASK; + } else { + *fifo_length = (reg & FIFO_LENGTH_MASK) >> FIFO_LENGTH_SHIFT; + *fifo_length += 1; + } + + return rc; +} + +int get_sample_count(struct qpnp_qg *chip, u32 *sample_count) +{ + int rc; + u8 reg = 0; + + rc = qg_read(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL2_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to read FIFO sample count rc=%d\n", rc); + return rc; + } + + *sample_count = 1 << ((reg & NUM_OF_ACCUM_MASK) + 1); + + return rc; +} + +int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval) +{ + int rc; + u8 reg = 0; + + rc = qg_read(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL3_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to read FIFO sample interval rc=%d\n", rc); + return rc; + } + + *sample_interval = reg * 10; + + return rc; +} + +int get_rtc_time(unsigned long *rtc_time) +{ + struct rtc_time tm; + struct rtc_device *rtc; + int rc; + + rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); + if (rtc == NULL) { + pr_err("Failed to open rtc device (%s)\n", + CONFIG_RTC_HCTOSYS_DEVICE); + return -EINVAL; + } + + rc = rtc_read_time(rtc, &tm); + if (rc) { + pr_err("Failed to read rtc time (%s) : %d\n", + CONFIG_RTC_HCTOSYS_DEVICE, rc); + goto close_time; + } + + rc = rtc_valid_tm(&tm); + if (rc) { + pr_err("Invalid RTC time (%s): %d\n", + CONFIG_RTC_HCTOSYS_DEVICE, rc); + goto close_time; + } + rtc_tm_to_time(&tm, rtc_time); + +close_time: + rtc_class_close(rtc); + return rc; +} + +int get_fifo_done_time(struct qpnp_qg *chip, bool rt, int *time_ms) +{ + int rc, length = 0; + u32 sample_count = 0, sample_interval = 0, acc_count = 0; + + rc 
= get_fifo_length(chip, &length, rt ? true : false); + if (rc < 0) + return rc; + + rc = get_sample_count(chip, &sample_count); + if (rc < 0) + return rc; + + rc = get_sample_interval(chip, &sample_interval); + if (rc < 0) + return rc; + + *time_ms = length * sample_count * sample_interval; + + if (rt) { + rc = qg_read(chip, chip->qg_base + QG_ACCUM_CNT_RT_REG, + (u8 *)&acc_count, 1); + if (rc < 0) + return rc; + + *time_ms += ((sample_count - acc_count) * sample_interval); + } + + return 0; +} + +static bool is_usb_available(struct qpnp_qg *chip) +{ + if (chip->usb_psy) + return true; + + chip->usb_psy = power_supply_get_by_name("usb"); + if (!chip->usb_psy) + return false; + + return true; +} + +bool is_usb_present(struct qpnp_qg *chip) +{ + union power_supply_propval pval = {0, }; + + if (is_usb_available(chip)) + power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + + return pval.intval ? true : false; +} + +static bool is_parallel_available(struct qpnp_qg *chip) +{ + if (chip->parallel_psy) + return true; + + chip->parallel_psy = power_supply_get_by_name("parallel"); + if (!chip->parallel_psy) + return false; + + return true; +} + +bool is_parallel_enabled(struct qpnp_qg *chip) +{ + union power_supply_propval pval = {0, }; + + if (is_parallel_available(chip)) { + power_supply_get_property(chip->parallel_psy, + POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval); + } + + return pval.intval ? 
true : false; +} + +int qg_write_monotonic_soc(struct qpnp_qg *chip, int msoc) +{ + u8 reg = 0; + int rc; + + reg = (msoc * 255) / 100; + rc = qg_write(chip, chip->qg_base + QG_SOC_MONOTONIC_REG, + ®, 1); + if (rc < 0) + pr_err("Failed to update QG_SOC_MONOTINIC reg rc=%d\n", rc); + + return rc; +} + +int qg_get_battery_temp(struct qpnp_qg *chip, int *temp) +{ + int rc = 0; + + if (chip->battery_missing) { + *temp = 250; + return 0; + } + + rc = iio_read_channel_processed(chip->batt_therm_chan, temp); + if (rc < 0) { + pr_err("Failed reading BAT_TEMP over ADC rc=%d\n", rc); + return rc; + } + pr_debug("batt_temp = %d\n", *temp); + + return rc; +} diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h new file mode 100644 index 0000000000000000000000000000000000000000..385c9e07256257734db3fbe06d2a65af3461dcb9 --- /dev/null +++ b/drivers/power/supply/qcom/qg-util.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __QG_UTIL_H__ +#define __QG_UTIL_H__ + +int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len); +int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len); +int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val); +int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt); +int get_sample_count(struct qpnp_qg *chip, u32 *sample_count); +int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval); +int get_fifo_done_time(struct qpnp_qg *chip, bool rt, int *time_ms); +int get_rtc_time(unsigned long *rtc_time); +bool is_usb_present(struct qpnp_qg *chip); +bool is_parallel_enabled(struct qpnp_qg *chip); +int qg_write_monotonic_soc(struct qpnp_qg *chip, int msoc); +int qg_get_battery_temp(struct qpnp_qg *chip, int *batt_temp); + +#endif diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index ac73180649dbda7921bffd6f77d6e3ca9f25356c..a0f5226c6f973321a9235d388a3da08f2eed73c9 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -1686,12 +1686,21 @@ static int fg_set_recharge_soc(struct fg_dev *fg, int recharge_soc) static int fg_adjust_recharge_soc(struct fg_dev *fg) { struct fg_gen3_chip *chip = container_of(fg, struct fg_gen3_chip, fg); + union power_supply_propval prop = {0, }; int rc, msoc, recharge_soc, new_recharge_soc = 0; bool recharge_soc_status; if (!chip->dt.auto_recharge_soc) return 0; + rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH, + &prop); + if (rc < 0) { + pr_err("Error in getting battery health, rc=%d\n", rc); + return rc; + } + chip->health = prop.intval; + recharge_soc = chip->dt.recharge_soc_thr; recharge_soc_status = fg->recharge_soc_adjusted; /* @@ -1722,6 +1731,9 @@ static int fg_adjust_recharge_soc(struct fg_dev *fg) if (!fg->recharge_soc_adjusted) return 0; + if (chip->health != POWER_SUPPLY_HEALTH_GOOD) + return 0; + /* Restore the default value */ new_recharge_soc 
= recharge_soc; fg->recharge_soc_adjusted = false; @@ -2548,9 +2560,6 @@ static void profile_load_work(struct work_struct *work) batt_psy_initialized(fg); fg_notify_charger(fg); - if (fg->profile_load_status == PROFILE_LOADED) - fg->profile_loaded = true; - fg_dbg(fg, FG_STATUS, "profile loaded successfully"); out: fg->soc_reporting_ready = true; @@ -3915,7 +3924,6 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data) if (fg->battery_missing) { fg->profile_available = false; - fg->profile_loaded = false; fg->profile_load_status = PROFILE_NOT_LOADED; fg->soc_reporting_ready = false; fg->batt_id_ohms = -EINVAL; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 93c5ad38bcfa00bf02f83889859c69efe94d06a2..b15f5960d68ed5f4096756e6290d80c6c25fe60a 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -148,6 +148,7 @@ struct fg_gen4_chip { struct ttf ttf; struct delayed_work ttf_work; char batt_profile[PROFILE_LEN]; + int recharge_soc_thr; bool ki_coeff_dischg_en; bool slope_limit_en; }; @@ -1031,9 +1032,6 @@ static void profile_load_work(struct work_struct *work) batt_psy_initialized(fg); fg_notify_charger(fg); - if (fg->profile_load_status == PROFILE_LOADED) - fg->profile_loaded = true; - fg_dbg(fg, FG_STATUS, "profile loaded successfully"); out: fg->soc_reporting_ready = true; @@ -1046,6 +1044,7 @@ static void profile_load_work(struct work_struct *work) static void get_batt_psy_props(struct fg_dev *fg) { + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); union power_supply_propval prop = {0, }; int rc; @@ -1084,6 +1083,67 @@ static void get_batt_psy_props(struct fg_dev *fg) } fg->health = prop.intval; + + if (!chip->recharge_soc_thr) { + rc = power_supply_get_property(fg->batt_psy, + POWER_SUPPLY_PROP_RECHARGE_SOC, &prop); + if (rc < 0) { + pr_err("Error in getting recharge SOC, rc=%d\n", rc); + return; + } + + if (prop.intval < 0) 
+ pr_debug("Recharge SOC not configured %d\n", + prop.intval); + else + chip->recharge_soc_thr = prop.intval; + } +} + +static int fg_gen4_update_maint_soc(struct fg_dev *fg) +{ + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); + int rc = 0, msoc; + + if (!chip->dt.linearize_soc) + return 0; + + mutex_lock(&fg->charge_full_lock); + if (fg->delta_soc <= 0) + goto out; + + rc = fg_get_msoc(fg, &msoc); + if (rc < 0) { + pr_err("Error in getting msoc, rc=%d\n", rc); + goto out; + } + + if (msoc > fg->maint_soc) { + /* + * When the monotonic SOC goes above maintenance SOC, we should + * stop showing the maintenance SOC. + */ + fg->delta_soc = 0; + fg->maint_soc = 0; + } else if (fg->maint_soc && msoc <= fg->last_msoc) { + /* MSOC is decreasing. Decrease maintenance SOC as well */ + fg->maint_soc -= 1; + if (!(msoc % 10)) { + /* + * Reduce the maintenance SOC additionally by 1 whenever + * it crosses a SOC multiple of 10. + */ + fg->maint_soc -= 1; + fg->delta_soc -= 1; + } + } + + fg_dbg(fg, FG_STATUS, "msoc: %d last_msoc: %d maint_soc: %d delta_soc: %d\n", + msoc, fg->last_msoc, fg->maint_soc, fg->delta_soc); + fg->last_msoc = msoc; +out: + mutex_unlock(&fg->charge_full_lock); + return rc; } static int fg_gen4_configure_full_soc(struct fg_dev *fg, int bsoc) @@ -1113,6 +1173,99 @@ static int fg_gen4_configure_full_soc(struct fg_dev *fg, int bsoc) return 0; } +static int fg_gen4_set_recharge_soc(struct fg_dev *fg, int recharge_soc) +{ + union power_supply_propval prop = {0, }; + int rc; + + if (recharge_soc < 0 || recharge_soc > FULL_CAPACITY || !fg->batt_psy) + return 0; + + prop.intval = recharge_soc; + rc = power_supply_set_property(fg->batt_psy, + POWER_SUPPLY_PROP_RECHARGE_SOC, &prop); + if (rc < 0) { + pr_err("Error in setting recharge SOC, rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int fg_gen4_adjust_recharge_soc(struct fg_gen4_chip *chip) +{ + struct fg_dev *fg = &chip->fg; + int rc, msoc, recharge_soc, new_recharge_soc = 0; 
+ bool recharge_soc_status; + + if (!chip->recharge_soc_thr) + return 0; + + recharge_soc = chip->recharge_soc_thr; + recharge_soc_status = fg->recharge_soc_adjusted; + + /* + * If the input is present and charging had been terminated, adjust + * the recharge SOC threshold based on the monotonic SOC at which + * the charge termination had happened. + */ + if (is_input_present(fg)) { + if (fg->charge_done) { + if (!fg->recharge_soc_adjusted) { + /* Get raw monotonic SOC for calculation */ + rc = fg_get_msoc(fg, &msoc); + if (rc < 0) { + pr_err("Error in getting msoc, rc=%d\n", + rc); + return rc; + } + + /* Adjust the recharge_soc threshold */ + new_recharge_soc = msoc - (FULL_CAPACITY - + recharge_soc); + fg->recharge_soc_adjusted = true; + } else { + /* adjusted already, do nothing */ + return 0; + } + } else { + if (!fg->recharge_soc_adjusted) + return 0; + + /* + * If we are here, then it means that recharge SOC + * had been adjusted already and it could be probably + * because of early termination. We shouldn't restore + * the original recharge SOC threshold if the health is + * not good, which means battery is in JEITA zone. 
+ */ + if (fg->health != POWER_SUPPLY_HEALTH_GOOD) + return 0; + + /* Restore the default value */ + new_recharge_soc = recharge_soc; + fg->recharge_soc_adjusted = false; + } + } else { + /* Restore the default value */ + new_recharge_soc = recharge_soc; + fg->recharge_soc_adjusted = false; + } + + if (recharge_soc_status == fg->recharge_soc_adjusted) + return 0; + + rc = fg_gen4_set_recharge_soc(fg, new_recharge_soc); + if (rc < 0) { + fg->recharge_soc_adjusted = recharge_soc_status; + pr_err("Couldn't set recharge SOC, rc=%d\n", rc); + return rc; + } + + fg_dbg(fg, FG_STATUS, "recharge soc set to %d\n", new_recharge_soc); + return 0; +} + static int fg_gen4_charge_full_update(struct fg_dev *fg) { struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); @@ -1284,7 +1437,6 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data) if (fg->battery_missing) { fg->profile_available = false; - fg->profile_loaded = false; fg->profile_load_status = PROFILE_NOT_LOADED; fg->soc_reporting_ready = false; fg->batt_id_ohms = -EINVAL; @@ -1396,6 +1548,10 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in charge_full_update, rc=%d\n", rc); + rc = fg_gen4_update_maint_soc(fg); + if (rc < 0) + pr_err("Error in updating maint_soc, rc=%d\n", rc); + rc = fg_gen4_adjust_ki_coeff_dischg(fg); if (rc < 0) pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); @@ -1676,6 +1832,10 @@ static void status_change_work(struct work_struct *work) if (rc < 0) pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); + rc = fg_gen4_adjust_recharge_soc(chip); + if (rc < 0) + pr_err("Error in adjusting recharge SOC, rc=%d\n", rc); + fg_ttf_update(fg); fg->prev_charge_status = fg->charge_status; out: @@ -2941,9 +3101,9 @@ static int fg_gen4_parse_dt(struct fg_gen4_chip *chip) rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp); if (rc < 0) - chip->cl->dt.start_soc = DEFAULT_CL_START_SOC; + 
chip->cl->dt.max_start_soc = DEFAULT_CL_START_SOC; else - chip->cl->dt.start_soc = temp; + chip->cl->dt.max_start_soc = temp; rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp); if (rc < 0) diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c new file mode 100644 index 0000000000000000000000000000000000000000..d111259f3ee883c31a18370667b57b38b0e0c9d2 --- /dev/null +++ b/drivers/power/supply/qcom/qpnp-qg.c @@ -0,0 +1,3064 @@ +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "QG-K: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fg-alg.h" +#include "qg-sdam.h" +#include "qg-core.h" +#include "qg-reg.h" +#include "qg-util.h" +#include "qg-soc.h" +#include "qg-battery-profile.h" +#include "qg-defs.h" + +static int qg_debug_mask; +module_param_named( + debug_mask, qg_debug_mask, int, 0600 +); + +static bool is_battery_present(struct qpnp_qg *chip) +{ + u8 reg = 0; + int rc; + + rc = qg_read(chip, chip->qg_base + QG_STATUS1_REG, ®, 1); + if (rc < 0) + pr_err("Failed to read battery presence, rc=%d\n", rc); + + return !!(reg & BATTERY_PRESENT_BIT); +} + +#define DEBUG_BATT_ID_LOW 6000 +#define DEBUG_BATT_ID_HIGH 8500 +static bool is_debug_batt_id(struct qpnp_qg *chip) +{ + if (is_between(DEBUG_BATT_ID_LOW, DEBUG_BATT_ID_HIGH, + chip->batt_id_ohm)) + return true; + + return false; +} + +static int qg_read_ocv(struct qpnp_qg *chip, u32 *ocv_uv, u32 *ocv_raw, u8 type) +{ + int rc, addr; + u64 temp = 0; + char ocv_name[20]; + + switch (type) { + case S3_GOOD_OCV: + addr = QG_S3_GOOD_OCV_V_DATA0_REG; + strlcpy(ocv_name, "S3_GOOD_OCV", 20); + break; + case S7_PON_OCV: + addr = QG_S7_PON_OCV_V_DATA0_REG; + strlcpy(ocv_name, "S7_PON_OCV", 20); + break; + case S3_LAST_OCV: + addr = QG_LAST_S3_SLEEP_V_DATA0_REG; + strlcpy(ocv_name, "S3_LAST_OCV", 20); + break; + case SDAM_PON_OCV: + addr = QG_SDAM_PON_OCV_OFFSET; + strlcpy(ocv_name, "SDAM_PON_OCV", 20); + break; + default: + pr_err("Invalid OCV type %d\n", type); + return -EINVAL; + } + + if (type == SDAM_PON_OCV) { + rc = qg_sdam_read(SDAM_PON_OCV_UV, ocv_raw); + if (rc < 0) { + pr_err("Failed to read SDAM PON OCV rc=%d\n", rc); + return rc; + } + } else { + rc = qg_read(chip, chip->qg_base + addr, (u8 *)ocv_raw, 2); + if (rc < 0) { + pr_err("Failed to read ocv, rc=%d\n", rc); + return rc; + } + } + + temp = 
*ocv_raw; + *ocv_uv = V_RAW_TO_UV(temp); + + pr_debug("%s: OCV_RAW=%x OCV=%duV\n", ocv_name, *ocv_raw, *ocv_uv); + + return rc; +} + +#define DEFAULT_S3_FIFO_LENGTH 3 +static int qg_update_fifo_length(struct qpnp_qg *chip, u8 length) +{ + int rc; + u8 s3_entry_fifo_length = 0; + + if (!length || length > 8) { + pr_err("Invalid FIFO length %d\n", length); + return -EINVAL; + } + + rc = qg_masked_write(chip, chip->qg_base + QG_S2_NORMAL_MEAS_CTL2_REG, + FIFO_LENGTH_MASK, (length - 1) << FIFO_LENGTH_SHIFT); + if (rc < 0) + pr_err("Failed to write S2 FIFO length, rc=%d\n", rc); + + /* update the S3 FIFO length, when S2 length is updated */ + if (length > 3) + s3_entry_fifo_length = (chip->dt.s3_entry_fifo_length > 0) ? + chip->dt.s3_entry_fifo_length : DEFAULT_S3_FIFO_LENGTH; + else /* Use S3 length as 1 for any S2 length <= 3 */ + s3_entry_fifo_length = 1; + + rc = qg_masked_write(chip, + chip->qg_base + QG_S3_SLEEP_OCV_IBAT_CTL1_REG, + SLEEP_IBAT_QUALIFIED_LENGTH_MASK, + s3_entry_fifo_length - 1); + if (rc < 0) + pr_err("Failed to write S3-entry fifo-length, rc=%d\n", + rc); + + return rc; +} + +static int qg_master_hold(struct qpnp_qg *chip, bool hold) +{ + int rc; + + /* clear the master */ + rc = qg_masked_write(chip, chip->qg_base + QG_DATA_CTL1_REG, + MASTER_HOLD_OR_CLR_BIT, 0); + if (rc < 0) + return rc; + + if (hold) { + /* 0 -> 1, hold the master */ + rc = qg_masked_write(chip, chip->qg_base + QG_DATA_CTL1_REG, + MASTER_HOLD_OR_CLR_BIT, + MASTER_HOLD_OR_CLR_BIT); + if (rc < 0) + return rc; + } + + qg_dbg(chip, QG_DEBUG_STATUS, "Master hold = %d\n", hold); + + return rc; +} + +static void qg_notify_charger(struct qpnp_qg *chip) +{ + union power_supply_propval prop = {0, }; + int rc; + + if (!chip->batt_psy) + return; + + if (is_debug_batt_id(chip)) { + prop.intval = 1; + power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_DEBUG_BATTERY, &prop); + return; + } + + if (!chip->profile_loaded) + return; + + prop.intval = chip->bp.float_volt_uv; + rc = 
power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop); + if (rc < 0) { + pr_err("Failed to set voltage_max property on batt_psy, rc=%d\n", + rc); + return; + } + + prop.intval = chip->bp.fastchg_curr_ma * 1000; + rc = power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop); + if (rc < 0) { + pr_err("Failed to set constant_charge_current_max property on batt_psy, rc=%d\n", + rc); + return; + } + + pr_debug("Notified charger on float voltage and FCC\n"); +} + +static bool is_batt_available(struct qpnp_qg *chip) +{ + if (chip->batt_psy) + return true; + + chip->batt_psy = power_supply_get_by_name("battery"); + if (!chip->batt_psy) + return false; + + /* batt_psy is initialized, set the fcc and fv */ + qg_notify_charger(chip); + + return true; +} + +static int qg_update_sdam_params(struct qpnp_qg *chip) +{ + int rc, batt_temp = 0, i; + unsigned long rtc_sec = 0; + + rc = get_rtc_time(&rtc_sec); + if (rc < 0) + pr_err("Failed to get RTC time, rc=%d\n", rc); + else + chip->sdam_data[SDAM_TIME_SEC] = rtc_sec; + + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc < 0) + pr_err("Failed to get battery-temp, rc = %d\n", rc); + else + chip->sdam_data[SDAM_TEMP] = (u32)batt_temp; + + rc = qg_sdam_write_all(chip->sdam_data); + if (rc < 0) + pr_err("Failed to write to SDAM rc=%d\n", rc); + + for (i = 0; i < SDAM_MAX; i++) + qg_dbg(chip, QG_DEBUG_STATUS, "SDAM write param %d value=%d\n", + i, chip->sdam_data[i]); + + return rc; +} + +static int qg_process_fifo(struct qpnp_qg *chip, u32 fifo_length) +{ + int rc = 0, i, j = 0, temp; + u8 v_fifo[MAX_FIFO_LENGTH * 2], i_fifo[MAX_FIFO_LENGTH * 2]; + u32 sample_interval = 0, sample_count = 0, fifo_v = 0, fifo_i = 0; + unsigned long rtc_sec = 0; + + rc = get_rtc_time(&rtc_sec); + if (rc < 0) + pr_err("Failed to get RTC time, rc=%d\n", rc); + + chip->kdata.fifo_time = (u32)rtc_sec; + + if (!fifo_length) { + pr_debug("No FIFO data\n"); + return 0; + } + + qg_dbg(chip, 
QG_DEBUG_FIFO, "FIFO length=%d\n", fifo_length); + + rc = get_sample_interval(chip, &sample_interval); + if (rc < 0) { + pr_err("Failed to get FIFO sample interval, rc=%d\n", rc); + return rc; + } + + rc = get_sample_count(chip, &sample_count); + if (rc < 0) { + pr_err("Failed to get FIFO sample count, rc=%d\n", rc); + return rc; + } + + /* + * If there is pending data from suspend, append the new FIFO + * data to it. + */ + if (chip->suspend_data) { + j = chip->kdata.fifo_length; /* append the data */ + chip->suspend_data = false; + qg_dbg(chip, QG_DEBUG_FIFO, + "Pending suspend-data FIFO length=%d\n", j); + } else { + /* clear any old pending data */ + chip->kdata.fifo_length = 0; + } + + for (i = 0; i < fifo_length * 2; i = i + 2, j++) { + rc = qg_read(chip, chip->qg_base + QG_V_FIFO0_DATA0_REG + i, + &v_fifo[i], 2); + if (rc < 0) { + pr_err("Failed to read QG_V_FIFO, rc=%d\n", rc); + return rc; + } + rc = qg_read(chip, chip->qg_base + QG_I_FIFO0_DATA0_REG + i, + &i_fifo[i], 2); + if (rc < 0) { + pr_err("Failed to read QG_I_FIFO, rc=%d\n", rc); + return rc; + } + + fifo_v = v_fifo[i] | (v_fifo[i + 1] << 8); + fifo_i = i_fifo[i] | (i_fifo[i + 1] << 8); + + if (fifo_v == FIFO_V_RESET_VAL || fifo_i == FIFO_I_RESET_VAL) { + pr_err("Invalid FIFO data V_RAW=%x I_RAW=%x - FIFO rejected\n", + fifo_v, fifo_i); + return -EINVAL; + } + + temp = sign_extend32(fifo_i, 15); + + chip->kdata.fifo[j].v = V_RAW_TO_UV(fifo_v); + chip->kdata.fifo[j].i = I_RAW_TO_UA(temp); + chip->kdata.fifo[j].interval = sample_interval; + chip->kdata.fifo[j].count = sample_count; + + qg_dbg(chip, QG_DEBUG_FIFO, "FIFO %d raw_v=%d uV=%d raw_i=%d uA=%d interval=%d count=%d\n", + j, fifo_v, + chip->kdata.fifo[j].v, + fifo_i, + (int)chip->kdata.fifo[j].i, + chip->kdata.fifo[j].interval, + chip->kdata.fifo[j].count); + } + + chip->kdata.fifo_length += fifo_length; + chip->kdata.seq_no = chip->seq_no++ % U32_MAX; + + return rc; +} + +static int qg_process_accumulator(struct qpnp_qg *chip) +{ + int rc, 
sample_interval = 0; + u8 count, index = chip->kdata.fifo_length; + u64 acc_v = 0, acc_i = 0; + s64 temp = 0; + + rc = qg_read(chip, chip->qg_base + QG_ACCUM_CNT_RT_REG, + &count, 1); + if (rc < 0) { + pr_err("Failed to read ACC count, rc=%d\n", rc); + return rc; + } + + if (!count) { + pr_debug("No ACCUMULATOR data!\n"); + return 0; + } + + rc = get_sample_interval(chip, &sample_interval); + if (rc < 0) { + pr_err("Failed to get ACC sample interval, rc=%d\n", rc); + return 0; + } + + rc = qg_read(chip, chip->qg_base + QG_V_ACCUM_DATA0_RT_REG, + (u8 *)&acc_v, 3); + if (rc < 0) { + pr_err("Failed to read ACC RT V data, rc=%d\n", rc); + return rc; + } + + rc = qg_read(chip, chip->qg_base + QG_I_ACCUM_DATA0_RT_REG, + (u8 *)&acc_i, 3); + if (rc < 0) { + pr_err("Failed to read ACC RT I data, rc=%d\n", rc); + return rc; + } + + temp = sign_extend64(acc_i, 23); + + chip->kdata.fifo[index].v = V_RAW_TO_UV(div_u64(acc_v, count)); + chip->kdata.fifo[index].i = I_RAW_TO_UA(div_s64(temp, count)); + chip->kdata.fifo[index].interval = sample_interval; + chip->kdata.fifo[index].count = count; + chip->kdata.fifo_length++; + + if (chip->kdata.fifo_length == 1) /* Only accumulator data */ + chip->kdata.seq_no = chip->seq_no++ % U32_MAX; + + qg_dbg(chip, QG_DEBUG_FIFO, "ACC v_avg=%duV i_avg=%duA interval=%d count=%d\n", + chip->kdata.fifo[index].v, + (int)chip->kdata.fifo[index].i, + chip->kdata.fifo[index].interval, + chip->kdata.fifo[index].count); + + return rc; +} + +static int qg_process_rt_fifo(struct qpnp_qg *chip) +{ + int rc; + u32 fifo_length = 0; + + /* Get the real-time FIFO length */ + rc = get_fifo_length(chip, &fifo_length, true); + if (rc < 0) { + pr_err("Failed to read RT FIFO length, rc=%d\n", rc); + return rc; + } + + rc = qg_process_fifo(chip, fifo_length); + if (rc < 0) { + pr_err("Failed to process FIFO data, rc=%d\n", rc); + return rc; + } + + rc = qg_process_accumulator(chip); + if (rc < 0) { + pr_err("Failed to process ACC data, rc=%d\n", rc); + return rc; + 
} + + return rc; +} + +#define VBAT_LOW_HYST_UV 50000 /* 50mV */ +static int qg_vbat_low_wa(struct qpnp_qg *chip) +{ + int rc, i, temp = 0; + u32 vbat_low_uv = 0; + + rc = qg_get_battery_temp(chip, &temp); + if (rc < 0) { + pr_err("Failed to read batt_temp rc=%d\n", rc); + temp = 250; + } + + vbat_low_uv = 1000 * ((temp < chip->dt.cold_temp_threshold) ? + chip->dt.vbatt_low_cold_mv : + chip->dt.vbatt_low_mv); + vbat_low_uv += VBAT_LOW_HYST_UV; + + if (!(chip->wa_flags & QG_VBAT_LOW_WA) || !chip->vbat_low) + return 0; + + /* + * PMI632 1.0 does not generate a falling VBAT_LOW IRQ. + * To exit from VBAT_LOW config, check if any of the FIFO + * averages is > vbat_low threshold and reconfigure the + * FIFO length to normal. + */ + for (i = 0; i < chip->kdata.fifo_length; i++) { + if (chip->kdata.fifo[i].v > vbat_low_uv) { + rc = qg_master_hold(chip, true); + if (rc < 0) { + pr_err("Failed to hold master, rc=%d\n", rc); + goto done; + } + rc = qg_update_fifo_length(chip, + chip->dt.s2_fifo_length); + if (rc < 0) + goto done; + + rc = qg_master_hold(chip, false); + if (rc < 0) { + pr_err("Failed to release master, rc=%d\n", rc); + goto done; + } + /* FIFOs restarted */ + chip->last_fifo_update_time = ktime_get(); + + chip->vbat_low = false; + pr_info("Exit VBAT_LOW vbat_avg=%duV vbat_low=%duV updated fifo_length=%d\n", + chip->kdata.fifo[i].v, vbat_low_uv, + chip->dt.s2_fifo_length); + break; + } + } + + return 0; + +done: + qg_master_hold(chip, false); + return rc; +} + +static int qg_vbat_thresholds_config(struct qpnp_qg *chip) +{ + int rc, temp = 0, vbat_mv; + u8 reg; + + rc = qg_get_battery_temp(chip, &temp); + if (rc < 0) { + pr_err("Failed to read batt_temp rc=%d\n", rc); + return rc; + } + + vbat_mv = (temp < chip->dt.cold_temp_threshold) ? 
+ chip->dt.vbatt_empty_cold_mv : + chip->dt.vbatt_empty_mv; + + rc = qg_read(chip, chip->qg_base + QG_VBAT_EMPTY_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to read vbat-empty, rc=%d\n", rc); + return rc; + } + + if (vbat_mv == (reg * 50)) /* No change */ + goto config_vbat_low; + + reg = vbat_mv / 50; + rc = qg_write(chip, chip->qg_base + QG_VBAT_EMPTY_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to write vbat-empty, rc=%d\n", rc); + return rc; + } + + qg_dbg(chip, QG_DEBUG_STATUS, + "VBAT EMPTY threshold updated to %dmV temp=%d\n", + vbat_mv, temp); + +config_vbat_low: + vbat_mv = (temp < chip->dt.cold_temp_threshold) ? + chip->dt.vbatt_low_cold_mv : + chip->dt.vbatt_low_mv; + + rc = qg_read(chip, chip->qg_base + QG_VBAT_LOW_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to read vbat-low, rc=%d\n", rc); + return rc; + } + + if (vbat_mv == (reg * 50)) /* No change */ + return 0; + + reg = vbat_mv / 50; + rc = qg_write(chip, chip->qg_base + QG_VBAT_LOW_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to write vbat-low, rc=%d\n", rc); + return rc; + } + + qg_dbg(chip, QG_DEBUG_STATUS, + "VBAT LOW threshold updated to %dmV temp=%d\n", + vbat_mv, temp); + + return rc; +} + +#define MIN_FIFO_FULL_TIME_MS 12000 +static int process_rt_fifo_data(struct qpnp_qg *chip, + bool vbat_low, bool update_smb) +{ + int rc = 0; + ktime_t now = ktime_get(); + s64 time_delta; + + /* + * Reject the FIFO read event if there are back-to-back requests + * This is done to gaurantee that there is always a minimum FIFO + * data to be processed, ignore this if vbat_low is set. 
+ */ + time_delta = ktime_ms_delta(now, chip->last_user_update_time); + + qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms vbat_low=%d\n", + time_delta, vbat_low); + + if (time_delta > MIN_FIFO_FULL_TIME_MS || vbat_low || update_smb) { + rc = qg_master_hold(chip, true); + if (rc < 0) { + pr_err("Failed to hold master, rc=%d\n", rc); + goto done; + } + + rc = qg_process_rt_fifo(chip); + if (rc < 0) { + pr_err("Failed to process FIFO real-time, rc=%d\n", rc); + goto done; + } + + if (vbat_low) { + /* change FIFO length */ + rc = qg_update_fifo_length(chip, + chip->dt.s2_vbat_low_fifo_length); + if (rc < 0) + goto done; + + qg_dbg(chip, QG_DEBUG_STATUS, + "FIFO length updated to %d vbat_low=%d\n", + chip->dt.s2_vbat_low_fifo_length, + vbat_low); + } + + if (update_smb) { + rc = qg_masked_write(chip, chip->qg_base + + QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT, + chip->parallel_enabled ? + PARALLEL_IBAT_SENSE_EN_BIT : 0); + if (rc < 0) { + pr_err("Failed to update SMB_EN, rc=%d\n", rc); + goto done; + } + qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n", + chip->parallel_enabled); + } + + rc = qg_master_hold(chip, false); + if (rc < 0) { + pr_err("Failed to release master, rc=%d\n", rc); + goto done; + } + /* FIFOs restarted */ + chip->last_fifo_update_time = ktime_get(); + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); + chip->last_user_update_time = now; + + /* vote to stay awake until userspace reads data */ + vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0); + } else { + qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n", + time_delta); + } +done: + qg_master_hold(chip, false); + return rc; +} + +static void process_udata_work(struct work_struct *work) +{ + struct qpnp_qg *chip = container_of(work, + struct qpnp_qg, udata_work); + int rc; + + if (chip->udata.param[QG_CC_SOC].valid) + chip->cc_soc = chip->udata.param[QG_CC_SOC].data; + + if (chip->udata.param[QG_BATT_SOC].valid) + 
chip->batt_soc = chip->udata.param[QG_BATT_SOC].data; + + if (chip->udata.param[QG_SOC].valid) { + qg_dbg(chip, QG_DEBUG_SOC, "udata SOC=%d last SOC=%d\n", + chip->udata.param[QG_SOC].data, chip->catch_up_soc); + + chip->catch_up_soc = chip->udata.param[QG_SOC].data; + qg_scale_soc(chip, false); + + /* update parameters to SDAM */ + chip->sdam_data[SDAM_SOC] = chip->msoc; + chip->sdam_data[SDAM_OCV_UV] = + chip->udata.param[QG_OCV_UV].data; + chip->sdam_data[SDAM_RBAT_MOHM] = + chip->udata.param[QG_RBAT_MOHM].data; + chip->sdam_data[SDAM_VALID] = 1; + + rc = qg_update_sdam_params(chip); + if (rc < 0) + pr_err("Failed to update SDAM params, rc=%d\n", rc); + } + + if (chip->udata.param[QG_CHARGE_COUNTER].valid) + chip->charge_counter_uah = + chip->udata.param[QG_CHARGE_COUNTER].data; + + vote(chip->awake_votable, UDATA_READY_VOTER, false, 0); +} + +static irqreturn_t qg_default_irq_handler(int irq, void *data) +{ + struct qpnp_qg *chip = data; + + qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + + return IRQ_HANDLED; +} + +#define MAX_FIFO_DELTA_PERCENT 10 +static irqreturn_t qg_fifo_update_done_handler(int irq, void *data) +{ + ktime_t now = ktime_get(); + int rc, hw_delta_ms = 0, margin_ms = 0; + u32 fifo_length = 0; + s64 time_delta_ms = 0; + struct qpnp_qg *chip = data; + + time_delta_ms = ktime_ms_delta(now, chip->last_fifo_update_time); + chip->last_fifo_update_time = now; + + qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + mutex_lock(&chip->data_lock); + + rc = get_fifo_length(chip, &fifo_length, false); + if (rc < 0) { + pr_err("Failed to get FIFO length, rc=%d\n", rc); + goto done; + } + + rc = qg_process_fifo(chip, fifo_length); + if (rc < 0) { + pr_err("Failed to process QG FIFO, rc=%d\n", rc); + goto done; + } + + rc = qg_vbat_thresholds_config(chip); + if (rc < 0) + pr_err("Failed to apply VBAT EMPTY config rc=%d\n", rc); + + rc = qg_vbat_low_wa(chip); + if (rc < 0) { + pr_err("Failed to apply VBAT LOW WA, rc=%d\n", rc); + goto done; + } + + rc = 
get_fifo_done_time(chip, false, &hw_delta_ms); + if (rc < 0) + hw_delta_ms = 0; + else + margin_ms = (hw_delta_ms * MAX_FIFO_DELTA_PERCENT) / 100; + + if (abs(hw_delta_ms - time_delta_ms) < margin_ms) { + chip->kdata.param[QG_FIFO_TIME_DELTA].data = time_delta_ms; + chip->kdata.param[QG_FIFO_TIME_DELTA].valid = true; + qg_dbg(chip, QG_DEBUG_FIFO, "FIFO_done time_delta_ms=%lld\n", + time_delta_ms); + } + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); + + /* vote to stay awake until userspace reads data */ + vote(chip->awake_votable, FIFO_DONE_VOTER, true, 0); + +done: + mutex_unlock(&chip->data_lock); + return IRQ_HANDLED; +} + +static irqreturn_t qg_vbat_low_handler(int irq, void *data) +{ + int rc; + struct qpnp_qg *chip = data; + u8 status = 0; + + qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + mutex_lock(&chip->data_lock); + + rc = qg_read(chip, chip->qg_base + QG_INT_RT_STS_REG, &status, 1); + if (rc < 0) { + pr_err("Failed to read RT status, rc=%d\n", rc); + goto done; + } + chip->vbat_low = !!(status & VBAT_LOW_INT_RT_STS_BIT); + + rc = process_rt_fifo_data(chip, chip->vbat_low, false); + if (rc < 0) + pr_err("Failed to process RT FIFO data, rc=%d\n", rc); + + qg_dbg(chip, QG_DEBUG_IRQ, "VBAT_LOW = %d\n", chip->vbat_low); +done: + mutex_unlock(&chip->data_lock); + return IRQ_HANDLED; +} + +static irqreturn_t qg_vbat_empty_handler(int irq, void *data) +{ + struct qpnp_qg *chip = data; + u32 ocv_uv = 0; + + qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + pr_warn("VBATT EMPTY SOC = 0\n"); + + chip->catch_up_soc = 0; + qg_scale_soc(chip, true); + + qg_sdam_read(SDAM_OCV_UV, &ocv_uv); + chip->sdam_data[SDAM_SOC] = 0; + chip->sdam_data[SDAM_OCV_UV] = ocv_uv; + chip->sdam_data[SDAM_VALID] = 1; + + qg_update_sdam_params(chip); + + if (chip->qg_psy) + power_supply_changed(chip->qg_psy); + + return IRQ_HANDLED; +} + +static irqreturn_t qg_good_ocv_handler(int irq, void *data) +{ + int rc; + u8 status = 0; + u32 
ocv_uv = 0, ocv_raw = 0; + struct qpnp_qg *chip = data; + + qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + + mutex_lock(&chip->data_lock); + + rc = qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1); + if (rc < 0) { + pr_err("Failed to read status2 register rc=%d\n", rc); + goto done; + } + + if (!(status & GOOD_OCV_BIT)) + goto done; + + rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S3_GOOD_OCV); + if (rc < 0) { + pr_err("Failed to read good_ocv, rc=%d\n", rc); + goto done; + } + + chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv; + chip->kdata.param[QG_GOOD_OCV_UV].valid = true; + + vote(chip->awake_votable, GOOD_OCV_VOTER, true, 0); + + /* signal the readd thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); +done: + mutex_unlock(&chip->data_lock); + return IRQ_HANDLED; +} + +static struct qg_irq_info qg_irqs[] = { + [QG_BATT_MISSING_IRQ] = { + .name = "qg-batt-missing", + .handler = qg_default_irq_handler, + }, + [QG_VBATT_LOW_IRQ] = { + .name = "qg-vbat-low", + .handler = qg_vbat_low_handler, + .wake = true, + }, + [QG_VBATT_EMPTY_IRQ] = { + .name = "qg-vbat-empty", + .handler = qg_vbat_empty_handler, + .wake = true, + }, + [QG_FIFO_UPDATE_DONE_IRQ] = { + .name = "qg-fifo-done", + .handler = qg_fifo_update_done_handler, + .wake = true, + }, + [QG_GOOD_OCV_IRQ] = { + .name = "qg-good-ocv", + .handler = qg_good_ocv_handler, + .wake = true, + }, + [QG_FSM_STAT_CHG_IRQ] = { + .name = "qg-fsm-state-chg", + .handler = qg_default_irq_handler, + }, + [QG_EVENT_IRQ] = { + .name = "qg-event", + .handler = qg_default_irq_handler, + }, +}; + +static int qg_awake_cb(struct votable *votable, void *data, int awake, + const char *client) +{ + struct qpnp_qg *chip = data; + + /* ignore if the QG device is not open */ + if (!chip->qg_device_open) + return 0; + + if (awake) + pm_stay_awake(chip->dev); + else + pm_relax(chip->dev); + + pr_debug("client: %s awake: %d\n", client, awake); + return 0; +} + +static int qg_fifo_irq_disable_cb(struct 
votable *votable, void *data, + int disable, const char *client) +{ + if (disable) { + if (qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].wake) + disable_irq_wake( + qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].irq); + if (qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].irq) + disable_irq_nosync( + qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].irq); + } else { + if (qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].irq) + enable_irq(qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].irq); + if (qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].wake) + enable_irq_wake( + qg_irqs[QG_FIFO_UPDATE_DONE_IRQ].irq); + } + + return 0; +} + +static int qg_vbatt_irq_disable_cb(struct votable *votable, void *data, + int disable, const char *client) +{ + if (disable) { + if (qg_irqs[QG_VBATT_LOW_IRQ].wake) + disable_irq_wake(qg_irqs[QG_VBATT_LOW_IRQ].irq); + if (qg_irqs[QG_VBATT_EMPTY_IRQ].wake) + disable_irq_wake(qg_irqs[QG_VBATT_EMPTY_IRQ].irq); + if (qg_irqs[QG_VBATT_LOW_IRQ].irq) + disable_irq_nosync(qg_irqs[QG_VBATT_LOW_IRQ].irq); + if (qg_irqs[QG_VBATT_EMPTY_IRQ].irq) + disable_irq_nosync(qg_irqs[QG_VBATT_EMPTY_IRQ].irq); + } else { + if (qg_irqs[QG_VBATT_LOW_IRQ].irq) + enable_irq(qg_irqs[QG_VBATT_LOW_IRQ].irq); + if (qg_irqs[QG_VBATT_EMPTY_IRQ].irq) + enable_irq(qg_irqs[QG_VBATT_EMPTY_IRQ].irq); + if (qg_irqs[QG_VBATT_LOW_IRQ].wake) + enable_irq_wake(qg_irqs[QG_VBATT_LOW_IRQ].irq); + if (qg_irqs[QG_VBATT_EMPTY_IRQ].wake) + enable_irq_wake(qg_irqs[QG_VBATT_EMPTY_IRQ].irq); + } + + return 0; +} + +static int qg_good_ocv_irq_disable_cb(struct votable *votable, void *data, + int disable, const char *client) +{ + if (disable) { + if (qg_irqs[QG_GOOD_OCV_IRQ].wake) + disable_irq_wake(qg_irqs[QG_GOOD_OCV_IRQ].irq); + if (qg_irqs[QG_GOOD_OCV_IRQ].irq) + disable_irq_nosync(qg_irqs[QG_GOOD_OCV_IRQ].irq); + } else { + if (qg_irqs[QG_GOOD_OCV_IRQ].irq) + enable_irq(qg_irqs[QG_GOOD_OCV_IRQ].irq); + if (qg_irqs[QG_GOOD_OCV_IRQ].wake) + enable_irq_wake(qg_irqs[QG_GOOD_OCV_IRQ].irq); + } + + return 0; +} + +/* ALG callback functions below */ + +static int qg_get_learned_capacity(void 
*data, int64_t *learned_cap_uah) +{ + struct qpnp_qg *chip = data; + int16_t cc_mah; + int rc; + + if (!chip) + return -ENODEV; + + if (chip->battery_missing || !chip->profile_loaded) + return -EPERM; + + rc = qg_sdam_multibyte_read(QG_SDAM_LEARNED_CAPACITY_OFFSET, + (u8 *)&cc_mah, 2); + if (rc < 0) { + pr_err("Error in reading learned_capacity, rc=%d\n", rc); + return rc; + } + *learned_cap_uah = cc_mah * 1000; + + qg_dbg(chip, QG_DEBUG_ALG_CL, "Retrieved learned capacity %llduah\n", + *learned_cap_uah); + return 0; +} + +static int qg_store_learned_capacity(void *data, int64_t learned_cap_uah) +{ + struct qpnp_qg *chip = data; + int16_t cc_mah; + int rc; + + if (!chip) + return -ENODEV; + + if (chip->battery_missing || !learned_cap_uah) + return -EPERM; + + cc_mah = div64_s64(learned_cap_uah, 1000); + rc = qg_sdam_multibyte_write(QG_SDAM_LEARNED_CAPACITY_OFFSET, + (u8 *)&cc_mah, 2); + if (rc < 0) { + pr_err("Error in writing learned_capacity, rc=%d\n", rc); + return rc; + } + + qg_dbg(chip, QG_DEBUG_ALG_CL, "Stored learned capacity %llduah\n", + learned_cap_uah); + return 0; +} + +static int qg_get_cc_soc(void *data, int *cc_soc) +{ + struct qpnp_qg *chip = data; + + if (!chip) + return -ENODEV; + + if (chip->cc_soc == INT_MIN) + return -EINVAL; + + *cc_soc = chip->cc_soc; + + return 0; +} + +static int qg_restore_cycle_count(void *data, u16 *buf, int length) +{ + struct qpnp_qg *chip = data; + int id, rc = 0; + u8 tmp[2]; + + if (!chip) + return -ENODEV; + + if (chip->battery_missing || !chip->profile_loaded) + return -EPERM; + + if (!buf || length > BUCKET_COUNT) + return -EINVAL; + + for (id = 0; id < length; id++) { + rc = qg_sdam_multibyte_read( + QG_SDAM_CYCLE_COUNT_OFFSET + (id * 2), + (u8 *)tmp, 2); + if (rc < 0) { + pr_err("failed to read bucket %d rc=%d\n", id, rc); + return rc; + } + *buf++ = tmp[0] | tmp[1] << 8; + } + + return rc; +} + +static int qg_store_cycle_count(void *data, u16 *buf, int id, int length) +{ + struct qpnp_qg *chip = data; + int 
rc = 0; + + if (!chip) + return -ENODEV; + + if (chip->battery_missing || !chip->profile_loaded) + return -EPERM; + + if (!buf || length > BUCKET_COUNT * 2 || id < 0 || + id > BUCKET_COUNT - 1 || + (((id * 2) + length) > BUCKET_COUNT * 2)) + return -EINVAL; + + rc = qg_sdam_multibyte_write( + QG_SDAM_CYCLE_COUNT_OFFSET + (id * 2), + (u8 *)buf, length); + if (rc < 0) + pr_err("failed to write bucket %d rc=%d\n", id, rc); + + return rc; +} + +#define DEFAULT_BATT_TYPE "Unknown Battery" +#define MISSING_BATT_TYPE "Missing Battery" +#define DEBUG_BATT_TYPE "Debug Board" +static const char *qg_get_battery_type(struct qpnp_qg *chip) +{ + if (chip->battery_missing) + return MISSING_BATT_TYPE; + + if (is_debug_batt_id(chip)) + return DEBUG_BATT_TYPE; + + if (chip->bp.batt_type_str) { + if (chip->profile_loaded) + return chip->bp.batt_type_str; + } + + return DEFAULT_BATT_TYPE; +} + +static int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua) +{ + int rc = 0, last_ibat = 0; + u32 fifo_length = 0; + + if (chip->battery_missing) { + *ibat_ua = 0; + return 0; + } + + if (chip->parallel_enabled) { + /* read the last real-time FIFO */ + rc = get_fifo_length(chip, &fifo_length, true); + if (rc < 0) { + pr_err("Failed to read RT FIFO length, rc=%d\n", rc); + return rc; + } + fifo_length = (fifo_length == 0) ? 
0 : fifo_length - 1; + fifo_length *= 2; + rc = qg_read(chip, chip->qg_base + QG_I_FIFO0_DATA0_REG + + fifo_length, (u8 *)&last_ibat, 2); + if (rc < 0) { + pr_err("Failed to read FIFO_I_%d reg, rc=%d\n", + fifo_length / 2, rc); + return rc; + } + } else { + rc = qg_read(chip, chip->qg_base + QG_LAST_ADC_I_DATA0_REG, + (u8 *)&last_ibat, 2); + if (rc < 0) { + pr_err("Failed to read LAST_ADV_I reg, rc=%d\n", rc); + return rc; + } + } + + last_ibat = sign_extend32(last_ibat, 15); + *ibat_ua = I_RAW_TO_UA(last_ibat); + + return rc; +} + +static int qg_get_battery_voltage(struct qpnp_qg *chip, int *vbat_uv) +{ + int rc = 0; + u64 last_vbat = 0; + + if (chip->battery_missing) { + *vbat_uv = 3700000; + return 0; + } + + rc = qg_read(chip, chip->qg_base + QG_LAST_ADC_V_DATA0_REG, + (u8 *)&last_vbat, 2); + if (rc < 0) { + pr_err("Failed to read LAST_ADV_V reg, rc=%d\n", rc); + return rc; + } + + *vbat_uv = V_RAW_TO_UV(last_vbat); + + return rc; +} + +#define DEBUG_BATT_SOC 67 +#define BATT_MISSING_SOC 50 +#define EMPTY_SOC 0 +#define FULL_SOC 100 +static int qg_get_battery_capacity(struct qpnp_qg *chip, int *soc) +{ + if (is_debug_batt_id(chip)) { + *soc = DEBUG_BATT_SOC; + return 0; + } + + if (chip->battery_missing || !chip->profile_loaded) { + *soc = BATT_MISSING_SOC; + return 0; + } + + if (chip->charge_full) { + *soc = FULL_SOC; + return 0; + } + + mutex_lock(&chip->soc_lock); + + if (chip->dt.linearize_soc && chip->maint_soc > 0) + *soc = chip->maint_soc; + else + *soc = chip->msoc; + + mutex_unlock(&chip->soc_lock); + + return 0; +} + +static int qg_psy_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *pval) +{ + struct qpnp_qg *chip = power_supply_get_drvdata(psy); + int rc = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + if (chip->dt.cl_disable) { + pr_warn("Capacity learning disabled!\n"); + return 0; + } + if (chip->cl->active) { + pr_warn("Capacity learning active!\n"); + return 0; + } + if 
(pval->intval <= 0 || pval->intval > chip->cl->nom_cap_uah) { + pr_err("charge_full is out of bounds\n"); + return -EINVAL; + } + mutex_lock(&chip->cl->lock); + rc = qg_store_learned_capacity(chip, pval->intval); + if (!rc) + chip->cl->learned_cap_uah = pval->intval; + mutex_unlock(&chip->cl->lock); + break; + default: + break; + } + return 0; +} + +static int qg_psy_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *pval) +{ + struct qpnp_qg *chip = power_supply_get_drvdata(psy); + int rc = 0; + int64_t temp = 0; + + pval->intval = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_CAPACITY: + rc = qg_get_battery_capacity(chip, &pval->intval); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + rc = qg_get_battery_voltage(chip, &pval->intval); + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + rc = qg_get_battery_current(chip, &pval->intval); + break; + case POWER_SUPPLY_PROP_VOLTAGE_OCV: + rc = qg_sdam_read(SDAM_OCV_UV, &pval->intval); + break; + case POWER_SUPPLY_PROP_TEMP: + rc = qg_get_battery_temp(chip, &pval->intval); + break; + case POWER_SUPPLY_PROP_RESISTANCE_ID: + pval->intval = chip->batt_id_ohm; + break; + case POWER_SUPPLY_PROP_DEBUG_BATTERY: + pval->intval = is_debug_batt_id(chip); + break; + case POWER_SUPPLY_PROP_RESISTANCE: + rc = qg_sdam_read(SDAM_RBAT_MOHM, &pval->intval); + if (!rc) + pval->intval *= 1000; + break; + case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE: + pval->intval = chip->dt.rbat_conn_mohm; + break; + case POWER_SUPPLY_PROP_BATTERY_TYPE: + pval->strval = qg_get_battery_type(chip); + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + pval->intval = chip->dt.vbatt_cutoff_mv * 1000; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + pval->intval = chip->bp.float_volt_uv; + break; + case POWER_SUPPLY_PROP_BATT_FULL_CURRENT: + pval->intval = chip->dt.iterm_ma * 1000; + break; + case POWER_SUPPLY_PROP_BATT_PROFILE_VERSION: + pval->intval = chip->bp.qg_profile_version; + break; + case 
POWER_SUPPLY_PROP_CHARGE_COUNTER: + pval->intval = chip->charge_counter_uah; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + if (!chip->dt.cl_disable && chip->dt.cl_feedback_on) + rc = qg_get_learned_capacity(chip, &temp); + else + rc = qg_get_nominal_capacity((int *)&temp, 250, true); + if (!rc) + pval->intval = (int)temp; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + rc = qg_get_nominal_capacity((int *)&temp, 250, true); + if (!rc) + pval->intval = (int)temp; + break; + case POWER_SUPPLY_PROP_CYCLE_COUNTS: + rc = get_cycle_counts(chip->counter, &pval->strval); + if (rc < 0) + pval->strval = NULL; + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + rc = get_cycle_count(chip->counter, &pval->intval); + break; + default: + pr_debug("Unsupported property %d\n", psp); + break; + } + + return rc; +} + +static int qg_property_is_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_FULL: + return 1; + default: + break; + } + return 0; +} + +static enum power_supply_property qg_psy_props[] = { + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_VOLTAGE_OCV, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_RESISTANCE, + POWER_SUPPLY_PROP_RESISTANCE_ID, + POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE, + POWER_SUPPLY_PROP_DEBUG_BATTERY, + POWER_SUPPLY_PROP_BATTERY_TYPE, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_BATT_FULL_CURRENT, + POWER_SUPPLY_PROP_BATT_PROFILE_VERSION, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_CYCLE_COUNTS, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, +}; + +static const struct power_supply_desc qg_psy_desc = { + .name = "bms", + .type = POWER_SUPPLY_TYPE_BMS, + .properties = qg_psy_props, + .num_properties = ARRAY_SIZE(qg_psy_props), + .get_property = qg_psy_get_property, + .set_property = 
qg_psy_set_property, + .property_is_writeable = qg_property_is_writeable, +}; + +#define DEFAULT_RECHARGE_SOC 95 +static int qg_charge_full_update(struct qpnp_qg *chip) +{ + union power_supply_propval prop = {0, }; + int rc, recharge_soc, health; + + if (!chip->dt.hold_soc_while_full) + goto out; + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_HEALTH, &prop); + if (rc < 0) { + pr_err("Failed to get battery health, rc=%d\n", rc); + goto out; + } + health = prop.intval; + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_RECHARGE_SOC, &prop); + if (rc < 0 || prop.intval < 0) { + pr_debug("Failed to get recharge-soc\n"); + recharge_soc = DEFAULT_RECHARGE_SOC; + } + recharge_soc = prop.intval; + + qg_dbg(chip, QG_DEBUG_STATUS, "msoc=%d health=%d charge_full=%d\n", + chip->msoc, health, chip->charge_full); + if (chip->charge_done && !chip->charge_full) { + if (chip->msoc >= 99 && health == POWER_SUPPLY_HEALTH_GOOD) { + chip->charge_full = true; + qg_dbg(chip, QG_DEBUG_STATUS, "Setting charge_full (0->1) @ msoc=%d\n", + chip->msoc); + } else if (health != POWER_SUPPLY_HEALTH_GOOD) { + /* terminated in JEITA */ + qg_dbg(chip, QG_DEBUG_STATUS, "Terminated charging @ msoc=%d\n", + chip->msoc); + } + } else if ((!chip->charge_done || chip->msoc < recharge_soc) + && chip->charge_full) { + + if (chip->wa_flags & QG_RECHARGE_SOC_WA) { + /* Force recharge */ + prop.intval = 0; + rc = power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_RECHARGE_SOC, &prop); + if (rc < 0) + pr_err("Failed to force recharge rc=%d\n", rc); + else + qg_dbg(chip, QG_DEBUG_STATUS, + "Forced recharge\n"); + } + + /* + * If recharge or discharge has started and + * if linearize soc dtsi property defined + * scale msoc from 100% for better UX. 
+ */ + if (chip->dt.linearize_soc && chip->msoc < 99) { + chip->maint_soc = FULL_SOC; + qg_scale_soc(chip, false); + } + + qg_dbg(chip, QG_DEBUG_STATUS, "msoc=%d recharge_soc=%d charge_full (1->0)\n", + chip->msoc, recharge_soc); + chip->charge_full = false; + } +out: + return 0; +} + +static int qg_parallel_status_update(struct qpnp_qg *chip) +{ + int rc; + bool parallel_enabled = is_parallel_enabled(chip); + + if (parallel_enabled == chip->parallel_enabled) + return 0; + + chip->parallel_enabled = parallel_enabled; + qg_dbg(chip, QG_DEBUG_STATUS, + "Parallel status changed Enabled=%d\n", parallel_enabled); + + mutex_lock(&chip->data_lock); + + rc = process_rt_fifo_data(chip, false, true); + if (rc < 0) + pr_err("Failed to process RT FIFO data, rc=%d\n", rc); + + mutex_unlock(&chip->data_lock); + + return 0; +} + +static int qg_usb_status_update(struct qpnp_qg *chip) +{ + bool usb_present = is_usb_present(chip); + + if (chip->usb_present != usb_present) { + qg_dbg(chip, QG_DEBUG_STATUS, + "USB status changed Present=%d\n", + usb_present); + qg_scale_soc(chip, false); + } + + chip->usb_present = usb_present; + + return 0; +} + +static void qg_status_change_work(struct work_struct *work) +{ + struct qpnp_qg *chip = container_of(work, + struct qpnp_qg, qg_status_change_work); + union power_supply_propval prop = {0, }; + int rc = 0, batt_temp = 0, batt_soc_32b = 0; + + if (!is_batt_available(chip)) { + pr_debug("batt-psy not available\n"); + goto out; + } + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &prop); + if (rc < 0) + pr_err("Failed to get charger status, rc=%d\n", rc); + else + chip->charge_status = prop.intval; + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_DONE, &prop); + if (rc < 0) + pr_err("Failed to get charge done status, rc=%d\n", rc); + else + chip->charge_done = prop.intval; + + qg_dbg(chip, QG_DEBUG_STATUS, "charge_status=%d charge_done=%d\n", + chip->charge_status, chip->charge_done); + 
+ rc = qg_parallel_status_update(chip); + if (rc < 0) + pr_err("Failed to update parallel-status, rc=%d\n", rc); + + rc = qg_usb_status_update(chip); + if (rc < 0) + pr_err("Failed to update usb status, rc=%d\n", rc); + + cycle_count_update(chip->counter, + DIV_ROUND_CLOSEST(chip->msoc * 255, 100), + chip->charge_status, chip->charge_done, + chip->usb_present); + + if (!chip->dt.cl_disable) { + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc < 0) { + pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc); + } else { + batt_soc_32b = div64_u64( + chip->batt_soc * BATT_SOC_32BIT, + QG_SOC_FULL); + cap_learning_update(chip->cl, batt_temp, batt_soc_32b, + chip->charge_status, chip->charge_done, + chip->usb_present, false); + } + } + rc = qg_charge_full_update(chip); + if (rc < 0) + pr_err("Failed in charge_full_update, rc=%d\n", rc); +out: + pm_relax(chip->dev); +} + +static int qg_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct power_supply *psy = data; + struct qpnp_qg *chip = container_of(nb, struct qpnp_qg, nb); + + if (event != PSY_EVENT_PROP_CHANGED) + return NOTIFY_OK; + + if (work_pending(&chip->qg_status_change_work)) + return NOTIFY_OK; + + if ((strcmp(psy->desc->name, "battery") == 0) + || (strcmp(psy->desc->name, "parallel") == 0) + || (strcmp(psy->desc->name, "usb") == 0)) { + /* + * We cannot vote for awake votable here as that takes + * a mutex lock and this is executed in an atomic context. 
+ */ + pm_stay_awake(chip->dev); + schedule_work(&chip->qg_status_change_work); + } + + return NOTIFY_OK; +} + +static int qg_init_psy(struct qpnp_qg *chip) +{ + struct power_supply_config qg_psy_cfg; + int rc; + + qg_psy_cfg.drv_data = chip; + qg_psy_cfg.of_node = NULL; + qg_psy_cfg.supplied_to = NULL; + qg_psy_cfg.num_supplicants = 0; + chip->qg_psy = devm_power_supply_register(chip->dev, + &qg_psy_desc, &qg_psy_cfg); + if (IS_ERR_OR_NULL(chip->qg_psy)) { + pr_err("Failed to register qg_psy rc = %ld\n", + PTR_ERR(chip->qg_psy)); + return -ENODEV; + } + + chip->nb.notifier_call = qg_notifier_cb; + rc = power_supply_reg_notifier(&chip->nb); + if (rc < 0) + pr_err("Failed register psy notifier rc = %d\n", rc); + + return rc; +} + +static ssize_t qg_device_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + int rc; + struct qpnp_qg *chip = file->private_data; + unsigned long data_size = sizeof(chip->kdata); + + /* non-blocking access, return */ + if (!chip->data_ready && (file->f_flags & O_NONBLOCK)) + return 0; + + /* blocking access wait on data_ready */ + if (!(file->f_flags & O_NONBLOCK)) { + rc = wait_event_interruptible(chip->qg_wait_q, + chip->data_ready); + if (rc < 0) { + pr_debug("Failed wait! 
rc=%d\n", rc); + return rc; + } + } + + mutex_lock(&chip->data_lock); + + if (!chip->data_ready) { + pr_debug("No Data, false wakeup\n"); + rc = -EFAULT; + goto fail_read; + } + + + if (copy_to_user(buf, &chip->kdata, data_size)) { + pr_err("Failed in copy_to_user\n"); + rc = -EFAULT; + goto fail_read; + } + chip->data_ready = false; + + /* release all wake sources */ + vote(chip->awake_votable, GOOD_OCV_VOTER, false, 0); + vote(chip->awake_votable, FIFO_DONE_VOTER, false, 0); + vote(chip->awake_votable, FIFO_RT_DONE_VOTER, false, 0); + vote(chip->awake_votable, SUSPEND_DATA_VOTER, false, 0); + + qg_dbg(chip, QG_DEBUG_DEVICE, + "QG device read complete Seq_no=%u Size=%ld\n", + chip->kdata.seq_no, data_size); + + /* clear data */ + memset(&chip->kdata, 0, sizeof(chip->kdata)); + + mutex_unlock(&chip->data_lock); + + return data_size; + +fail_read: + mutex_unlock(&chip->data_lock); + return rc; +} + +static ssize_t qg_device_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int rc = -EINVAL; + struct qpnp_qg *chip = file->private_data; + unsigned long data_size = sizeof(chip->udata); + + mutex_lock(&chip->data_lock); + if (count == 0) { + pr_err("No data!\n"); + goto fail; + } + + if (count != 0 && count < data_size) { + pr_err("Invalid datasize %zu expected %lu\n", count, data_size); + goto fail; + } + + if (copy_from_user(&chip->udata, buf, data_size)) { + pr_err("Failed in copy_from_user\n"); + rc = -EFAULT; + goto fail; + } + + rc = data_size; + vote(chip->awake_votable, UDATA_READY_VOTER, true, 0); + schedule_work(&chip->udata_work); + qg_dbg(chip, QG_DEBUG_DEVICE, "QG write complete size=%d\n", rc); +fail: + mutex_unlock(&chip->data_lock); + return rc; +} + +static unsigned int qg_device_poll(struct file *file, poll_table *wait) +{ + struct qpnp_qg *chip = file->private_data; + unsigned int mask = 0; + + poll_wait(file, &chip->qg_wait_q, wait); + + if (chip->data_ready) + mask = POLLIN | POLLRDNORM; + + return mask; +} + 
+static int qg_device_open(struct inode *inode, struct file *file) +{ + struct qpnp_qg *chip = container_of(inode->i_cdev, + struct qpnp_qg, qg_cdev); + + file->private_data = chip; + chip->qg_device_open = true; + qg_dbg(chip, QG_DEBUG_DEVICE, "QG device opened!\n"); + + return 0; +} + +static int qg_device_release(struct inode *inode, struct file *file) +{ + struct qpnp_qg *chip = container_of(inode->i_cdev, + struct qpnp_qg, qg_cdev); + + file->private_data = chip; + chip->qg_device_open = false; + qg_dbg(chip, QG_DEBUG_DEVICE, "QG device closed!\n"); + + return 0; +} + +static const struct file_operations qg_fops = { + .owner = THIS_MODULE, + .open = qg_device_open, + .release = qg_device_release, + .read = qg_device_read, + .write = qg_device_write, + .poll = qg_device_poll, +}; + +static int qg_register_device(struct qpnp_qg *chip) +{ + int rc; + + rc = alloc_chrdev_region(&chip->dev_no, 0, 1, "qg"); + if (rc < 0) { + pr_err("Failed to allocate chardev rc=%d\n", rc); + return rc; + } + + cdev_init(&chip->qg_cdev, &qg_fops); + rc = cdev_add(&chip->qg_cdev, chip->dev_no, 1); + if (rc < 0) { + pr_err("Failed to cdev_add rc=%d\n", rc); + goto unregister_chrdev; + } + + chip->qg_class = class_create(THIS_MODULE, "qg"); + if (IS_ERR_OR_NULL(chip->qg_class)) { + pr_err("Failed to create qg class\n"); + rc = -EINVAL; + goto delete_cdev; + } + chip->qg_device = device_create(chip->qg_class, NULL, chip->dev_no, + NULL, "qg"); + if (IS_ERR(chip->qg_device)) { + pr_err("Failed to create qg_device\n"); + rc = -EINVAL; + goto destroy_class; + } + + qg_dbg(chip, QG_DEBUG_DEVICE, "'/dev/qg' successfully created\n"); + + return 0; + +destroy_class: + class_destroy(chip->qg_class); +delete_cdev: + cdev_del(&chip->qg_cdev); +unregister_chrdev: + unregister_chrdev_region(chip->dev_no, 1); + return rc; +} + +#define BID_RPULL_OHM 100000 +#define BID_VREF_MV 1875 +static int get_batt_id_ohm(struct qpnp_qg *chip, u32 *batt_id_ohm) +{ + int rc, batt_id_mv; + int64_t denom; + + /* 
Read battery-id */ + rc = iio_read_channel_processed(chip->batt_id_chan, &batt_id_mv); + if (rc) { + pr_err("Failed to read BATT_ID over ADC, rc=%d\n", rc); + return rc; + } + + batt_id_mv = div_s64(batt_id_mv, 1000); + if (batt_id_mv == 0) { + pr_debug("batt_id_mv = 0 from ADC\n"); + return 0; + } + + denom = div64_s64(BID_VREF_MV * 1000, batt_id_mv) - 1000; + if (denom <= 0) { + /* batt id connector might be open, return 0 kohms */ + return 0; + } + + *batt_id_ohm = div64_u64(BID_RPULL_OHM * 1000 + denom / 2, denom); + + qg_dbg(chip, QG_DEBUG_PROFILE, "batt_id_mv=%d, batt_id_ohm=%d\n", + batt_id_mv, *batt_id_ohm); + + return 0; +} + +static int qg_load_battery_profile(struct qpnp_qg *chip) +{ + struct device_node *node = chip->dev->of_node; + struct device_node *batt_node, *profile_node; + int rc; + + batt_node = of_find_node_by_name(node, "qcom,battery-data"); + if (!batt_node) { + pr_err("Batterydata not available\n"); + return -ENXIO; + } + + profile_node = of_batterydata_get_best_profile(batt_node, + chip->batt_id_ohm / 1000, NULL); + if (IS_ERR(profile_node)) { + rc = PTR_ERR(profile_node); + pr_err("Failed to detect valid QG battery profile %d\n", rc); + return rc; + } + + rc = of_property_read_string(profile_node, "qcom,battery-type", + &chip->bp.batt_type_str); + if (rc < 0) { + pr_err("Failed to detect battery type rc:%d\n", rc); + return rc; + } + + rc = qg_batterydata_init(profile_node); + if (rc < 0) { + pr_err("Failed to initialize battery-profile rc=%d\n", rc); + return rc; + } + + rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv", + &chip->bp.float_volt_uv); + if (rc < 0) { + pr_err("Failed to read battery float-voltage rc:%d\n", rc); + chip->bp.float_volt_uv = -EINVAL; + } + + rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma", + &chip->bp.fastchg_curr_ma); + if (rc < 0) { + pr_err("Failed to read battery fastcharge current rc:%d\n", rc); + chip->bp.fastchg_curr_ma = -EINVAL; + } + + rc = 
of_property_read_u32(profile_node, "qcom,qg-batt-profile-ver", + &chip->bp.qg_profile_version); + if (rc < 0) { + pr_err("Failed to read QG profile version rc:%d\n", rc); + chip->bp.qg_profile_version = -EINVAL; + } + + qg_dbg(chip, QG_DEBUG_PROFILE, "profile=%s FV=%duV FCC=%dma\n", + chip->bp.batt_type_str, chip->bp.float_volt_uv, + chip->bp.fastchg_curr_ma); + + return 0; +} + +static int qg_setup_battery(struct qpnp_qg *chip) +{ + int rc; + + if (!is_battery_present(chip)) { + qg_dbg(chip, QG_DEBUG_PROFILE, "Battery Missing!\n"); + chip->battery_missing = true; + chip->profile_loaded = false; + } else { + /* battery present */ + rc = get_batt_id_ohm(chip, &chip->batt_id_ohm); + if (rc < 0) { + pr_err("Failed to detect batt_id rc=%d\n", rc); + chip->profile_loaded = false; + } else { + rc = qg_load_battery_profile(chip); + if (rc < 0) + pr_err("Failed to load battery-profile rc=%d\n", + rc); + else + chip->profile_loaded = true; + } + } + + qg_dbg(chip, QG_DEBUG_PROFILE, "battery_missing=%d batt_id_ohm=%d Ohm profile_loaded=%d profile=%s\n", + chip->battery_missing, chip->batt_id_ohm, + chip->profile_loaded, chip->bp.batt_type_str); + + return 0; +} + +static int qg_determine_pon_soc(struct qpnp_qg *chip) +{ + int rc = 0, batt_temp = 0; + bool use_pon_ocv = true; + unsigned long rtc_sec = 0; + u32 ocv_uv = 0, ocv_raw = 0, soc = 0, shutdown[SDAM_MAX] = {0}; + char ocv_type[20] = "NONE"; + + if (!chip->profile_loaded) { + qg_dbg(chip, QG_DEBUG_PON, "No Profile, skipping PON soc\n"); + return 0; + } + + rc = get_rtc_time(&rtc_sec); + if (rc < 0) { + pr_err("Failed to read RTC time rc=%d\n", rc); + goto use_pon_ocv; + } + + rc = qg_sdam_read_all(shutdown); + if (rc < 0) { + pr_err("Failed to read shutdown params rc=%d\n", rc); + goto use_pon_ocv; + } + + qg_dbg(chip, QG_DEBUG_PON, "Shutdown: Valid=%d SOC=%d OCV=%duV time=%dsecs, time_now=%ldsecs\n", + shutdown[SDAM_VALID], + shutdown[SDAM_SOC], + shutdown[SDAM_OCV_UV], + shutdown[SDAM_TIME_SEC], + rtc_sec); + /* + * 
Use the shutdown SOC if + * 1. The device was powered off for < ignore_shutdown_time + * 2. SDAM read is a success & SDAM data is valid + */ + if (shutdown[SDAM_VALID] && is_between(0, + chip->dt.ignore_shutdown_soc_secs, + (rtc_sec - shutdown[SDAM_TIME_SEC]))) { + use_pon_ocv = false; + ocv_uv = shutdown[SDAM_OCV_UV]; + soc = shutdown[SDAM_SOC]; + strlcpy(ocv_type, "SHUTDOWN_SOC", 20); + qg_dbg(chip, QG_DEBUG_PON, "Using SHUTDOWN_SOC @ PON\n"); + } + +use_pon_ocv: + if (use_pon_ocv == true) { + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc) { + pr_err("Failed to read BATT_TEMP at PON rc=%d\n", rc); + goto done; + } + + /* + * Read S3_LAST_OCV, if S3_LAST_OCV is invalid, + * read the SDAM_PON_OCV + * if SDAM is not-set, use S7_PON_OCV. + */ + strlcpy(ocv_type, "S3_LAST_SOC", 20); + rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S3_LAST_OCV); + if (rc < 0) + goto done; + + if (ocv_raw == FIFO_V_RESET_VAL) { + /* S3_LAST_OCV is invalid */ + strlcpy(ocv_type, "SDAM_PON_SOC", 20); + rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, SDAM_PON_OCV); + if (rc < 0) + goto done; + + if (!ocv_uv) { + /* SDAM_PON_OCV is not set */ + strlcpy(ocv_type, "S7_PON_SOC", 20); + rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, + S7_PON_OCV); + if (rc < 0) + goto done; + } + } + + rc = lookup_soc_ocv(&soc, ocv_uv, batt_temp, false); + if (rc < 0) { + pr_err("Failed to lookup SOC@PON rc=%d\n", rc); + goto done; + } + } +done: + if (rc < 0) { + pr_err("Failed to get %s @ PON, rc=%d\n", ocv_type, rc); + return rc; + } + + chip->pon_soc = chip->catch_up_soc = chip->msoc = soc; + chip->kdata.param[QG_PON_OCV_UV].data = ocv_uv; + chip->kdata.param[QG_PON_OCV_UV].valid = true; + + /* write back to SDAM */ + chip->sdam_data[SDAM_SOC] = soc; + chip->sdam_data[SDAM_OCV_UV] = ocv_uv; + chip->sdam_data[SDAM_VALID] = 1; + + rc = qg_write_monotonic_soc(chip, chip->msoc); + if (rc < 0) + pr_err("Failed to update MSOC register rc=%d\n", rc); + + rc = qg_update_sdam_params(chip); + if (rc < 0) + pr_err("Failed to 
update sdam params rc=%d\n", rc); + + pr_info("using %s @ PON ocv_uv=%duV soc=%d\n", + ocv_type, ocv_uv, chip->msoc); + + return 0; +} + +static int qg_set_wa_flags(struct qpnp_qg *chip) +{ + switch (chip->pmic_rev_id->pmic_subtype) { + case PMI632_SUBTYPE: + chip->wa_flags |= QG_RECHARGE_SOC_WA; + if (chip->pmic_rev_id->rev4 == PMI632_V1P0_REV4) + chip->wa_flags |= QG_VBAT_LOW_WA; + break; + default: + pr_err("Unsupported PMIC subtype %d\n", + chip->pmic_rev_id->pmic_subtype); + return -EINVAL; + } + + qg_dbg(chip, QG_DEBUG_PON, "wa_flags = %x\n", chip->wa_flags); + + return 0; +} + +static int qg_hw_init(struct qpnp_qg *chip) +{ + int rc, temp; + u8 reg; + + rc = qg_set_wa_flags(chip); + if (rc < 0) { + pr_err("Failed to update PMIC type flags, rc=%d\n", rc); + return rc; + } + + rc = qg_master_hold(chip, true); + if (rc < 0) { + pr_err("Failed to hold master, rc=%d\n", rc); + goto done_fifo; + } + + rc = qg_process_rt_fifo(chip); + if (rc < 0) { + pr_err("Failed to process FIFO real-time, rc=%d\n", rc); + goto done_fifo; + } + + /* update the changed S2 fifo DT parameters */ + if (chip->dt.s2_fifo_length > 0) { + rc = qg_update_fifo_length(chip, chip->dt.s2_fifo_length); + if (rc < 0) + goto done_fifo; + } + + if (chip->dt.s2_acc_length > 0) { + reg = ilog2(chip->dt.s2_acc_length) - 1; + rc = qg_masked_write(chip, chip->qg_base + + QG_S2_NORMAL_MEAS_CTL2_REG, + NUM_OF_ACCUM_MASK, reg); + if (rc < 0) { + pr_err("Failed to write S2 ACC length, rc=%d\n", rc); + goto done_fifo; + } + } + + if (chip->dt.s2_acc_intvl_ms > 0) { + reg = chip->dt.s2_acc_intvl_ms / 10; + rc = qg_write(chip, chip->qg_base + + QG_S2_NORMAL_MEAS_CTL3_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to write S2 ACC intrvl, rc=%d\n", rc); + goto done_fifo; + } + } + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); + +done_fifo: + rc = qg_master_hold(chip, false); + if (rc < 0) { + pr_err("Failed to release master, rc=%d\n", rc); + return rc; + 
} + chip->last_fifo_update_time = ktime_get(); + + if (chip->dt.ocv_timer_expiry_min != -EINVAL) { + if (chip->dt.ocv_timer_expiry_min < 2) + chip->dt.ocv_timer_expiry_min = 2; + else if (chip->dt.ocv_timer_expiry_min > 30) + chip->dt.ocv_timer_expiry_min = 30; + + reg = (chip->dt.ocv_timer_expiry_min - 2) / 4; + rc = qg_masked_write(chip, + chip->qg_base + QG_S3_SLEEP_OCV_MEAS_CTL4_REG, + SLEEP_IBAT_QUALIFIED_LENGTH_MASK, reg); + if (rc < 0) { + pr_err("Failed to write OCV timer, rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.ocv_tol_threshold_uv != -EINVAL) { + if (chip->dt.ocv_tol_threshold_uv < 0) + chip->dt.ocv_tol_threshold_uv = 0; + else if (chip->dt.ocv_tol_threshold_uv > 12262) + chip->dt.ocv_tol_threshold_uv = 12262; + + reg = chip->dt.ocv_tol_threshold_uv / 195; + rc = qg_masked_write(chip, + chip->qg_base + QG_S3_SLEEP_OCV_TREND_CTL2_REG, + TREND_TOL_MASK, reg); + if (rc < 0) { + pr_err("Failed to write OCV tol-thresh, rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.s3_entry_fifo_length != -EINVAL) { + if (chip->dt.s3_entry_fifo_length < 1) + chip->dt.s3_entry_fifo_length = 1; + else if (chip->dt.s3_entry_fifo_length > 8) + chip->dt.s3_entry_fifo_length = 8; + + reg = chip->dt.s3_entry_fifo_length - 1; + rc = qg_masked_write(chip, + chip->qg_base + QG_S3_SLEEP_OCV_IBAT_CTL1_REG, + SLEEP_IBAT_QUALIFIED_LENGTH_MASK, reg); + if (rc < 0) { + pr_err("Failed to write S3-entry fifo-length, rc=%d\n", + rc); + return rc; + } + } + + if (chip->dt.s3_entry_ibat_ua != -EINVAL) { + if (chip->dt.s3_entry_ibat_ua < 0) + chip->dt.s3_entry_ibat_ua = 0; + else if (chip->dt.s3_entry_ibat_ua > 155550) + chip->dt.s3_entry_ibat_ua = 155550; + + reg = chip->dt.s3_entry_ibat_ua / 610; + rc = qg_write(chip, chip->qg_base + + QG_S3_ENTRY_IBAT_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to write S3-entry ibat-uA, rc=%d\n", rc); + return rc; + } + } + + if (chip->dt.s3_exit_ibat_ua != -EINVAL) { + if (chip->dt.s3_exit_ibat_ua < 0) + chip->dt.s3_exit_ibat_ua 
= 0; + else if (chip->dt.s3_exit_ibat_ua > 155550) + chip->dt.s3_exit_ibat_ua = 155550; + + rc = qg_read(chip, chip->qg_base + + QG_S3_ENTRY_IBAT_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to read S3-entry ibat-uA, rc=%d", rc); + return rc; + } + temp = reg * 610; + if (chip->dt.s3_exit_ibat_ua < temp) + chip->dt.s3_exit_ibat_ua = temp; + else + chip->dt.s3_exit_ibat_ua -= temp; + + reg = chip->dt.s3_exit_ibat_ua / 610; + rc = qg_write(chip, + chip->qg_base + QG_S3_EXIT_IBAT_THRESHOLD_REG, + ®, 1); + if (rc < 0) { + pr_err("Failed to write S3-entry ibat-uA, rc=%d\n", rc); + return rc; + } + } + + /* vbat based configs */ + if (chip->dt.vbatt_low_mv < 0) + chip->dt.vbatt_low_mv = 0; + else if (chip->dt.vbatt_low_mv > 12750) + chip->dt.vbatt_low_mv = 12750; + + if (chip->dt.vbatt_empty_mv < 0) + chip->dt.vbatt_empty_mv = 0; + else if (chip->dt.vbatt_empty_mv > 12750) + chip->dt.vbatt_empty_mv = 12750; + + if (chip->dt.vbatt_empty_cold_mv < 0) + chip->dt.vbatt_empty_cold_mv = 0; + else if (chip->dt.vbatt_empty_cold_mv > 12750) + chip->dt.vbatt_empty_cold_mv = 12750; + + rc = qg_vbat_thresholds_config(chip); + if (rc < 0) { + pr_err("Failed to configure VBAT empty/low rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int qg_post_init(struct qpnp_qg *chip) +{ + /* disable all IRQs if profile is not loaded */ + if (!chip->profile_loaded) { + vote(chip->vbatt_irq_disable_votable, + PROFILE_IRQ_DISABLE, true, 0); + vote(chip->fifo_irq_disable_votable, + PROFILE_IRQ_DISABLE, true, 0); + vote(chip->good_ocv_irq_disable_votable, + PROFILE_IRQ_DISABLE, true, 0); + } else { + /* disable GOOD_OCV IRQ at init */ + vote(chip->good_ocv_irq_disable_votable, + QG_INIT_STATE_IRQ_DISABLE, true, 0); + } + + return 0; +} + +static int qg_get_irq_index_byname(const char *irq_name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(qg_irqs); i++) { + if (strcmp(qg_irqs[i].name, irq_name) == 0) + return i; + } + + return -ENOENT; +} + +static int qg_request_interrupt(struct 
qpnp_qg *chip, + struct device_node *node, const char *irq_name) +{ + int rc, irq, irq_index; + + irq = of_irq_get_byname(node, irq_name); + if (irq < 0) { + pr_err("Failed to get irq %s byname\n", irq_name); + return irq; + } + + irq_index = qg_get_irq_index_byname(irq_name); + if (irq_index < 0) { + pr_err("%s is not a defined irq\n", irq_name); + return irq_index; + } + + if (!qg_irqs[irq_index].handler) + return 0; + + rc = devm_request_threaded_irq(chip->dev, irq, NULL, + qg_irqs[irq_index].handler, + IRQF_ONESHOT, irq_name, chip); + if (rc < 0) { + pr_err("Failed to request irq %d\n", irq); + return rc; + } + + qg_irqs[irq_index].irq = irq; + if (qg_irqs[irq_index].wake) + enable_irq_wake(irq); + + qg_dbg(chip, QG_DEBUG_PON, "IRQ %s registered wakeable=%d\n", + qg_irqs[irq_index].name, qg_irqs[irq_index].wake); + + return 0; +} + +static int qg_request_irqs(struct qpnp_qg *chip) +{ + struct device_node *node = chip->dev->of_node; + struct device_node *child; + const char *name; + struct property *prop; + int rc = 0; + + for_each_available_child_of_node(node, child) { + of_property_for_each_string(child, "interrupt-names", + prop, name) { + rc = qg_request_interrupt(chip, child, name); + if (rc < 0) + return rc; + } + } + + + return 0; +} + +static int qg_alg_init(struct qpnp_qg *chip) +{ + struct cycle_counter *counter; + struct cap_learning *cl; + struct device_node *node = chip->dev->of_node; + int rc; + + counter = devm_kzalloc(chip->dev, sizeof(*counter), GFP_KERNEL); + if (!counter) + return -ENOMEM; + + counter->restore_count = qg_restore_cycle_count; + counter->store_count = qg_store_cycle_count; + counter->data = chip; + + rc = cycle_count_init(counter); + if (rc < 0) { + dev_err(chip->dev, "Error in initializing cycle counter, rc:%d\n", + rc); + counter->data = NULL; + devm_kfree(chip->dev, counter); + return rc; + } + + chip->counter = counter; + + chip->dt.cl_disable = of_property_read_bool(node, + "qcom,cl-disable"); + + /*Return if capacity 
learning is disabled*/ + if (chip->dt.cl_disable) + return 0; + + cl = devm_kzalloc(chip->dev, sizeof(*cl), GFP_KERNEL); + if (!cl) + return -ENOMEM; + + cl->cc_soc_max = QG_SOC_FULL; + cl->get_cc_soc = qg_get_cc_soc; + cl->get_learned_capacity = qg_get_learned_capacity; + cl->store_learned_capacity = qg_store_learned_capacity; + cl->data = chip; + + rc = cap_learning_init(cl); + if (rc < 0) { + dev_err(chip->dev, "Error in initializing capacity learning, rc:%d\n", + rc); + counter->data = NULL; + cl->data = NULL; + devm_kfree(chip->dev, counter); + devm_kfree(chip->dev, cl); + return rc; + } + + chip->cl = cl; + return 0; +} + +#define DEFAULT_VBATT_EMPTY_MV 3200 +#define DEFAULT_VBATT_EMPTY_COLD_MV 3000 +#define DEFAULT_VBATT_CUTOFF_MV 3400 +#define DEFAULT_VBATT_LOW_MV 3500 +#define DEFAULT_VBATT_LOW_COLD_MV 3800 +#define DEFAULT_ITERM_MA 100 +#define DEFAULT_S2_FIFO_LENGTH 5 +#define DEFAULT_S2_VBAT_LOW_LENGTH 2 +#define DEFAULT_S2_ACC_LENGTH 128 +#define DEFAULT_S2_ACC_INTVL_MS 100 +#define DEFAULT_DELTA_SOC 1 +#define DEFAULT_SHUTDOWN_SOC_SECS 360 +#define DEFAULT_COLD_TEMP_THRESHOLD 0 +#define DEFAULT_CL_MIN_START_SOC 10 +#define DEFAULT_CL_MAX_START_SOC 15 +#define DEFAULT_CL_MIN_TEMP_DECIDEGC 150 +#define DEFAULT_CL_MAX_TEMP_DECIDEGC 500 +#define DEFAULT_CL_MAX_INC_DECIPERC 5 +#define DEFAULT_CL_MAX_DEC_DECIPERC 100 +#define DEFAULT_CL_MIN_LIM_DECIPERC 0 +#define DEFAULT_CL_MAX_LIM_DECIPERC 0 +static int qg_parse_dt(struct qpnp_qg *chip) +{ + int rc = 0; + struct device_node *revid_node, *child, *node = chip->dev->of_node; + u32 base, temp; + u8 type; + + if (!node) { + pr_err("Failed to find device-tree node\n"); + return -ENXIO; + } + + revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0); + if (!revid_node) { + pr_err("Missing qcom,pmic-revid property - driver failed\n"); + return -EINVAL; + } + + chip->pmic_rev_id = get_revid_data(revid_node); + of_node_put(revid_node); + if (IS_ERR_OR_NULL(chip->pmic_rev_id)) { + pr_err("Failed to get pmic_revid, 
rc=%ld\n", + PTR_ERR(chip->pmic_rev_id)); + /* + * the revid peripheral must be registered, any failure + * here only indicates that the rev-id module has not + * probed yet. + */ + return -EPROBE_DEFER; + } + + qg_dbg(chip, QG_DEBUG_PON, "PMIC subtype %d Digital major %d\n", + chip->pmic_rev_id->pmic_subtype, chip->pmic_rev_id->rev4); + + for_each_available_child_of_node(node, child) { + rc = of_property_read_u32(child, "reg", &base); + if (rc < 0) { + pr_err("Failed to read base address, rc=%d\n", rc); + return rc; + } + + rc = qg_read(chip, base + PERPH_TYPE_REG, &type, 1); + if (rc < 0) { + pr_err("Failed to read type, rc=%d\n", rc); + return rc; + } + + switch (type) { + case QG_TYPE: + chip->qg_base = base; + break; + default: + break; + } + } + + if (!chip->qg_base) { + pr_err("QG device node missing\n"); + return -EINVAL; + } + + /* S2 state params */ + rc = of_property_read_u32(node, "qcom,s2-fifo-length", &temp); + if (rc < 0) + chip->dt.s2_fifo_length = DEFAULT_S2_FIFO_LENGTH; + else + chip->dt.s2_fifo_length = temp; + + rc = of_property_read_u32(node, "qcom,s2-vbat-low-fifo-length", &temp); + if (rc < 0) + chip->dt.s2_vbat_low_fifo_length = DEFAULT_S2_VBAT_LOW_LENGTH; + else + chip->dt.s2_vbat_low_fifo_length = temp; + + rc = of_property_read_u32(node, "qcom,s2-acc-length", &temp); + if (rc < 0) + chip->dt.s2_acc_length = DEFAULT_S2_ACC_LENGTH; + else + chip->dt.s2_acc_length = temp; + + rc = of_property_read_u32(node, "qcom,s2-acc-interval-ms", &temp); + if (rc < 0) + chip->dt.s2_acc_intvl_ms = DEFAULT_S2_ACC_INTVL_MS; + else + chip->dt.s2_acc_intvl_ms = temp; + + qg_dbg(chip, QG_DEBUG_PON, "DT: S2 FIFO length=%d low_vbat_length=%d acc_length=%d acc_interval=%d\n", + chip->dt.s2_fifo_length, chip->dt.s2_vbat_low_fifo_length, + chip->dt.s2_acc_length, chip->dt.s2_acc_intvl_ms); + + /* OCV params */ + rc = of_property_read_u32(node, "qcom,ocv-timer-expiry-min", &temp); + if (rc < 0) + chip->dt.ocv_timer_expiry_min = -EINVAL; + else + 
chip->dt.ocv_timer_expiry_min = temp; + + rc = of_property_read_u32(node, "qcom,ocv-tol-threshold-uv", &temp); + if (rc < 0) + chip->dt.ocv_tol_threshold_uv = -EINVAL; + else + chip->dt.ocv_tol_threshold_uv = temp; + + qg_dbg(chip, QG_DEBUG_PON, "DT: OCV timer_expiry =%dmin ocv_tol_threshold=%duV\n", + chip->dt.ocv_timer_expiry_min, chip->dt.ocv_tol_threshold_uv); + + /* S3 sleep configuration */ + rc = of_property_read_u32(node, "qcom,s3-entry-fifo-length", &temp); + if (rc < 0) + chip->dt.s3_entry_fifo_length = -EINVAL; + else + chip->dt.s3_entry_fifo_length = temp; + + rc = of_property_read_u32(node, "qcom,s3-entry-ibat-ua", &temp); + if (rc < 0) + chip->dt.s3_entry_ibat_ua = -EINVAL; + else + chip->dt.s3_entry_ibat_ua = temp; + + rc = of_property_read_u32(node, "qcom,s3-exit-ibat-ua", &temp); + if (rc < 0) + chip->dt.s3_exit_ibat_ua = -EINVAL; + else + chip->dt.s3_exit_ibat_ua = temp; + + /* VBAT thresholds */ + rc = of_property_read_u32(node, "qcom,vbatt-empty-mv", &temp); + if (rc < 0) + chip->dt.vbatt_empty_mv = DEFAULT_VBATT_EMPTY_MV; + else + chip->dt.vbatt_empty_mv = temp; + + rc = of_property_read_u32(node, "qcom,vbatt-empty-cold-mv", &temp); + if (rc < 0) + chip->dt.vbatt_empty_cold_mv = DEFAULT_VBATT_EMPTY_COLD_MV; + else + chip->dt.vbatt_empty_cold_mv = temp; + + rc = of_property_read_u32(node, "qcom,cold-temp-threshold", &temp); + if (rc < 0) + chip->dt.cold_temp_threshold = DEFAULT_COLD_TEMP_THRESHOLD; + else + chip->dt.cold_temp_threshold = temp; + + rc = of_property_read_u32(node, "qcom,vbatt-low-mv", &temp); + if (rc < 0) + chip->dt.vbatt_low_mv = DEFAULT_VBATT_LOW_MV; + else + chip->dt.vbatt_low_mv = temp; + + rc = of_property_read_u32(node, "qcom,vbatt-low-cold-mv", &temp); + if (rc < 0) + chip->dt.vbatt_low_cold_mv = DEFAULT_VBATT_LOW_COLD_MV; + else + chip->dt.vbatt_low_cold_mv = temp; + + rc = of_property_read_u32(node, "qcom,vbatt-cutoff-mv", &temp); + if (rc < 0) + chip->dt.vbatt_cutoff_mv = DEFAULT_VBATT_CUTOFF_MV; + else + 
chip->dt.vbatt_cutoff_mv = temp; + + /* IBAT thresholds */ + rc = of_property_read_u32(node, "qcom,qg-iterm-ma", &temp); + if (rc < 0) + chip->dt.iterm_ma = DEFAULT_ITERM_MA; + else + chip->dt.iterm_ma = temp; + + rc = of_property_read_u32(node, "qcom,delta-soc", &temp); + if (rc < 0) + chip->dt.delta_soc = DEFAULT_DELTA_SOC; + else + chip->dt.delta_soc = temp; + + rc = of_property_read_u32(node, "qcom,ignore-shutdown-soc-secs", &temp); + if (rc < 0) + chip->dt.ignore_shutdown_soc_secs = DEFAULT_SHUTDOWN_SOC_SECS; + else + chip->dt.ignore_shutdown_soc_secs = temp; + + chip->dt.hold_soc_while_full = of_property_read_bool(node, + "qcom,hold-soc-while-full"); + + chip->dt.linearize_soc = of_property_read_bool(node, + "qcom,linearize-soc"); + + rc = of_property_read_u32(node, "qcom,rbat-conn-mohm", &temp); + if (rc < 0) + chip->dt.rbat_conn_mohm = 0; + else + chip->dt.rbat_conn_mohm = temp; + + /* Capacity learning params*/ + if (!chip->dt.cl_disable) { + chip->dt.cl_feedback_on = of_property_read_bool(node, + "qcom,cl-feedback-on"); + + rc = of_property_read_u32(node, "qcom,cl-min-start-soc", &temp); + if (rc < 0) + chip->cl->dt.min_start_soc = DEFAULT_CL_MIN_START_SOC; + else + chip->cl->dt.min_start_soc = temp; + + rc = of_property_read_u32(node, "qcom,cl-max-start-soc", &temp); + if (rc < 0) + chip->cl->dt.max_start_soc = DEFAULT_CL_MAX_START_SOC; + else + chip->cl->dt.max_start_soc = temp; + + rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp); + if (rc < 0) + chip->cl->dt.min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC; + else + chip->cl->dt.min_temp = temp; + + rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp); + if (rc < 0) + chip->cl->dt.max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC; + else + chip->cl->dt.max_temp = temp; + + rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp); + if (rc < 0) + chip->cl->dt.max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC; + else + chip->cl->dt.max_cap_inc = temp; + + rc = of_property_read_u32(node, 
"qcom,cl-max-decrement", &temp); + if (rc < 0) + chip->cl->dt.max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC; + else + chip->cl->dt.max_cap_dec = temp; + + rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp); + if (rc < 0) + chip->cl->dt.min_cap_limit = + DEFAULT_CL_MIN_LIM_DECIPERC; + else + chip->cl->dt.min_cap_limit = temp; + + rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp); + if (rc < 0) + chip->cl->dt.max_cap_limit = + DEFAULT_CL_MAX_LIM_DECIPERC; + else + chip->cl->dt.max_cap_limit = temp; + + qg_dbg(chip, QG_DEBUG_PON, "DT: cl_min_start_soc=%d cl_max_start_soc=%d cl_min_temp=%d cl_max_temp=%d\n", + chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc, + chip->cl->dt.min_temp, chip->cl->dt.max_temp); + } + qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d\n", + chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv, + chip->dt.delta_soc); + + return 0; +} + +static int process_suspend(struct qpnp_qg *chip) +{ + u8 status = 0; + int rc; + u32 fifo_rt_length = 0, sleep_fifo_length = 0; + + /* skip if profile is not loaded */ + if (!chip->profile_loaded) + return 0; + + /* disable GOOD_OCV IRQ in sleep */ + vote(chip->good_ocv_irq_disable_votable, + QG_INIT_STATE_IRQ_DISABLE, true, 0); + + chip->suspend_data = false; + + /* ignore any suspend processing if we are charging */ + if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) { + qg_dbg(chip, QG_DEBUG_PM, "Charging @ suspend - ignore processing\n"); + return 0; + } + + rc = get_fifo_length(chip, &fifo_rt_length, true); + if (rc < 0) { + pr_err("Failed to read FIFO RT count, rc=%d\n", rc); + return rc; + } + + rc = qg_read(chip, chip->qg_base + QG_S3_SLEEP_OCV_IBAT_CTL1_REG, + (u8 *)&sleep_fifo_length, 1); + if (rc < 0) { + pr_err("Failed to read sleep FIFO count, rc=%d\n", rc); + return rc; + } + sleep_fifo_length &= SLEEP_IBAT_QUALIFIED_LENGTH_MASK; + /* + * If the real-time FIFO count is greater than + * the the #fifo to enter sleep, save the FIFO data + * and 
reset the fifo count. + */ + if (fifo_rt_length >= (chip->dt.s2_fifo_length - sleep_fifo_length)) { + rc = qg_master_hold(chip, true); + if (rc < 0) { + pr_err("Failed to hold master, rc=%d\n", rc); + return rc; + } + + rc = qg_process_rt_fifo(chip); + if (rc < 0) { + pr_err("Failed to process FIFO real-time, rc=%d\n", rc); + qg_master_hold(chip, false); + return rc; + } + + rc = qg_master_hold(chip, false); + if (rc < 0) { + pr_err("Failed to release master, rc=%d\n", rc); + return rc; + } + /* FIFOs restarted */ + chip->last_fifo_update_time = ktime_get(); + + chip->suspend_data = true; + } + + /* read STATUS2 register to clear its last state */ + qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1); + + qg_dbg(chip, QG_DEBUG_PM, "FIFO rt_length=%d sleep_fifo_length=%d default_s2_count=%d suspend_data=%d\n", + fifo_rt_length, sleep_fifo_length, + chip->dt.s2_fifo_length, chip->suspend_data); + + return rc; +} + +static int process_resume(struct qpnp_qg *chip) +{ + u8 status2 = 0, rt_status = 0; + u32 ocv_uv = 0, ocv_raw = 0; + int rc, batt_temp = 0; + + /* skip if profile is not loaded */ + if (!chip->profile_loaded) + return 0; + + /* enable GOOD_OCV IRQ when awake */ + vote(chip->good_ocv_irq_disable_votable, + QG_INIT_STATE_IRQ_DISABLE, false, 0); + + rc = qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status2, 1); + if (rc < 0) { + pr_err("Failed to read status2 register, rc=%d\n", rc); + return rc; + } + + if (status2 & GOOD_OCV_BIT) { + rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S3_GOOD_OCV); + if (rc < 0) { + pr_err("Failed to read good_ocv, rc=%d\n", rc); + return rc; + } + rc = qg_get_battery_temp(chip, &batt_temp); + if (rc < 0) { + pr_err("Failed to read BATT_TEMP, rc=%d\n", rc); + return rc; + } + + chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv; + chip->kdata.param[QG_GOOD_OCV_UV].valid = true; + /* Clear suspend data as there has been a GOOD OCV */ + memset(&chip->kdata, 0, sizeof(chip->kdata)); + chip->suspend_data = false; + + qg_dbg(chip, 
QG_DEBUG_PM, "GOOD OCV @ resume good_ocv=%d uV\n", + ocv_uv); + } + + rc = qg_read(chip, chip->qg_base + QG_INT_LATCHED_STS_REG, + &rt_status, 1); + if (rc < 0) { + pr_err("Failed to read latched status register, rc=%d\n", rc); + return rc; + } + rt_status &= FIFO_UPDATE_DONE_INT_LAT_STS_BIT; + + qg_dbg(chip, QG_DEBUG_PM, "FIFO_DONE_STS=%d suspend_data=%d good_ocv=%d\n", + !!rt_status, chip->suspend_data, + chip->kdata.param[QG_GOOD_OCV_UV].valid); + /* + * If this is not a wakeup from FIFO-done, + * process the data immediately if - we have data from + * suspend or there is a good OCV. + */ + if (!rt_status && (chip->suspend_data || + chip->kdata.param[QG_GOOD_OCV_UV].valid)) { + vote(chip->awake_votable, SUSPEND_DATA_VOTER, true, 0); + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); + chip->suspend_data = false; + } + + return rc; +} + +static int qpnp_qg_suspend_noirq(struct device *dev) +{ + int rc; + struct qpnp_qg *chip = dev_get_drvdata(dev); + + mutex_lock(&chip->data_lock); + + rc = process_suspend(chip); + if (rc < 0) + pr_err("Failed to process QG suspend, rc=%d\n", rc); + + mutex_unlock(&chip->data_lock); + + return 0; +} + +static int qpnp_qg_resume_noirq(struct device *dev) +{ + int rc; + struct qpnp_qg *chip = dev_get_drvdata(dev); + + mutex_lock(&chip->data_lock); + + rc = process_resume(chip); + if (rc < 0) + pr_err("Failed to process QG resume, rc=%d\n", rc); + + mutex_unlock(&chip->data_lock); + + return 0; +} + +static const struct dev_pm_ops qpnp_qg_pm_ops = { + .suspend_noirq = qpnp_qg_suspend_noirq, + .resume_noirq = qpnp_qg_resume_noirq, +}; + +static int qpnp_qg_probe(struct platform_device *pdev) +{ + int rc = 0, soc = 0, nom_cap_uah; + struct qpnp_qg *chip; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->regmap) { + pr_err("Parent regmap is unavailable\n"); + return 
-ENXIO; + } + + /* ADC for BID & THERM */ + chip->batt_id_chan = iio_channel_get(&pdev->dev, "batt-id"); + if (IS_ERR(chip->batt_id_chan)) { + rc = PTR_ERR(chip->batt_id_chan); + if (rc != -EPROBE_DEFER) { + pr_err("batt-id channel unavailable, rc=%d\n", rc); + chip->batt_id_chan = NULL; + return rc; + } + } + + chip->batt_therm_chan = iio_channel_get(&pdev->dev, "batt-therm"); + if (IS_ERR(chip->batt_therm_chan)) { + rc = PTR_ERR(chip->batt_therm_chan); + if (rc != -EPROBE_DEFER) { + pr_err("batt-therm channel unavailable, rc=%d\n", rc); + chip->batt_therm_chan = NULL; + return rc; + } + } + + chip->dev = &pdev->dev; + chip->debug_mask = &qg_debug_mask; + platform_set_drvdata(pdev, chip); + INIT_WORK(&chip->udata_work, process_udata_work); + INIT_WORK(&chip->qg_status_change_work, qg_status_change_work); + mutex_init(&chip->bus_lock); + mutex_init(&chip->soc_lock); + mutex_init(&chip->data_lock); + init_waitqueue_head(&chip->qg_wait_q); + chip->maint_soc = -EINVAL; + chip->batt_soc = INT_MIN; + chip->cc_soc = INT_MIN; + + rc = qg_alg_init(chip); + if (rc < 0) { + pr_err("Error in alg_init, rc:%d\n", rc); + return rc; + } + + rc = qg_parse_dt(chip); + if (rc < 0) { + pr_err("Failed to parse DT, rc=%d\n", rc); + return rc; + } + + rc = qg_hw_init(chip); + if (rc < 0) { + pr_err("Failed to hw_init, rc=%d\n", rc); + return rc; + } + + rc = qg_setup_battery(chip); + if (rc < 0) { + pr_err("Failed to setup battery, rc=%d\n", rc); + return rc; + } + + rc = qg_register_device(chip); + if (rc < 0) { + pr_err("Failed to register QG char device, rc=%d\n", rc); + return rc; + } + + rc = qg_sdam_init(chip->dev); + if (rc < 0) { + pr_err("Failed to initialize QG SDAM, rc=%d\n", rc); + return rc; + } + + rc = qg_soc_init(chip); + if (rc < 0) { + pr_err("Failed to initialize SOC scaling init rc=%d\n", rc); + return rc; + } + + if (chip->profile_loaded) { + if (!chip->dt.cl_disable) { + /* + * Use FCC @ 25 C and charge-profile for + * Nominal Capacity + */ + rc = 
qg_get_nominal_capacity(&nom_cap_uah, 250, true); + if (!rc) { + rc = cap_learning_post_profile_init(chip->cl, + nom_cap_uah); + if (rc < 0) { + pr_err("Error in cap_learning_post_profile_init rc=%d\n", + rc); + return rc; + } + } + } + rc = restore_cycle_count(chip->counter); + if (rc < 0) { + pr_err("Error in restoring cycle_count, rc=%d\n", rc); + return rc; + } + } + + rc = qg_determine_pon_soc(chip); + if (rc < 0) { + pr_err("Failed to determine initial state, rc=%d\n", rc); + goto fail_device; + } + + chip->awake_votable = create_votable("QG_WS", VOTE_SET_ANY, + qg_awake_cb, chip); + if (IS_ERR(chip->awake_votable)) { + rc = PTR_ERR(chip->awake_votable); + chip->awake_votable = NULL; + goto fail_device; + } + + chip->vbatt_irq_disable_votable = create_votable("QG_VBATT_IRQ_DISABLE", + VOTE_SET_ANY, qg_vbatt_irq_disable_cb, chip); + if (IS_ERR(chip->vbatt_irq_disable_votable)) { + rc = PTR_ERR(chip->vbatt_irq_disable_votable); + chip->vbatt_irq_disable_votable = NULL; + goto fail_device; + } + + chip->fifo_irq_disable_votable = create_votable("QG_FIFO_IRQ_DISABLE", + VOTE_SET_ANY, qg_fifo_irq_disable_cb, chip); + if (IS_ERR(chip->fifo_irq_disable_votable)) { + rc = PTR_ERR(chip->fifo_irq_disable_votable); + chip->fifo_irq_disable_votable = NULL; + goto fail_device; + } + + chip->good_ocv_irq_disable_votable = + create_votable("QG_GOOD_IRQ_DISABLE", + VOTE_SET_ANY, qg_good_ocv_irq_disable_cb, chip); + if (IS_ERR(chip->good_ocv_irq_disable_votable)) { + rc = PTR_ERR(chip->good_ocv_irq_disable_votable); + chip->good_ocv_irq_disable_votable = NULL; + goto fail_device; + } + + rc = qg_init_psy(chip); + if (rc < 0) { + pr_err("Failed to initialize QG psy, rc=%d\n", rc); + goto fail_votable; + } + + rc = qg_request_irqs(chip); + if (rc < 0) { + pr_err("Failed to register QG interrupts, rc=%d\n", rc); + goto fail_votable; + } + + rc = qg_post_init(chip); + if (rc < 0) { + pr_err("Failed in qg_post_init rc=%d\n", rc); + goto fail_votable; + } + + 
qg_get_battery_capacity(chip, &soc); + pr_info("QG initialized! battery_profile=%s SOC=%d\n", + qg_get_battery_type(chip), soc); + + return rc; + +fail_votable: + destroy_votable(chip->awake_votable); +fail_device: + device_destroy(chip->qg_class, chip->dev_no); + cdev_del(&chip->qg_cdev); + unregister_chrdev_region(chip->dev_no, 1); + return rc; +} + +static int qpnp_qg_remove(struct platform_device *pdev) +{ + struct qpnp_qg *chip = platform_get_drvdata(pdev); + + qg_batterydata_exit(); + qg_soc_exit(chip); + + cancel_work_sync(&chip->udata_work); + cancel_work_sync(&chip->qg_status_change_work); + device_destroy(chip->qg_class, chip->dev_no); + cdev_del(&chip->qg_cdev); + unregister_chrdev_region(chip->dev_no, 1); + mutex_destroy(&chip->bus_lock); + mutex_destroy(&chip->data_lock); + mutex_destroy(&chip->soc_lock); + if (chip->awake_votable) + destroy_votable(chip->awake_votable); + + return 0; +} + +static void qpnp_qg_shutdown(struct platform_device *pdev) +{ + struct qpnp_qg *chip = platform_get_drvdata(pdev); + + if (!is_usb_present(chip) || !chip->profile_loaded) + return; + /* + * Charging status doesn't matter when the device shuts down and we + * have to treat this as charge done. Hence pass charge_done as true. 
+ */ + cycle_count_update(chip->counter, + DIV_ROUND_CLOSEST(chip->msoc * 255, 100), + POWER_SUPPLY_STATUS_NOT_CHARGING, + true, chip->usb_present); +} + +static const struct of_device_id match_table[] = { + { .compatible = "qcom,qpnp-qg", }, + { }, +}; + +static struct platform_driver qpnp_qg_driver = { + .driver = { + .name = "qcom,qpnp-qg", + .owner = THIS_MODULE, + .of_match_table = match_table, + .pm = &qpnp_qg_pm_ops, + }, + .probe = qpnp_qg_probe, + .remove = qpnp_qg_remove, + .shutdown = qpnp_qg_shutdown, +}; +module_platform_driver(qpnp_qg_driver); + +MODULE_DESCRIPTION("QPNP QG Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index 53bc8bbbc863030c54c7f641dcc28398e9a6959c..e1cdf902097faf6afab2622b27bcd69b52e226eb 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -159,10 +159,10 @@ static struct smb_params smb5_pm8150b_params = { .freq_switcher = { .name = "switching frequency", .reg = DCDC_FSW_SEL_REG, - .min_u = 1200, - .max_u = 2400, + .min_u = 600, + .max_u = 1200, .step_u = 400, - .set_proc = NULL, + .set_proc = smblib_set_chg_freq, }, }; @@ -179,6 +179,9 @@ struct smb_dt_props { int wd_bark_time; int batt_profile_fcc_ua; int batt_profile_fv_uv; + int term_current_src; + int term_current_thresh_hi_ma; + int term_current_thresh_lo_ma; }; struct smb5 { @@ -239,29 +242,34 @@ static int smb5_chg_config_init(struct smb5 *chip) chg->param = smb5_pmi632_params; chg->use_extcon = true; chg->name = "pmi632_charger"; + /* PMI632 does not support PD */ + chg->pd_not_supported = true; chg->hw_max_icl_ua = (chip->dt.usb_icl_ua > 0) ? 
chip->dt.usb_icl_ua : PMI632_MAX_ICL_UA; - chg->chg_freq.freq_5V = 600; - chg->chg_freq.freq_6V_8V = 800; - chg->chg_freq.freq_9V = 1050; - chg->chg_freq.freq_removal = 1050; - chg->chg_freq.freq_below_otg_threshold = 800; - chg->chg_freq.freq_above_otg_threshold = 800; break; default: pr_err("PMIC subtype %d not supported\n", pmic_rev_id->pmic_subtype); rc = -EINVAL; + goto out; } + chg->chg_freq.freq_5V = 600; + chg->chg_freq.freq_6V_8V = 800; + chg->chg_freq.freq_9V = 1050; + chg->chg_freq.freq_12V = 1200; + chg->chg_freq.freq_removal = 1050; + chg->chg_freq.freq_below_otg_threshold = 800; + chg->chg_freq.freq_above_otg_threshold = 800; + out: of_node_put(revid_dev_node); return rc; } -#define MICRO_1P5A 1500000 -#define MICRO_P1A 100000 +#define MICRO_1P5A 1500000 +#define MICRO_P1A 100000 #define OTG_DEFAULT_DEGLITCH_TIME_MS 50 #define MIN_WD_BARK_TIME 16 #define DEFAULT_WD_BARK_TIME 64 @@ -281,6 +289,13 @@ static int smb5_parse_dt(struct smb5 *chip) of_property_read_u32(node, "qcom,sec-charger-config", &chip->dt.sec_charger_config); + chg->sec_cp_present = + chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_CP || + chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_CP_PL; + + chg->sec_pl_present = + chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_PL || + chip->dt.sec_charger_config == POWER_SUPPLY_CHARGER_SEC_CP_PL; chg->step_chg_enabled = of_property_read_bool(node, "qcom,step-charging-enable"); @@ -316,6 +331,18 @@ static int smb5_parse_dt(struct smb5 *chip) if (rc < 0) chg->otg_cl_ua = MICRO_1P5A; + rc = of_property_read_u32(node, "qcom,chg-term-src", + &chip->dt.term_current_src); + if (rc < 0) + chip->dt.term_current_src = ITERM_SRC_UNSPECIFIED; + + rc = of_property_read_u32(node, "qcom,chg-term-current-ma", + &chip->dt.term_current_thresh_hi_ma); + + if (chip->dt.term_current_src == ITERM_SRC_ADC) + rc = of_property_read_u32(node, "qcom,chg-term-base-current-ma", + &chip->dt.term_current_thresh_lo_ma); + if (of_find_property(node, 
"qcom,thermal-mitigation", &byte_len)) { chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len, GFP_KERNEL); @@ -456,6 +483,7 @@ static enum power_supply_property smb5_usb_props[] = { POWER_SUPPLY_PROP_SDP_CURRENT_MAX, POWER_SUPPLY_PROP_CONNECTOR_TYPE, POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_SMB_EN_MODE, POWER_SUPPLY_PROP_SCOPE, }; @@ -575,6 +603,9 @@ static int smb5_usb_get_prop(struct power_supply *psy, : chg->otg_present ? POWER_SUPPLY_SCOPE_SYSTEM : POWER_SUPPLY_SCOPE_UNKNOWN; break; + case POWER_SUPPLY_PROP_SMB_EN_MODE: + val->intval = chg->sec_chg_selected; + break; default: pr_err("get prop %d is not supported in usb\n", psp); rc = -EINVAL; @@ -1023,6 +1054,7 @@ static enum power_supply_property smb5_batt_props[] = { POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, @@ -1106,6 +1138,9 @@ static int smb5_batt_get_prop(struct power_supply *psy, val->intval = get_client_vote(chg->fcc_votable, BATT_PROFILE_VOTER); break; + case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: + rc = smblib_get_prop_batt_iterm(chg, val); + break; case POWER_SUPPLY_PROP_TEMP: rc = smblib_get_prop_batt_temp(chg, val); break; @@ -1144,6 +1179,9 @@ static int smb5_batt_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_RECHARGE_SOC: val->intval = chg->auto_recharge_soc; break; + case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE: + val->intval = 0; + break; default: pr_err("batt power supply prop %d not supported\n", psp); return -EINVAL; @@ -1220,6 +1258,9 @@ static int smb5_batt_set_prop(struct power_supply *psy, chg->die_health = val->intval; power_supply_changed(chg->batt_psy); break; + case POWER_SUPPLY_PROP_RECHARGE_SOC: + rc = smblib_set_prop_rechg_soc_thresh(chg, val); + break; default: rc = -EINVAL; } @@ -1374,9 +1415,7 @@ static int smb5_configure_typec(struct smb_charger *chg) 
int rc; /* disable apsd */ - rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, - HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT, - 0); + rc = smblib_configure_hvdcp_apsd(chg, false); if (rc < 0) { dev_err(chg->dev, "Couldn't disable APSD rc=%d\n", rc); return rc; @@ -1399,6 +1438,14 @@ static int smb5_configure_typec(struct smb_charger *chg) return rc; } + rc = smblib_masked_write(chg, TYPE_C_MODE_CFG_REG, + EN_TRY_SNK_BIT, EN_TRY_SNK_BIT); + if (rc < 0) { + dev_err(chg->dev, + "Couldn't enable try.snk rc=%d\n", rc); + return rc; + } + /* configure VCONN for software control */ rc = smblib_masked_write(chg, TYPE_C_VCONN_CONTROL_REG, VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT, @@ -1428,11 +1475,77 @@ static int smb5_configure_micro_usb(struct smb_charger *chg) return rc; } +static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip) +{ + int rc = 0; + int raw_hi_thresh, raw_lo_thresh; + struct smb_charger *chg = &chip->chg; + + if (chip->dt.term_current_thresh_hi_ma < -10000 || + chip->dt.term_current_thresh_hi_ma > 10000 || + chip->dt.term_current_thresh_lo_ma < -10000 || + chip->dt.term_current_thresh_lo_ma > 10000) { + dev_err(chg->dev, "ITERM threshold out of range rc=%d\n", rc); + return -EINVAL; + } + + /* + * Conversion: + * raw (A) = (scaled_mA * ADC_CHG_TERM_MASK) / (10 * 1000) + */ + + if (chip->dt.term_current_thresh_hi_ma) { + raw_hi_thresh = ((chip->dt.term_current_thresh_hi_ma * + ADC_CHG_TERM_MASK) / 10000); + raw_hi_thresh = sign_extend32(raw_hi_thresh, 15); + + rc = smblib_batch_write(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG, + (u8 *)&raw_hi_thresh, 2); + if (rc < 0) { + dev_err(chg->dev, "Couldn't configure ITERM threshold HIGH rc=%d\n", + rc); + return rc; + } + } + + if (chip->dt.term_current_thresh_lo_ma) { + raw_lo_thresh = ((chip->dt.term_current_thresh_lo_ma * + ADC_CHG_TERM_MASK) / 10000); + raw_lo_thresh = sign_extend32(raw_lo_thresh, 15); + + rc = smblib_batch_write(chg, CHGR_ADC_ITERM_LO_THD_MSB_REG, + (u8 *)&raw_lo_thresh, 2); + if (rc < 0) { + 
dev_err(chg->dev, "Couldn't configure ITERM threshold LOW rc=%d\n", + rc); + return rc; + } + } + + return rc; +} + +static int smb5_configure_iterm_thresholds(struct smb5 *chip) +{ + int rc = 0; + + switch (chip->dt.term_current_src) { + case ITERM_SRC_ADC: + rc = smb5_configure_iterm_thresholds_adc(chip); + break; + default: + break; + } + + return rc; +} + static int smb5_init_hw(struct smb5 *chip) { struct smb_charger *chg = &chip->chg; int rc, type = 0; u8 val = 0; + union power_supply_propval pval; if (chip->dt.no_battery) chg->fake_capacity = 50; @@ -1448,12 +1561,6 @@ static int smb5_init_hw(struct smb5 *chip) smblib_get_charge_param(chg, &chg->param.usb_icl, &chg->default_icl_ua); - chg->sec_cp_present = chip->dt.sec_charger_config == SEC_CHG_CP_ONLY - || chip->dt.sec_charger_config == SEC_CHG_CP_AND_PL; - - chg->sec_pl_present = chip->dt.sec_charger_config == SEC_CHG_PL_ONLY - || chip->dt.sec_charger_config == SEC_CHG_CP_AND_PL; - /* Use SW based VBUS control, disable HW autonomous mode */ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, @@ -1495,7 +1602,6 @@ static int smb5_init_hw(struct smb5 *chip) if (type) { chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB; - smblib_rerun_apsd_if_required(chg); rc = smb5_configure_micro_usb(chg); } else { chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC; @@ -1509,10 +1615,14 @@ static int smb5_init_hw(struct smb5 *chip) /* * PMI632 based hw init: + * - Rerun APSD to ensure proper charger detection if device + * boots with charger connected. 
* - Initialize flash module for PMI632 */ - if (chg->smb_version == PMI632_SUBTYPE) + if (chg->smb_version == PMI632_SUBTYPE) { schgm_flash_init(chg); + smblib_rerun_apsd_if_required(chg); + } /* vote 0mA on usb_icl for non battery platforms */ vote(chg->usb_icl_votable, @@ -1538,12 +1648,15 @@ static int smb5_init_hw(struct smb5 *chip) * AICL configuration: * start from min and AICL ADC disable, and enable aicl rerun */ - rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG, - USBIN_AICL_PERIODIC_RERUN_EN_BIT | USBIN_AICL_ADC_EN_BIT, - USBIN_AICL_PERIODIC_RERUN_EN_BIT); - if (rc < 0) { - dev_err(chg->dev, "Couldn't configure AICL rc=%d\n", rc); - return rc; + if (chg->smb_version != PMI632_SUBTYPE) { + rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG, + USBIN_AICL_PERIODIC_RERUN_EN_BIT + | USBIN_AICL_ADC_EN_BIT, + USBIN_AICL_PERIODIC_RERUN_EN_BIT); + if (rc < 0) { + dev_err(chg->dev, "Couldn't config AICL rc=%d\n", rc); + return rc; + } } rc = smblib_write(chg, AICL_RERUN_TIME_CFG_REG, @@ -1593,6 +1706,14 @@ static int smb5_init_hw(struct smb5 *chip) return rc; } + /* set termination current threshold values */ + rc = smb5_configure_iterm_thresholds(chip); + if (rc < 0) { + pr_err("Couldn't configure ITERM thresholds rc=%d\n", + rc); + return rc; + } + /* configure float charger options */ switch (chip->dt.float_option) { case FLOAT_DCP: @@ -1706,13 +1827,14 @@ static int smb5_init_hw(struct smb5 *chip) /* program the auto-recharge threshold */ if (chip->dt.auto_recharge_soc != -EINVAL) { - rc = smblib_write(chg, CHARGE_RCHG_SOC_THRESHOLD_CFG_REG, - (chip->dt.auto_recharge_soc * 255) / 100); + pval.intval = chip->dt.auto_recharge_soc; + rc = smblib_set_prop_rechg_soc_thresh(chg, &pval); if (rc < 0) { dev_err(chg->dev, "Couldn't configure CHG_RCHG_SOC_REG rc=%d\n", - rc); + rc); return rc; } + /* Program the sample count for SOC based recharge to 1 */ rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG, NO_OF_SAMPLE_FOR_RCHG, 0); diff --git 
a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c index a181b386ea70e0fb2c7dab57a2ae5a7eca45b237..6ba5fffee61574e6fcd86c4c62f03e218c94afbb 100644 --- a/drivers/power/supply/qcom/smb1390-charger.c +++ b/drivers/power/supply/qcom/smb1390-charger.c @@ -409,18 +409,13 @@ static void smb1390_status_change_work(struct work_struct *work) goto out; rc = power_supply_get_property(chip->usb_psy, - POWER_SUPPLY_PROP_TYPEC_MODE, &pval); + POWER_SUPPLY_PROP_SMB_EN_MODE, &pval); if (rc < 0) { pr_err("Couldn't get usb present rc=%d\n", rc); goto out; } - if (pval.intval != POWER_SUPPLY_TYPEC_SOURCE_DEFAULT - && pval.intval != POWER_SUPPLY_TYPEC_SOURCE_MEDIUM - && pval.intval != POWER_SUPPLY_TYPEC_SOURCE_HIGH) { - vote(chip->disable_votable, USB_VOTER, true, 0); - vote(chip->fcc_votable, CP_VOTER, false, 0); - } else { + if (pval.intval == POWER_SUPPLY_CHARGER_SEC_CP) { vote(chip->disable_votable, USB_VOTER, false, 0); /* @@ -463,6 +458,9 @@ static void smb1390_status_change_work(struct work_struct *work) &chip->taper_work); } } + } else { + vote(chip->disable_votable, USB_VOTER, true, 0); + vote(chip->fcc_votable, CP_VOTER, false, 0); } out: diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 5d6c02898b768b42bbdbf5695bee7720fe5e54e0..2d63bfe57a89720942608109ce9558715fa68ade 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -117,8 +117,7 @@ int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua) return 0; } -static int smblib_select_sec_charger(struct smb_charger *chg, - enum sec_charger_type sec_chg) +static int smblib_select_sec_charger(struct smb_charger *chg, int sec_chg) { int rc; @@ -126,7 +125,7 @@ static int smblib_select_sec_charger(struct smb_charger *chg, return 0; switch (sec_chg) { - case SEC_CHG_CP: + case POWER_SUPPLY_CHARGER_SEC_CP: /* select Charge Pump instead of slave charger */ rc = smblib_masked_write(chg, 
MISC_SMB_CFG_REG, SMB_EN_SEL_BIT, SMB_EN_SEL_BIT); @@ -143,7 +142,7 @@ static int smblib_select_sec_charger(struct smb_charger *chg, return rc; } break; - case SEC_CHG_PL: + case POWER_SUPPLY_CHARGER_SEC_PL: /* select slave charger instead of Charge Pump */ rc = smblib_masked_write(chg, MISC_SMB_CFG_REG, SMB_EN_SEL_BIT, 0); @@ -160,7 +159,7 @@ static int smblib_select_sec_charger(struct smb_charger *chg, return rc; } break; - case SEC_CHG_NONE: + case POWER_SUPPLY_CHARGER_SEC_NONE: default: /* SW override, disabling secondary charger(s) */ rc = smblib_write(chg, MISC_SMB_EN_CMD_REG, @@ -557,6 +556,23 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg, /******************** * HELPER FUNCTIONS * ********************/ +int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable) +{ + int rc; + u8 mask = HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT; + + if (chg->pd_not_supported) + return 0; + + rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, mask, + enable ? mask : 0); + if (rc < 0) + smblib_err(chg, "failed to write USBIN_OPTIONS_1_CFG rc=%d\n", + rc); + + return rc; +} + static int smblib_request_dpdm(struct smb_charger *chg, bool enable) { int rc = 0; @@ -910,9 +926,10 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) goto set_mode; /* configure current */ - if (((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) - || (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)) - && (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) { + if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB + && (chg->typec_legacy + || chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT + || chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB)) { rc = set_sdp_current(chg, icl_ua); if (rc < 0) { smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc); @@ -1454,6 +1471,45 @@ int smblib_get_prop_batt_current_now(struct smb_charger *chg, return rc; } +int smblib_get_prop_batt_iterm(struct smb_charger *chg, + union power_supply_propval *val) 
+{ + int rc, temp; + u8 stat; + + /* + * Currently, only ADC comparator-based termination is supported, + * hence read only the threshold corresponding to ADC source. + * Proceed only if CHGR_ITERM_USE_ANALOG_BIT is 0. + */ + rc = smblib_read(chg, CHGR_ENG_CHARGING_CFG_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read CHGR_ENG_CHARGING_CFG_REG rc=%d\n", + rc); + return rc; + } + + if (stat & CHGR_ITERM_USE_ANALOG_BIT) { + val->intval = -EINVAL; + return 0; + } + + rc = smblib_batch_read(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG, + (u8 *)&temp, 2); + + if (rc < 0) { + smblib_err(chg, "Couldn't read CHGR_ADC_ITERM_UP_THD_MSB_REG rc=%d\n", + rc); + return rc; + } + + temp = sign_extend32(temp, 15); + temp = DIV_ROUND_CLOSEST(temp * 10000, ADC_CHG_TERM_MASK); + val->intval = temp; + + return rc; +} + int smblib_get_prop_batt_temp(struct smb_charger *chg, union power_supply_propval *val) { @@ -1600,6 +1656,25 @@ int smblib_set_prop_input_current_limited(struct smb_charger *chg, return 0; } +int smblib_set_prop_rechg_soc_thresh(struct smb_charger *chg, + const union power_supply_propval *val) +{ + int rc; + u8 new_thr = DIV_ROUND_CLOSEST(val->intval * 255, 100); + + rc = smblib_write(chg, CHARGE_RCHG_SOC_THRESHOLD_CFG_REG, + new_thr); + if (rc < 0) { + smblib_err(chg, "Couldn't write to RCHG_SOC_THRESHOLD_CFG_REG rc=%d\n", + rc); + return rc; + } + + chg->auto_recharge_soc = val->intval; + + return rc; +} + int smblib_rerun_aicl(struct smb_charger *chg) { int rc; @@ -1955,6 +2030,8 @@ static int smblib_get_prop_ufp_mode(struct smb_charger *chg) return POWER_SUPPLY_TYPEC_SOURCE_MEDIUM; case SNK_RP_3P0_BIT: return POWER_SUPPLY_TYPEC_SOURCE_HIGH; + case SNK_RP_SHORT_BIT: + return POWER_SUPPLY_TYPEC_NON_COMPLIANT; default: break; } @@ -2415,8 +2492,10 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, * For PPS, Charge Pump is preferred over parallel charger if * present. 
*/ - if (chg->pd_active == 2 && chg->sec_cp_present) { - rc = smblib_select_sec_charger(chg, SEC_CHG_CP); + if (chg->pd_active == POWER_SUPPLY_PD_PPS_ACTIVE + && chg->sec_cp_present) { + rc = smblib_select_sec_charger(chg, + POWER_SUPPLY_CHARGER_SEC_CP); if (rc < 0) dev_err(chg->dev, "Couldn't enable secondary charger rc=%d\n", rc); @@ -2427,7 +2506,8 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0); rc = smblib_select_sec_charger(chg, - chg->sec_pl_present ? SEC_CHG_PL : SEC_CHG_NONE); + chg->sec_pl_present ? POWER_SUPPLY_CHARGER_SEC_PL : + POWER_SUPPLY_CHARGER_SEC_NONE); if (rc < 0) dev_err(chg->dev, "Couldn't enable secondary charger rc=%d\n", @@ -2436,15 +2516,13 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, /* PD hard resets failed, rerun apsd */ if (chg->ok_to_pd) { chg->ok_to_pd = false; - rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, - HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT, - HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT); + rc = smblib_configure_hvdcp_apsd(chg, true); if (rc < 0) { dev_err(chg->dev, - "Couldn't disable APSD rc=%d\n", rc); + "Couldn't enable APSD rc=%d\n", rc); return rc; } - smblib_rerun_apsd(chg); + smblib_rerun_apsd_if_required(chg); } } @@ -2851,6 +2929,8 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) rc = smblib_request_dpdm(chg, false); if (rc < 0) smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc); + + smblib_update_usb_type(chg); } if (chg->connector_type == POWER_SUPPLY_CONNECTOR_MICRO_USB) @@ -2967,7 +3047,8 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg, /* for QC3, switch to CP if present */ if ((apsd_result->bit & QC_3P0_BIT) && chg->sec_cp_present) { - rc = smblib_select_sec_charger(chg, SEC_CHG_CP); + rc = smblib_select_sec_charger(chg, + POWER_SUPPLY_CHARGER_SEC_CP); if (rc < 0) dev_err(chg->dev, "Couldn't enable secondary chargers rc=%d\n", rc); @@ -3129,7 +3210,7 @@ irqreturn_t usb_source_change_irq_handler(int 
irq, void *data) * charger-mis-detection. */ chg->uusb_apsd_rerun_done = true; - smblib_rerun_apsd(chg); + smblib_rerun_apsd_if_required(chg); return IRQ_HANDLED; } @@ -3176,7 +3257,8 @@ static void typec_sink_insertion(struct smb_charger *chg) } if (!chg->pr_swap_in_progress) - chg->ok_to_pd = !(*chg->pd_disabled) || chg->early_usb_attach; + chg->ok_to_pd = (!(*chg->pd_disabled) || chg->early_usb_attach) + && !chg->pd_not_supported; } static void typec_src_insertion(struct smb_charger *chg) @@ -3195,24 +3277,28 @@ static void typec_src_insertion(struct smb_charger *chg) } chg->typec_legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT; - chg->ok_to_pd = !(chg->typec_legacy || *chg->pd_disabled) - || chg->early_usb_attach; + chg->ok_to_pd = (!(chg->typec_legacy || *chg->pd_disabled) + || chg->early_usb_attach) && !chg->pd_not_supported; if (!chg->ok_to_pd) { - rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, - HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT, - HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT); + rc = smblib_configure_hvdcp_apsd(chg, true); if (rc < 0) { dev_err(chg->dev, - "Couldn't disable APSD rc=%d\n", rc); + "Couldn't enable APSD rc=%d\n", rc); return; } - smblib_rerun_apsd(chg); + smblib_rerun_apsd_if_required(chg); } } static void typec_sink_removal(struct smb_charger *chg) { vote(chg->usb_icl_votable, OTG_VOTER, false, 0); + + if (chg->use_extcon) { + if (chg->otg_present) + smblib_notify_usb_host(chg, false); + chg->otg_present = false; + } } static void typec_src_removal(struct smb_charger *chg) @@ -3222,18 +3308,16 @@ static void typec_src_removal(struct smb_charger *chg) struct storm_watch *wdata; rc = smblib_select_sec_charger(chg, - chg->sec_pl_present ? SEC_CHG_PL : SEC_CHG_NONE); + chg->sec_pl_present ? 
POWER_SUPPLY_CHARGER_SEC_PL : + POWER_SUPPLY_CHARGER_SEC_NONE); if (rc < 0) dev_err(chg->dev, "Couldn't disable secondary charger rc=%d\n", rc); /* disable apsd */ - rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, - HVDCP_EN_BIT | BC1P2_SRC_DETECT_BIT, - 0); + rc = smblib_configure_hvdcp_apsd(chg, false); if (rc < 0) - smblib_err(chg, - "Couldn't disable APSD rc=%d\n", rc); + smblib_err(chg, "Couldn't disable APSD rc=%d\n", rc); smblib_update_usb_type(chg); @@ -3290,13 +3374,9 @@ static void typec_src_removal(struct smb_charger *chg) smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n", rc); - if (chg->use_extcon) { - if (chg->otg_present) - smblib_notify_usb_host(chg, false); - else - smblib_notify_device_mode(chg, false); - } - chg->otg_present = false; + if (chg->use_extcon) + smblib_notify_device_mode(chg, false); + chg->typec_legacy = false; } @@ -3627,6 +3707,9 @@ static void smblib_uusb_otg_work(struct work_struct *work) otg = !!(stat & U_USB_GROUND_NOVBUS_BIT); if (chg->otg_present != otg) smblib_notify_usb_host(chg, otg); + else + goto out; + chg->otg_present = otg; if (!otg) chg->boost_current_ua = 0; @@ -3661,10 +3744,10 @@ static void pl_update_work(struct work_struct *work) struct smb_charger *chg = container_of(work, struct smb_charger, pl_update_work); - if (chg->sec_chg_selected == SEC_CHG_CP) + if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) return; - smblib_select_sec_charger(chg, SEC_CHG_PL); + smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_PL); } static void clear_hdc_work(struct work_struct *work) @@ -3921,7 +4004,7 @@ int smblib_init(struct smb_charger *chg) chg->fake_batt_status = -EINVAL; chg->sink_src_mode = UNATTACHED_MODE; chg->jeita_configured = false; - chg->sec_chg_selected = SEC_CHG_NONE; + chg->sec_chg_selected = POWER_SUPPLY_CHARGER_SEC_NONE; switch (chg->mode) { case PARALLEL_MASTER: @@ -3951,9 +4034,10 @@ int smblib_init(struct smb_charger *chg) if (chg->sec_pl_present) { chg->pl.psy 
= power_supply_get_by_name("parallel"); - if (chg->pl.psy && - chg->sec_chg_selected != SEC_CHG_CP) { - rc = smblib_select_sec_charger(chg, SEC_CHG_PL); + if (chg->sec_chg_selected != POWER_SUPPLY_CHARGER_SEC_CP + && chg->pl.psy) { + rc = smblib_select_sec_charger(chg, + POWER_SUPPLY_CHARGER_SEC_PL); if (rc < 0) { smblib_err(chg, "Couldn't config pl charger rc=%d\n", rc); diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 9fcc55fd8432bb4cd627635af1da2806e0c74661..8b33ec346dd66fea4eb5e32a8e9d992c61d442f4 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -72,25 +72,14 @@ enum print_reason { #define VBAT_TO_VRAW_ADC(v) div_u64((u64)v * 1000000UL, 194637UL) +#define ADC_CHG_TERM_MASK 32767 + enum smb_mode { PARALLEL_MASTER = 0, PARALLEL_SLAVE, NUM_MODES, }; -enum sec_charger_config { - MAIN_STANDALONE = 0, - SEC_CHG_CP_ONLY, - SEC_CHG_PL_ONLY, - SEC_CHG_CP_AND_PL, -}; - -enum sec_charger_type { - SEC_CHG_NONE = 0, - SEC_CHG_CP, - SEC_CHG_PL, -}; - enum sink_src_mode { SINK_MODE, SRC_MODE, @@ -184,6 +173,12 @@ enum float_options { SUSPEND_INPUT = 4, }; +enum chg_term_config_src { + ITERM_SRC_UNSPECIFIED, + ITERM_SRC_ADC, + ITERM_SRC_ANALOG +}; + struct smb_irq_info { const char *name; const irq_handler_t handler; @@ -279,6 +274,7 @@ struct smb_charger { int smb_version; int otg_delay_ms; int *weak_chg_icl_ua; + bool pd_not_supported; /* locks */ struct mutex lock; @@ -471,6 +467,8 @@ int smblib_get_prop_batt_voltage_now(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_batt_current_now(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_prop_batt_iterm(struct smb_charger *chg, + union power_supply_propval *val); int smblib_get_prop_batt_temp(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_batt_charge_counter(struct smb_charger *chg, @@ -540,6 +538,8 @@ int smblib_set_prop_pd_in_hard_reset(struct 
smb_charger *chg, const union power_supply_propval *val); int smblib_set_prop_ship_mode(struct smb_charger *chg, const union power_supply_propval *val); +int smblib_set_prop_rechg_soc_thresh(struct smb_charger *chg, + const union power_supply_propval *val); void smblib_suspend_on_debug_battery(struct smb_charger *chg); int smblib_rerun_apsd_if_required(struct smb_charger *chg); int smblib_get_prop_fcc_delta(struct smb_charger *chg, @@ -554,6 +554,7 @@ int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg, union power_supply_propval *val); int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg, const union power_supply_propval *val); +int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable); int smblib_init(struct smb_charger *chg); int smblib_deinit(struct smb_charger *chg); diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h index 035e8c0774d3066a7c3c1e9a8b93ce099850c0c9..20a864e513e65f04696598bc40d8b40fbdb5609e 100644 --- a/drivers/power/supply/qcom/smb5-reg.h +++ b/drivers/power/supply/qcom/smb5-reg.h @@ -73,6 +73,11 @@ enum { #define CHGR_FAST_CHARGE_CURRENT_CFG_REG (CHGR_BASE + 0x61) +#define CHGR_ADC_ITERM_UP_THD_MSB_REG (CHGR_BASE + 0x67) +#define CHGR_ADC_ITERM_UP_THD_LSB_REG (CHGR_BASE + 0x68) +#define CHGR_ADC_ITERM_LO_THD_MSB_REG (CHGR_BASE + 0x69) +#define CHGR_ADC_ITERM_LO_THD_LSB_REG (CHGR_BASE + 0x6A) + #define CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG (CHGR_BASE + 0x6B) #define NO_OF_SAMPLE_FOR_RCHG_SHIFT 2 #define NO_OF_SAMPLE_FOR_RCHG GENMASK(3, 2) @@ -102,6 +107,10 @@ enum { #define JEITA_CCCOMP_CFG_COLD_REG (CHGR_BASE + 0x93) #define CHGR_JEITA_THRESHOLD_BASE_REG(i) (CHGR_BASE + 0x94 + (i * 4)) + +#define CHGR_ENG_CHARGING_CFG_REG (CHGR_BASE + 0xC0) +#define CHGR_ITERM_USE_ANALOG_BIT BIT(3) + /******************************** * DCDC Peripheral Registers * ********************************/ @@ -252,10 +261,11 @@ enum { * TYPEC Peripheral Registers * ********************************/ 
#define TYPE_C_SNK_STATUS_REG (TYPEC_BASE + 0x06) -#define DETECTED_SRC_TYPE_MASK GENMASK(3, 1) +#define DETECTED_SRC_TYPE_MASK GENMASK(3, 0) #define SNK_RP_STD_BIT BIT(3) #define SNK_RP_1P5_BIT BIT(2) #define SNK_RP_3P0_BIT BIT(1) +#define SNK_RP_SHORT_BIT BIT(0) #define TYPE_C_SRC_STATUS_REG (TYPEC_BASE + 0x08) #define DETECTED_SNK_TYPE_MASK GENMASK(4, 0) @@ -283,7 +293,8 @@ enum { #define U_USB_GROUND_BIT BIT(4) #define TYPE_C_MODE_CFG_REG (TYPEC_BASE + 0x44) -#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0) +#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 1) +#define EN_TRY_SNK_BIT BIT(4) #define EN_SRC_ONLY_BIT BIT(2) #define EN_SNK_ONLY_BIT BIT(1) #define TYPEC_DISABLE_CMD_BIT BIT(0) @@ -327,7 +338,7 @@ enum { #define TYPEC_U_USB_CFG_REG (TYPEC_BASE + 0x70) #define EN_MICRO_USB_MODE_BIT BIT(0) -#define TYPEC_MICRO_USB_MODE_REG (TYPEC_BASE + 0x70) +#define TYPEC_MICRO_USB_MODE_REG (TYPEC_BASE + 0x73) #define MICRO_USB_MODE_ONLY_BIT BIT(0) /******************************** * MISC Peripheral Registers * diff --git a/drivers/regulator/cpr-regulator.c b/drivers/regulator/cpr-regulator.c index 1a02eafc672d090016f42e6f67d5afdde389a523..9b428f0df3d72e5c182ec7be36593785eec87ecd 100644 --- a/drivers/regulator/cpr-regulator.c +++ b/drivers/regulator/cpr-regulator.c @@ -374,6 +374,7 @@ struct cpr_regulator { u32 num_corners; int *quot_adjust; int *mem_acc_corner_map; + unsigned int *vdd_mode_map; int num_adj_cpus; int *adj_cpus; @@ -1398,6 +1399,7 @@ static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg) u32 save_ctl, save_irq; cpumask_t tmp_mask; int rc = 0, i; + unsigned int current_mode; save_ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL); save_irq = cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line)); @@ -1422,6 +1424,13 @@ static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg) return rc; } + current_mode = regulator_get_mode(cpr_vreg->vdd_apc); + if (current_mode < 0) { + cpr_err(cpr_vreg, "Failed to get vdd-supply mode, error=%d\n", + 
current_mode); + return current_mode; + } + /* Force PWM mode */ rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_NORMAL); if (rc) { @@ -1447,10 +1456,10 @@ static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg) put_online_cpus(); /* Set to initial mode */ - rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_IDLE); + rc = regulator_set_mode(cpr_vreg->vdd_apc, current_mode); if (rc) { cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n", - REGULATOR_MODE_IDLE, rc); + current_mode, rc); return rc; } @@ -1511,6 +1520,17 @@ static int cpr_regulator_set_voltage(struct regulator_dev *rdev, if (rc) return rc; + if (cpr_vreg->vdd_mode_map) { + rc = regulator_set_mode(cpr_vreg->vdd_apc, + cpr_vreg->vdd_mode_map[corner]); + if (rc) { + cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n", + cpr_vreg->vdd_mode_map[corner], rc); + return rc; + } + } + + if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) { cpr_irq_clr(cpr_vreg); if (reset_quot) @@ -4560,6 +4580,44 @@ static int cpr_rpm_apc_init(struct platform_device *pdev, return rc; } +static int cpr_parse_vdd_mode_config(struct platform_device *pdev, + struct cpr_regulator *cpr_vreg) +{ + int rc, len = 0, i, mode; + struct device_node *of_node = pdev->dev.of_node; + const char *prop_str = "qcom,cpr-vdd-mode-map"; + + if (!of_find_property(of_node, prop_str, &len)) + return 0; + + if (len != cpr_vreg->num_corners * sizeof(u32)) { + cpr_err(cpr_vreg, "%s length=%d is invalid: required:%d\n", + prop_str, len, cpr_vreg->num_corners); + return -EINVAL; + } + + cpr_vreg->vdd_mode_map = devm_kcalloc(&pdev->dev, + cpr_vreg->num_corners + 1, + sizeof(*cpr_vreg->vdd_mode_map), + GFP_KERNEL); + if (!cpr_vreg->vdd_mode_map) + return -ENOMEM; + + for (i = 0; i < cpr_vreg->num_corners; i++) { + rc = of_property_read_u32_index(of_node, prop_str, i, &mode); + if (rc) { + cpr_err(cpr_vreg, "read %s index %d failed, rc = %d\n", + prop_str, i, rc); + return rc; + } + 
cpr_vreg->vdd_mode_map[i + CPR_CORNER_MIN] + = mode ? REGULATOR_MODE_NORMAL + : REGULATOR_MODE_IDLE; + } + + return rc; +} + static int cpr_vsens_init(struct platform_device *pdev, struct cpr_regulator *cpr_vreg) { @@ -5556,6 +5614,12 @@ static int cpr_regulator_probe(struct platform_device *pdev) return rc; } + rc = cpr_parse_vdd_mode_config(pdev, cpr_vreg); + if (rc) { + cpr_err(cpr_vreg, "vdd-mode parsing failed, rc=%d\n", rc); + return rc; + } + if (of_property_read_bool(pdev->dev.of_node, "qcom,disable-closed-loop-in-pc")) { rc = cpr_init_pm_notification(cpr_vreg); diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 0fce06acfaeccd0d55061be589a206ce01764e96..a2eb50719c7bb2b6cdd3b23fd2786d9fdb24f09f 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -271,8 +271,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); if (drvdata->desc.name == NULL) { dev_err(&pdev->dev, "Failed to allocate supply name\n"); - ret = -ENOMEM; - goto err; + return -ENOMEM; } if (config->nr_gpios != 0) { @@ -292,7 +291,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Could not obtain regulator setting GPIOs: %d\n", ret); - goto err_memstate; + goto err_memgpio; } } @@ -303,7 +302,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) if (drvdata->states == NULL) { dev_err(&pdev->dev, "Failed to allocate state data\n"); ret = -ENOMEM; - goto err_memgpio; + goto err_stategpio; } drvdata->nr_states = config->nr_states; @@ -324,7 +323,7 @@ static int gpio_regulator_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "No regulator type set\n"); ret = -EINVAL; - goto err_memgpio; + goto err_memstate; } /* build initial state from gpio init data. 
*/ @@ -361,22 +360,21 @@ static int gpio_regulator_probe(struct platform_device *pdev) if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); - goto err_stategpio; + goto err_memstate; } platform_set_drvdata(pdev, drvdata); return 0; -err_stategpio: - gpio_free_array(drvdata->gpios, drvdata->nr_gpios); err_memstate: kfree(drvdata->states); +err_stategpio: + gpio_free_array(drvdata->gpios, drvdata->nr_gpios); err_memgpio: kfree(drvdata->gpios); err_name: kfree(drvdata->desc.name); -err: return ret; } diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 14637a01ba2d3db64d3357ab3e4ea7ad2915ca8b..c9875355905d159827ea565affd0b26b7bc18b01 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -305,6 +305,7 @@ int of_regulator_match(struct device *dev, struct device_node *node, dev_err(dev, "failed to parse DT for regulator %s\n", child->name); + of_node_put(child); return -EINVAL; } match->of_node = of_node_get(child); diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 633268e9d550de7001999052f2692239b4754f6b..05bcbce2013a98d8e34210ce6cde7bddbdc76c58 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -339,8 +339,10 @@ static int imx_rproc_probe(struct platform_device *pdev) } dcfg = of_device_get_match_data(dev); - if (!dcfg) - return -EINVAL; + if (!dcfg) { + ret = -EINVAL; + goto err_put_rproc; + } priv = rproc->priv; priv->rproc = rproc; diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index e1cfa06810ef275704ab887935b39ca438ae5c3a..e79f2a181ad24217a3e3bc232593184b82d494fd 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -49,6 +49,11 @@ static int __init rtc_hctosys(void) tv64.tv_sec = rtc_tm_to_time64(&tm); +#if BITS_PER_LONG == 32 + if (tv64.tv_sec > INT_MAX) + goto err_read; +#endif + err = do_settimeofday64(&tv64); dev_info(rtc->dev.parent, 
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index d67769265185929c53a48eba64100491f5622cce..a1c44d0c855780f8ba109e79c6bd62cb96dbb40d 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -235,3 +235,5 @@ static struct platform_driver goldfish_rtc = { }; module_platform_driver(goldfish_rtc); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index c90fba3ed861881c0c813361dfaece9efac60938..6620016869cf85799514526c6e3e5d53a6b54b51 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -885,7 +885,6 @@ static int m41t80_probe(struct i2c_client *client, { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int rc = 0; - struct rtc_device *rtc = NULL; struct rtc_time tm; struct m41t80_data *m41t80_data = NULL; bool wakeup_source = false; @@ -909,6 +908,10 @@ static int m41t80_probe(struct i2c_client *client, m41t80_data->features = id->driver_data; i2c_set_clientdata(client, m41t80_data); + m41t80_data->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(m41t80_data->rtc)) + return PTR_ERR(m41t80_data->rtc); + #ifdef CONFIG_OF wakeup_source = of_property_read_bool(client->dev.of_node, "wakeup-source"); @@ -932,15 +935,11 @@ static int m41t80_probe(struct i2c_client *client, device_init_wakeup(&client->dev, true); } - rtc = devm_rtc_device_register(&client->dev, client->name, - &m41t80_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) - return PTR_ERR(rtc); + m41t80_data->rtc->ops = &m41t80_rtc_ops; - m41t80_data->rtc = rtc; if (client->irq <= 0) { /* We cannot support UIE mode if we do not have an IRQ line */ - rtc->uie_unsupported = 1; + m41t80_data->rtc->uie_unsupported = 1; } /* Make sure HT (Halt Update) bit is cleared */ @@ -993,6 +992,11 @@ static int m41t80_probe(struct i2c_client *client, if (m41t80_data->features & M41T80_FEATURE_SQ) m41t80_sqw_register_clk(m41t80_data); #endif + + rc = rtc_register_device(m41t80_data->rtc); + if (rc) + return 
rc; + return 0; } diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c index 35c9aada07c8ef3a19f44cf8d4a3e8811f484e3f..79c8da54e922e296fd115f0e7b902109b3986d25 100644 --- a/drivers/rtc/rtc-rk808.c +++ b/drivers/rtc/rtc-rk808.c @@ -416,12 +416,11 @@ static int rk808_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - rk808_rtc->rtc = devm_rtc_device_register(&pdev->dev, "rk808-rtc", - &rk808_rtc_ops, THIS_MODULE); - if (IS_ERR(rk808_rtc->rtc)) { - ret = PTR_ERR(rk808_rtc->rtc); - return ret; - } + rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rk808_rtc->rtc)) + return PTR_ERR(rk808_rtc->rtc); + + rk808_rtc->rtc->ops = &rk808_rtc_ops; rk808_rtc->irq = platform_get_irq(pdev, 0); if (rk808_rtc->irq < 0) { @@ -438,9 +437,10 @@ static int rk808_rtc_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", rk808_rtc->irq, ret); + return ret; } - return ret; + return rtc_register_device(rk808_rtc->rtc); } static struct platform_driver rk808_rtc_driver = { diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 026035373ae65a446122c6ebf39df9364b58718a..38a12435b5a052fed6091e62226c2f3a0a11a60b 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c @@ -249,16 +249,24 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) platform_set_drvdata(dev, priv); - rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops, - THIS_MODULE); + rtc = devm_rtc_allocate_device(&dev->dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); + + rtc->ops = &rp5c01_rtc_ops; + priv->rtc = rtc; error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); if (error) return error; + error = rtc_register_device(rtc); + if (error) { + sysfs_remove_bin_file(&dev->dev.kobj, &priv->nvram_attr); + return error; + } + return 0; } diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index 
d8ef9e052c4fc71f38a871a8293271cb469d495b..9af591d5223c3af8c6293a5f20704dc1da20d76e 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct snvs_rtc_data *data = dev_get_drvdata(dev); unsigned long time; + int ret; rtc_tm_to_time(tm, &time); /* Disable RTC first */ - snvs_rtc_enable(data, false); + ret = snvs_rtc_enable(data, false); + if (ret) + return ret; /* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */ regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH); regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH)); /* Enable RTC again */ - snvs_rtc_enable(data, true); + ret = snvs_rtc_enable(data, true); - return 0; + return ret; } static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) @@ -288,7 +291,11 @@ static int snvs_rtc_probe(struct platform_device *pdev) regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff); /* Enable RTC */ - snvs_rtc_enable(data, true); + ret = snvs_rtc_enable(data, true); + if (ret) { + dev_err(&pdev->dev, "failed to enable rtc %d\n", ret); + goto error_rtc_device_register; + } device_init_wakeup(&pdev->dev, true); diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c index 560d9a5e02253fe3ef5cf159c71d94198498b45c..a9528083061d50d9f6b59be99ec42a386d51e23e 100644 --- a/drivers/rtc/rtc-tx4939.c +++ b/drivers/rtc/rtc-tx4939.c @@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm) for (i = 2; i < 6; i++) buf[i] = __raw_readl(&rtcreg->dat); spin_unlock_irq(&pdata->lock); - sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; + sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) | + (buf[3] << 8) | buf[2]; rtc_time_to_tm(sec, tm); return rtc_valid_tm(tm); } @@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->enabled = (ctl 
& TX4939_RTCCTL_ALME) ? 1 : 0; alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0; spin_unlock_irq(&pdata->lock); - sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; + sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) | + (buf[3] << 8) | buf[2]; rtc_time_to_tm(sec, &alrm->time); return rtc_valid_tm(&alrm->time); } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 29f35e29d4801f83aa74b0a590bdb4412a9caa7c..e67c1d8a193d39a86c0c084e869d9c395f225816 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2596,8 +2596,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) case DASD_CQR_QUEUED: /* request was not started - just set to cleared */ cqr->status = DASD_CQR_CLEARED; - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; break; case DASD_CQR_IN_IO: /* request in IO - terminate IO and release again */ @@ -3917,9 +3915,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) wait_event(dasd_flush_wq, (cqr->status != DASD_CQR_CLEAR_PENDING)); - /* mark sleepon requests as ended */ - if (cqr->callback_data == DASD_SLEEPON_START_TAG) - cqr->callback_data = DASD_SLEEPON_END_TAG; + /* + * requeue requests to blocklayer will only work + * for block device requests + */ + if (_dasd_requeue_request(cqr)) + continue; /* remove requests from device and block queue */ list_del_init(&cqr->devlist); @@ -3932,13 +3933,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) cqr = refers; } - /* - * requeue requests to blocklayer will only work - * for block device requests - */ - if (_dasd_requeue_request(cqr)) - continue; - if (cqr->block) list_del_init(&cqr->blocklist); cqr->block->base->discipline->free_cp( @@ -3955,8 +3949,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) list_splice_tail(&requeue_queue, &device->ccw_queue); spin_unlock_irq(get_ccwdev_lock(device->cdev)); } - /* wake up generic waitqueue for 
eventually ended sleepon requests */ - wake_up(&generic_waitq); + dasd_schedule_device_bh(device); return rc; } diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index f98ea674c3d8054390a5486802fedd482f0dbe16..28837ad75712d3fd86a4e6d4bc429789c8c1dcf2 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -796,6 +796,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_set_timeout(cdev, 0); cdev->private->iretry = 255; + cdev->private->async_kill_io_rc = -ETIMEDOUT; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); @@ -872,7 +873,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) /* OK, i/o is dead now. Call interrupt handler. */ if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + ERR_PTR(cdev->private->async_kill_io_rc)); } static void @@ -889,14 +890,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_online_verify(cdev, 0); if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, - ERR_PTR(-EIO)); + ERR_PTR(cdev->private->async_kill_io_rc)); } void ccw_device_kill_io(struct ccw_device *cdev) { int ret; + ccw_device_set_timeout(cdev, 0); cdev->private->iretry = 255; + cdev->private->async_kill_io_rc = -EIO; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index cf8c4ac6323a6d1c91dfe93dfdb22e2d9d0432b3..b22922ec32d11d1633d1f434f8e2c9f5d8bc0d05 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -160,7 +160,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) } /** - * ccw_device_start_key() - start a s390 channel program with key + * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key * @cdev: target ccw device * 
@cpa: logical start address of channel program * @intparm: user specific interruption parameter; will be presented back to @@ -171,10 +171,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * @key: storage key to be used for the I/O * @flags: additional flags; defines the action to be performed for I/O * processing. + * @expires: timeout value in jiffies * * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). + * This function notifies the device driver if the channel program has not + * completed during the time specified by @expires. If a timeout occurs, the + * channel program is terminated via xsch, hsch or csch, and the device's + * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -183,9 +188,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) * Context: * Interrupts disabled, ccw device lock held */ -int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags) +int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags, int expires) { struct subchannel *sch; int ret; @@ -225,6 +230,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, switch (ret) { case 0: cdev->private->intparm = intparm; + if (expires) + ccw_device_set_timeout(cdev, expires); break; case -EACCES: case -ENODEV: @@ -235,7 +242,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, } /** - * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key + * ccw_device_start_key() - start a s390 channel program with key * @cdev: target ccw device * @cpa: logical 
start address of channel program * @intparm: user specific interruption parameter; will be presented back to @@ -246,15 +253,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * @key: storage key to be used for the I/O * @flags: additional flags; defines the action to be performed for I/O * processing. - * @expires: timeout value in jiffies * * Start a S/390 channel program. When the interrupt arrives, the * IRQ handler is called, either immediately, delayed (dev-end missing, * or sense required) or never (no IRQ handler registered). - * This function notifies the device driver if the channel program has not - * completed during the time specified by @expires. If a timeout occurs, the - * channel program is terminated via xsch, hsch or csch, and the device's - * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). * Returns: * %0, if the operation was successful; * -%EBUSY, if the device is busy, or status pending; @@ -263,19 +265,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, * Context: * Interrupts disabled, ccw device lock held */ -int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, - unsigned long intparm, __u8 lpm, __u8 key, - unsigned long flags, int expires) +int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, + unsigned long intparm, __u8 lpm, __u8 key, + unsigned long flags) { - int ret; - - if (!cdev) - return -ENODEV; - ccw_device_set_timeout(cdev, expires); - ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); - if (ret != 0) - ccw_device_set_timeout(cdev, 0); - return ret; + return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, + flags, 0); } /** @@ -490,18 +485,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) EXPORT_SYMBOL(ccw_device_get_id); /** - * ccw_device_tm_start_key() - perform start function + * ccw_device_tm_start_timeout_key() - perform start function * @cdev: ccw device 
on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler * @lpm: mask of paths to use * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request * * Start the tcw on the given ccw device. Return zero on success, non-zero * otherwise. */ -int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, - unsigned long intparm, u8 lpm, u8 key) +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key, + int expires) { struct subchannel *sch; int rc; @@ -528,37 +525,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, return -EACCES; } rc = cio_tm_start_key(sch, tcw, lpm, key); - if (rc == 0) + if (rc == 0) { cdev->private->intparm = intparm; + if (expires) + ccw_device_set_timeout(cdev, expires); + } return rc; } -EXPORT_SYMBOL(ccw_device_tm_start_key); +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); /** - * ccw_device_tm_start_timeout_key() - perform start function + * ccw_device_tm_start_key() - perform start function * @cdev: ccw device on which to perform the start function * @tcw: transport-command word to be started * @intparm: user defined parameter to be passed to the interrupt handler * @lpm: mask of paths to use * @key: storage key to use for storage access - * @expires: time span in jiffies after which to abort request * * Start the tcw on the given ccw device. Return zero on success, non-zero * otherwise. 
*/ -int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, - unsigned long intparm, u8 lpm, u8 key, - int expires) +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key) { - int ret; - - ccw_device_set_timeout(cdev, expires); - ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); - if (ret != 0) - ccw_device_set_timeout(cdev, 0); - return ret; + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0); } -EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); +EXPORT_SYMBOL(ccw_device_tm_start_key); /** * ccw_device_tm_start() - perform start function diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index af571d8d6925e7a8bb4f35db7e0a7c8aff1bdbf8..90e4e3a7841be1b23e833809587096dc9290f16e 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -157,6 +157,7 @@ struct ccw_device_private { unsigned long intparm; /* user interruption parameter */ struct qdio_irq *qdio_data; struct irb irb; /* device status */ + int async_kill_io_rc; struct senseid senseid; /* SenseID info */ struct pgid pgid[8]; /* path group IDs per chpid*/ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 48b3866a9ded31401d986ea79a6d3f1629c08f74..35286907c636314d1358bb93e1b1575d6938150a 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) int i; for (i = 0; i < nr_queues; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); + q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); if (!q) return -ENOMEM; @@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) { struct ciw *ciw; struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; - int rc; memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); memset(&irq_ptr->siga_flag, 0, 
sizeof(irq_ptr->siga_flag)); @@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data) ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); if (!ciw) { DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); - rc = -EINVAL; - goto out_err; + return -EINVAL; } irq_ptr->equeue = *ciw; ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); if (!ciw) { DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); - rc = -EINVAL; - goto out_err; + return -EINVAL; } irq_ptr->aqueue = *ciw; @@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data) irq_ptr->orig_handler = init_data->cdev->handler; init_data->cdev->handler = qdio_int_handler; return 0; -out_err: - qdio_release_memory(irq_ptr); - return rc; } void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 4a39b54732d031ec65e48bf0a6c6f1a9e9b4abea..72ce6ad95767b4d062c65d12eb308f277bbaa0e7 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -703,6 +703,10 @@ void cp_free(struct channel_program *cp) * and stores the result to ccwchain list. @cp must have been * initialized by a previous call with cp_init(). Otherwise, undefined * behavior occurs. + * For each chain composing the channel program: + * - On entry ch_len holds the count of CCWs to be translated. + * - On exit ch_len is adjusted to the count of successfully translated CCWs. + * This allows cp_free to find in ch_len the count of CCWs to free in a chain. * * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced * as helpers to do ccw chain translation inside the kernel. Basically @@ -737,11 +741,18 @@ int cp_prefetch(struct channel_program *cp) for (idx = 0; idx < len; idx++) { ret = ccwchain_fetch_one(chain, idx, cp); if (ret) - return ret; + goto out_err; } } return 0; +out_err: + /* Only cleanup the chain elements that were actually translated. 
*/ + chain->ch_len = idx; + list_for_each_entry_continue(chain, &cp->ccwchain_list, next) { + chain->ch_len = 0; + } + return ret; } /** diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index e96b85579f21bdc791add2351449daf9dbc833db..3c800642134e4330d62bb8c0053df62618840ff3 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -129,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private, if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { orb = (union orb *)io_region->orb_area; + /* Don't try to build a cp if transport mode is specified. */ + if (orb->tm.b) { + io_region->ret_code = -EOPNOTSUPP; + goto err_out; + } io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), orb); if (io_region->ret_code) diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index a8b831000b2d687b9608a9658ac90650c7131b8e..18c4f933e8b9a82c51fa20e113b6f6ca20566311 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -4,7 +4,7 @@ * * Debug traces for zfcp. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 2002, 2018 */ #define KMSG_COMPONENT "zfcp" @@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, spin_unlock_irqrestore(&dbf->rec_lock, flags); } +/** + * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock + * @tag: identifier for event + * @adapter: adapter on which the erp_action should run + * @port: remote port involved in the erp_action + * @sdev: scsi device involved in the erp_action + * @want: wanted erp_action + * @need: required erp_action + * + * The adapter->erp_lock must not be held. 
+ */ +void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter, + struct zfcp_port *port, struct scsi_device *sdev, + u8 want, u8 need) +{ + unsigned long flags; + + read_lock_irqsave(&adapter->erp_lock, flags); + zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need); + read_unlock_irqrestore(&adapter->erp_lock, flags); +} /** * zfcp_dbf_rec_run_lvl - trace event related to running recovery diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 8ca2ab7deaa9e471a2ce0afb3a3ed31127974b86..b1cbb14fb2ae531b6f9ca52860421304c9a03214 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -4,7 +4,7 @@ * * External function declarations. * - * Copyright IBM Corp. 2002, 2016 + * Copyright IBM Corp. 2002, 2018 */ #ifndef ZFCP_EXT_H @@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, struct zfcp_port *, struct scsi_device *, u8, u8); +extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev, u8 want, u8 need); extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); extern void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 4d2ba5682493221bf32f0c4000021da54dc57044..22f9562f415cbb09a098a83318818c49217a8237 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -4,7 +4,7 @@ * * Interface to Linux SCSI midlayer. * - * Copyright IBM Corp. 2002, 2017 + * Copyright IBM Corp. 
2002, 2018 */ #define KMSG_COMPONENT "zfcp" @@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) ids.port_id = port->d_id; ids.roles = FC_RPORT_ROLE_FCP_TARGET; - zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, - ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, - ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); + zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); if (!rport) { dev_err(&port->adapter->ccw_device->dev, @@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) struct fc_rport *rport = port->rport; if (rport) { - zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, - ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, - ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); + zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); fc_remote_port_delete(rport); port->rport = NULL; } diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index c0a4fcb7fd0afdddb10d7e8dc8dfeeda265979be..998788a967be827106a4c789f35ba8a24e581487 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -752,6 +752,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, int wait; unsigned long flags = 0; unsigned long mflags = 0; + struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *) + fibptr->hw_fib_va; fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA); if (callback) { @@ -762,11 +764,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, wait = 1; - if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { - struct aac_hba_cmd_req *hbacmd = - (struct aac_hba_cmd_req *)fibptr->hw_fib_va; + hbacmd->iu_type = command; - hbacmd->iu_type = command; + if (command == HBA_IU_TYPE_SCSI_CMD_REQ) { /* bit1 of request_id must be 0 */ hbacmd->request_id = 
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); @@ -1530,9 +1530,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) host = aac->scsi_host_ptr; scsi_block_requests(host); aac_adapter_disable_int(aac); - if (aac->thread->pid != current->pid) { + if (aac->thread && aac->thread->pid != current->pid) { spin_unlock_irq(host->host_lock); kthread_stop(aac->thread); + aac->thread = NULL; jafo = 1; } @@ -1619,6 +1620,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) aac->name); if (IS_ERR(aac->thread)) { retval = PTR_ERR(aac->thread); + aac->thread = NULL; goto out; } } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 509fe23fafe1756e690138920cb9170199d323c5..4917649cacd50a6fa0759e49299b9a1028945f2e 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1565,6 +1565,7 @@ static void __aac_shutdown(struct aac_dev * aac) up(&fib->event_wait); } kthread_stop(aac->thread); + aac->thread = NULL; } aac_send_shutdown(aac); @@ -1690,8 +1691,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) * Map in the registers from the adapter. 
*/ aac->base_size = AAC_MIN_FOOTPRINT_SIZE; - if ((*aac_drivers[index].init)(aac)) + if ((*aac_drivers[index].init)(aac)) { + error = -ENODEV; goto out_unmap; + } if (aac->sync_mode) { if (aac_sync_mode) diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 5b6153f23f01a88d0b93118e889223cf26c93e19..6626b28ba8fe64ec6c45c697422f590dc0d29eb8 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1865,6 +1865,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, /* we will not receive ABTS response for this IO */ BNX2FC_IO_DBG(io_req, "Timer context finished processing " "this scsi cmd\n"); + return; } /* Cancel the timeout_work, as we received IO completion */ diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 4d934d6c3e13adb26a91fec4dc665ed1f3f45bcb..e11eff6b0e97de8acc2229a24d0bfa611e781f44 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -952,6 +953,13 @@ static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev) static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) { + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); + struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_conn *conn = session->leadconn; + + if (conn->datadgst_en) + sdev->request_queue->backing_dev_info->capabilities + |= BDI_CAP_STABLE_WRITES; blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY); blk_queue_dma_alignment(sdev->request_queue, 0); return 0; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 10b17da20176d93f1e9cc70ecf0ba0b1ebb5fdc6..0c4b186c852a3461c127d12c12efa5f93266fe8f 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -222,6 +222,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) static void sas_eh_finish_cmd(struct 
scsi_cmnd *cmd) { struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); + struct domain_device *dev = cmd_to_domain_dev(cmd); struct sas_task *task = TO_SAS_TASK(cmd); /* At this point, we only get called following an actual abort @@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) */ sas_end_task(cmd, task); + if (dev_is_sata(dev)) { + /* defer commands to libata so that libata EH can + * handle ata qcs correctly + */ + list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q); + return; + } + /* now finish the command and move it on to the error * handler done list, this also takes it off the * error handler pending list. @@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); } -static void sas_eh_defer_cmd(struct scsi_cmnd *cmd) -{ - struct domain_device *dev = cmd_to_domain_dev(cmd); - struct sas_ha_struct *ha = dev->port->ha; - struct sas_task *task = TO_SAS_TASK(cmd); - - if (!dev_is_sata(dev)) { - sas_eh_finish_cmd(cmd); - return; - } - - /* report the timeout to libata */ - sas_end_task(cmd, task); - list_move_tail(&cmd->eh_entry, &ha->eh_ata_q); -} - static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) { struct scsi_cmnd *cmd, *n; @@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd list_for_each_entry_safe(cmd, n, error_q, eh_entry) { if (cmd->device->sdev_target == my_cmd->device->sdev_target && cmd->device->lun == my_cmd->device->lun) - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); } } @@ -630,12 +623,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * case TASK_IS_DONE: SAS_DPRINTK("%s: task 0x%p is done\n", __func__, task); - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); continue; case TASK_IS_ABORTED: SAS_DPRINTK("%s: task 0x%p is aborted\n", __func__, task); - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); continue; case TASK_IS_AT_LU: 
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); @@ -646,7 +639,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * "recovered\n", SAS_ADDR(task->dev), cmd->device->lun); - sas_eh_defer_cmd(cmd); + sas_eh_finish_cmd(cmd); sas_scsi_clear_queue_lu(work_q, cmd); goto Again; } diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index dc6519b2c53ad48ed419062a2cefb35d2db6be65..3da242201cb4584b51370a0f43bd7801c8eed0cc 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -871,7 +871,12 @@ lpfc_issue_lip(struct Scsi_Host *shost) LPFC_MBOXQ_t *pmboxq; int mbxstatus = MBXERR_ERROR; + /* + * If the link is offline, disabled or BLOCK_MGMT_IO + * it doesn't make any sense to allow issue_lip + */ if ((vport->fc_flag & FC_OFFLINE_MODE) || + (phba->hba_flag & LINK_DISABLED) || (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)) return -EPERM; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d9a03beb76a4bb78abd84d0d4adb9545ab36acfc..4962d665b4d21f2275ede1847a28d252a9bbb6c7 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -698,8 +698,9 @@ lpfc_work_done(struct lpfc_hba *phba) phba->hba_flag & HBA_SP_QUEUE_EVT)) { if (pring->flag & LPFC_STOP_IOCB_EVENT) { pring->flag |= LPFC_DEFERRED_RING_EVENT; - /* Set the lpfc data pending flag */ - set_bit(LPFC_DATA_READY, &phba->data_flags); + /* Preserve legacy behavior. 
*/ + if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) + set_bit(LPFC_DATA_READY, &phba->data_flags); } else { if (phba->link_state >= LPFC_LINK_UP || phba->link_flag & LS_MDS_LOOPBACK) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 455f3ce9fda941d5e2a8e528fedafe6c30c425bf..dc83498024dc38fddfd9ec65d9e4826e1fbec605 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -129,6 +129,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) /* set consumption flag every once in a while */ if (!((q->host_index + 1) % q->entry_repost)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); + else + bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 508ae4bc5ab51399cd8c34a22721a75ff4734742..66a798243e48908f93b1fec5345b9d8063d12cfb 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1921,8 +1921,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) continue; } - for_each_cpu(cpu, mask) + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (cpu >= ioc->cpu_msix_table_sz) + break; ioc->cpu_msix_table[cpu] = reply_q->msix_index; + } } return; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a1a7c45497f5fbef442dc95ea4dc2e70534eafbf..decf9d50142e3392b76d911adf16f4f1f455db39 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -8941,7 +8941,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event_%s%d", ioc->driver_name, ioc->id); ioc->firmware_event_thread = alloc_ordered_workqueue( - ioc->firmware_event_name, WQ_MEM_RECLAIM); + ioc->firmware_event_name, 0); if 
(!ioc->firmware_event_thread) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 7de5d8d75480f0d5934925d53556ca04f9b34510..eb5471bc72635c2d603c82188fef80ca6aad44df 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -1080,16 +1080,16 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, void __iomem *regs = mvi->regs_ex - 0x10200; int drive = (i/3) & (4-1); /* drive number on host */ - u32 block = mr32(MVS_SGPIO_DCTRL + + int driveshift = drive * 8; /* bit offset of drive */ + u32 block = ioread32be(regs + MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id); - /* * if bit is set then create a mask with the first * bit of the drive set in the mask ... */ - u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ? - 1<<(24-drive*8) : 0; + u32 bit = get_unaligned_be32(write_data) & (1 << i) ? + 1 << driveshift : 0; /* * ... and then shift it to the right position based @@ -1098,26 +1098,27 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, switch (i%3) { case 0: /* activity */ block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT) - << (24-drive*8)); + << driveshift); /* hardwire activity bit to SOF */ block |= LED_BLINKA_SOF << ( MVS_SGPIO_DCTRL_ACT_SHIFT + - (24-drive*8)); + driveshift); break; case 1: /* id */ block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT) - << (24-drive*8)); + << driveshift); block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT; break; case 2: /* fail */ block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT) - << (24-drive*8)); + << driveshift); block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT; break; } - mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, - block); + iowrite32be(block, + regs + MVS_SGPIO_DCTRL + + MVS_SGPIO_HOST_OFFSET * mvi->id); } @@ -1132,7 +1133,7 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, void __iomem *regs = mvi->regs_ex - 0x10200; mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * 
mvi->id, - be32_to_cpu(((u32 *) write_data)[i])); + ((u32 *) write_data)[i]); } return reg_count; } diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 93d54acd4a22f70f7be57470056166f58ed69b0d..2e5e04a7623fad85f0746aa11c5eb62f0f9afc6a 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -769,6 +769,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, iscsi_cid = cqe->conn_id; qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; + if (!qedi_conn) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "icid not found 0x%x\n", cqe->conn_id); + return; + } /* Based on this itt get the corresponding qedi_cmd */ spin_lock_bh(&qedi_conn->tmf_work_lock); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cccc34adc0e02429417db29b0b124bf8de57051d..1573749fe615cb5941db790e3668759c40fec2ae 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1840,8 +1840,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) switch (type) { case ISCSI_BOOT_INI_INITIATOR_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", - initiator->initiator_name.byte); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, + initiator->initiator_name.byte); break; default: rc = 0; @@ -1908,8 +1908,8 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, switch (type) { case ISCSI_BOOT_TGT_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", - block->target[idx].target_name.byte); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, + block->target[idx].target_name.byte); break; case ISCSI_BOOT_TGT_IP_ADDR: if (ipv6_en) @@ -1930,20 +1930,20 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, block->target[idx].lun.value[0]); break; case ISCSI_BOOT_TGT_CHAP_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", - chap_name); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + 
chap_name); break; case ISCSI_BOOT_TGT_CHAP_SECRET: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", - chap_secret); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + chap_secret); break; case ISCSI_BOOT_TGT_REV_CHAP_NAME: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", - mchap_name); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + mchap_name); break; case ISCSI_BOOT_TGT_REV_CHAP_SECRET: - rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", - mchap_secret); + rc = sprintf(str, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, + mchap_secret); break; case ISCSI_BOOT_TGT_FLAGS: rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d95b879c2bca5d4fdfb705235f59ba5d03ebb0f9..13a00a42b3ca637737b280de4939cf4af1e6a0e7 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Read all mbox registers? */ - mboxes = (1 << ha->mbx_count) - 1; + WARN_ON_ONCE(ha->mbx_count > 32); + mboxes = (1ULL << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); else @@ -2821,7 +2822,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* Read all mbox registers? 
*/ - mboxes = (1 << ha->mbx_count) - 1; + WARN_ON_ONCE(ha->mbx_count > 32); + mboxes = (1ULL << ha->mbx_count) - 1; if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); else diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8e7c0626f8b5fd743fbcca48dc7b385de39fb3bd..1be76695e6924331e7e16b57137de694eef3caae 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3581,6 +3581,8 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); + qla2x00_wait_for_sess_deletion(base_vha); + /* * if UNLOAD flag is already set, then continue unload, * where it was set first. diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index fc233717355fe22687678c805069478eddd99113..817f312023a999a3561794472c65c31c1ba18605 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -168,6 +168,8 @@ #define DEV_DB_NON_PERSISTENT 0 #define DEV_DB_PERSISTENT 1 +#define QL4_ISP_REG_DISCONNECT 0xffffffffU + #define COPY_ISID(dst_isid, src_isid) { \ int i, j; \ for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 64c6fa563fdb7562f6e33d83357879d8e572940e..a6aa08d9a171cb988a878a85ed279941c5b2ad0c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { static struct scsi_transport_template *qla4xxx_scsi_transport; +static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) +{ + u32 reg_val = 0; + int rval = QLA_SUCCESS; + + if (is_qla8022(ha)) + reg_val = readl(&ha->qla4_82xx_reg->host_status); + else if (is_qla8032(ha) || is_qla8042(ha)) + reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); + else + reg_val = readw(&ha->reg->ctrl_status); + + if (reg_val == QL4_ISP_REG_DISCONNECT) + rval = QLA_ERROR; + + return rval; +} + static int qla4xxx_send_ping(struct 
Scsi_Host *shost, uint32_t iface_num, uint32_t iface_type, uint32_t payload_size, uint32_t pid, struct sockaddr *dst_addr) @@ -9188,10 +9206,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) struct srb *srb = NULL; int ret = SUCCESS; int wait = 0; + int rval; ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", ha->host_no, id, lun, cmd, cmd->cmnd[0]); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + spin_lock_irqsave(&ha->hardware_lock, flags); srb = (struct srb *) CMD_SP(cmd); if (!srb) { @@ -9243,6 +9268,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; int ret = FAILED, stat; + int rval; if (!ddb_entry) return ret; @@ -9262,6 +9288,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) cmd, jiffies, cmd->request->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + /* FIXME: wait for hba to go online */ stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); if (stat != QLA_SUCCESS) { @@ -9305,6 +9337,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) struct scsi_qla_host *ha = to_qla_host(cmd->device->host); struct ddb_entry *ddb_entry = cmd->device->hostdata; int stat, ret; + int rval; if (!ddb_entry) return FAILED; @@ -9322,6 +9355,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + stat = qla4xxx_reset_target(ha, ddb_entry); if (stat != 
QLA_SUCCESS) { starget_printk(KERN_INFO, scsi_target(cmd->device), @@ -9376,9 +9415,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) { int return_status = FAILED; struct scsi_qla_host *ha; + int rval; ha = to_qla_host(cmd->device->host); + rval = qla4xxx_isp_check_reg(ha); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); + return FAILED; + } + if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 961d652ff0a4fd6098f1b1161fb48c05019b0608..60345cb4e414a469b4ca45288a1d0193b8e9ca67 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -859,6 +859,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) /* for passthrough error may be set */ error = BLK_STS_OK; } + /* + * Another corner case: the SCSI status byte is non-zero but 'good'. + * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when + * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD + * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related + * intermediate statuses (both obsolete in SAM-4) as good. 
+ */ + if (status_byte(result) && scsi_status_is_good(result)) { + result = 0; + error = BLK_STS_OK; + } /* * special case: failed zero length commands always need to diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 36f6190931bc07f194621ff2a51633c83561fa51..456ce9f19569f325dfdcf671fc13c24a36016788 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -51,6 +51,8 @@ struct srp_internal { struct transport_container rport_attr_cont; }; +static int scsi_is_srp_rport(const struct device *dev); + #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) #define dev_to_rport(d) container_of(d, struct srp_rport, dev) @@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) return dev_to_shost(r->dev.parent); } +static int find_child_rport(struct device *dev, void *data) +{ + struct device **child = data; + + if (scsi_is_srp_rport(dev)) { + WARN_ON_ONCE(*child); + *child = dev; + } + return 0; +} + static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) { - return transport_class_to_srp_rport(&shost->shost_gendev); + struct device *child = NULL; + + WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child, + find_child_rport) < 0); + return child ? dev_to_rport(child) : NULL; } /** @@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) struct srp_rport *rport = shost_to_rport(shost); pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); - return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && + return rport && rport->fast_io_fail_tmo < 0 && + rport->dev_loss_tmo < 0 && i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 
BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 26e9dc2e0019b4005a2eac54bc741856cc54a803..34405200fccd325bcc12ac714fc2dab784281111 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2528,6 +2528,8 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) int res; struct scsi_device *sdp = sdkp->device; struct scsi_mode_data data; + int disk_ro = get_disk_ro(sdkp->disk); + int old_wp = sdkp->write_prot; set_disk_ro(sdkp->disk, 0); if (sdp->skip_ms_page_3f) { @@ -2567,7 +2569,12 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) "Test WP failed, assume Write Enabled\n"); } else { sdkp->write_prot = ((data.device_specific & 0x80) != 0); - set_disk_ro(sdkp->disk, sdkp->write_prot); + set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro); + if (sdkp->first_scan || old_wp != sdkp->write_prot) { + sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", + sdkp->write_prot ? "on" : "off"); + sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer); + } } } diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 2eb1ac6da5302adf87864647b3985bcca72d3b25..d374b16c9a6112877c9c4ce359e58d7ec647ae22 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1895,7 +1895,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) num = (rem_sz > scatter_elem_sz_prev) ? 
scatter_elem_sz_prev : rem_sz; - schp->pages[k] = alloc_pages(gfp_mask, order); + schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order); if (!schp->pages[k]) goto out; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 9be34d37c3567169770563304dd4a927e22b4bc9..3f3cb72e0c0cdab6a76ea8c4057229f76924899c 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -525,6 +525,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) struct scsi_cd *cd; int ret = -ENXIO; + check_disk_change(bdev); + mutex_lock(&sr_mutex); cd = scsi_cd_get(bdev->bd_disk); if (cd) { @@ -585,18 +587,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { - struct scsi_cd *cd = scsi_cd(disk); + unsigned int ret = 0; + struct scsi_cd *cd; - if (atomic_read(&cd->device->disk_events_disable_depth)) + cd = scsi_cd_get(disk); + if (!cd) return 0; - return cdrom_check_events(&cd->cdi, clearing); + if (!atomic_read(&cd->device->disk_events_disable_depth)) + ret = cdrom_check_events(&cd->cdi, clearing); + + scsi_cd_put(cd); + return ret; } static int sr_block_revalidate_disk(struct gendisk *disk) { - struct scsi_cd *cd = scsi_cd(disk); struct scsi_sense_hdr sshdr; + struct scsi_cd *cd; + + cd = scsi_cd_get(disk); + if (!cd) + return -ENXIO; /* if the unit is not ready, nothing more to do */ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) @@ -605,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk) sr_cd_check(&cd->cdi); get_sectorsize(cd); out: + scsi_cd_put(cd); return 0; } diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 2a21f2d4859229693381e955f141beff3fa64fea..35fab1e18adc3414935b182fe1774c911d733291 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) struct scsi_device *SDev; struct scsi_sense_hdr sshdr; 
int result, err = 0, retries = 0; + unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL; SDev = cd->device; + if (cgc->sense) + senseptr = sense_buffer; + retry: if (!scsi_block_when_processing_errors(SDev)) { err = -ENODEV; @@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) } result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, - cgc->buffer, cgc->buflen, - (unsigned char *)cgc->sense, &sshdr, + cgc->buffer, cgc->buflen, senseptr, &sshdr, cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); + if (cgc->sense) + memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense)); + /* Minimal error checking. Ignore cases we know about, and report the rest. */ if (driver_byte(result) != 0) { switch (sshdr.sense_key) { diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index a3e480e7a257c4fa15561a48bb9e235df2c4c7fe..c44de0b4a995a9d5b546ecfac4f435f8b448680e 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1661,7 +1661,7 @@ static struct scsi_host_template scsi_driver = { .eh_timed_out = storvsc_eh_timed_out, .slave_alloc = storvsc_device_alloc, .slave_configure = storvsc_device_configure, - .cmd_per_lun = 255, + .cmd_per_lun = 2048, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, /* Make sure we dont get a sg segment crosses a page boundary */ diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ca360daa6a253c7a9a0f4f29eaeaf5a49a1a8c98..378af306fda1748d8f587f466bb83fa34dbc9b5c 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa * Look for the greatest clock divisor that allows an * input speed faster than the period. 
*/ - while (div-- > 0) + while (--div > 0) if (kpc >= (div_10M[div] << 2)) break; /* diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 841f01104ba8de76f9bde2ab24fb3d78d407360b..d759ddc272e43bd805005d2db5a40644f4dd17a7 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -589,6 +589,36 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, update_link_startup_timer, false); } +static int ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode(struct ufs_hba *hba) +{ + struct ufs_clk_info *clki; + struct list_head *head = &hba->clk_list_head; + u32 max_freq = 0; + int err = 0; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk) && + (!strcmp(clki->name, "core_clk_unipro"))) { + max_freq = clki->max_freq; + break; + } + } + + switch (max_freq) { + case 300000000: + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 300, 12); + break; + case 150000000: + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150, 6); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -609,10 +639,7 @@ static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba) goto out; if (ufs_qcom_cap_qunipro(host)) { - /* - * set unipro core clock cycles to 150 & clear clock divider - */ - err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150, 6); + err = ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode(hba); if (err) goto out; } @@ -1552,7 +1579,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - int err; + int err = 0; /* * In case ufs_qcom_init() is not yet done, simply ignore. 
@@ -2325,9 +2352,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_pa_layer_attr *attr = &host->dev_req_params; int err = 0; - struct ufs_clk_info *clki; - struct list_head *head = &hba->clk_list_head; - u32 max_freq = 0; if (!ufs_qcom_cap_qunipro(host)) goto out; @@ -2336,25 +2360,7 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx, attr->hs_rate, false, true); - list_for_each_entry(clki, head, list) { - if (!IS_ERR_OR_NULL(clki->clk) && - (!strcmp(clki->name, "core_clk_unipro"))) { - max_freq = clki->max_freq; - break; - } - } - - switch (max_freq) { - case 300000000: - err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 300, 12); - break; - case 150000000: - err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150, 6); - break; - default: - err = -EINVAL; - break; - } + err = ufs_qcom_set_dme_vs_core_clk_ctrl_max_freq_mode(hba); out: return err; } @@ -2547,7 +2553,7 @@ bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, int ufs_qcom_testbus_config(struct ufs_qcom_host *host) { int reg = 0; - int offset, ret = 0, testbus_sel_offset = 19; + int offset = -1, ret = 0, testbus_sel_offset = 19; u32 mask = TEST_BUS_SUB_SEL_MASK; unsigned long flags; struct ufs_hba *hba; @@ -2612,6 +2618,13 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) * is legal */ } + + if (offset < 0) { + dev_err(hba->dev, "%s: Bad offset: %d\n", __func__, offset); + ret = -EINVAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto out; + } mask <<= offset; spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -2674,7 +2687,7 @@ static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba) return; host->testbus.select_major = TSTBUS_UTP_HCI; - for (i = 0; i <= nminor; i++) { + for (i = 0; i < nminor; i++) { host->testbus.select_minor = i; ufs_qcom_testbus_config(host); testbus[i] = ufshcd_readl(hba, 
UFS_TEST_BUS); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index f34cec37711cfc7710715773cde518c759948230..9dbc1d97339aabf537c239509b32968a8a5978bf 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5946,7 +5946,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* REPORT SUPPORTED OPERATION CODES is not supported */ sdev->no_report_opcodes = 1; - /* WRITE_SAME command is not supported*/ + /* WRITE_SAME command is not supported */ sdev->no_write_same = 1; ufshcd_set_queue_depth(sdev); diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h index ca32454f3f0028f06a21d25b488909b58299460d..44a6759ce4f3172592d162cd59f3761e256c0c42 100644 --- a/drivers/slimbus/slim-msm.h +++ b/drivers/slimbus/slim-msm.h @@ -43,7 +43,7 @@ #define MSM_SLIM_VE_MAX_MAP_ADDR 0xFFF #define SLIM_MAX_VE_SLC_BYTES 16 -#define MSM_SLIM_AUTOSUSPEND MSEC_PER_SEC +#define MSM_SLIM_AUTOSUSPEND (MSEC_PER_SEC / 10) #define SLIM_RX_MSGQ_TIMEOUT_VAL 0x10000 /* diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 47e7aa963dbb1f8ff5c9b3b7e41c4ef92c7aba26..1613ccf0c0591921bd044151fb9da9778f700023 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -456,13 +456,21 @@ static int imx_gpc_probe(struct platform_device *pdev) static int imx_gpc_remove(struct platform_device *pdev) { + struct device_node *pgc_node; int ret; + pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); + + /* bail out if DT too old and doesn't provide the necessary info */ + if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && + !pgc_node) + return 0; + /* * If the old DT binding is used the toplevel driver needs to * de-register the power domains */ - if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { + if (!pgc_node) { of_genpd_del_provider(pdev->dev.of_node); ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c index 
8d8659463b3e85f4e1513007dde398ced1bc3e6a..feeb17cebc25e98973dd73160d2118c5cc205dcb 100644 --- a/drivers/soc/lantiq/gphy.c +++ b/drivers/soc/lantiq/gphy.c @@ -30,7 +30,6 @@ struct xway_gphy_priv { struct clk *gphy_clk_gate; struct reset_control *gphy_reset; struct reset_control *gphy_reset2; - struct notifier_block gphy_reboot_nb; void __iomem *membase; char *fw_name; }; @@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = { }; MODULE_DEVICE_TABLE(of, xway_gphy_match); -static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb) -{ - return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb); -} - -static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb, - unsigned long code, void *unused) -{ - struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb); - - if (priv) { - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); - } - - return NOTIFY_DONE; -} - static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, dma_addr_t *dev_addr) { @@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev) reset_control_deassert(priv->gphy_reset); reset_control_deassert(priv->gphy_reset2); - /* assert the gphy reset because it can hang after a reboot: */ - priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify; - priv->gphy_reboot_nb.priority = -1; - - ret = register_reboot_notifier(&priv->gphy_reboot_nb); - if (ret) - dev_warn(dev, "Failed to register reboot notifier\n"); - platform_set_drvdata(pdev, priv); return ret; @@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev) static int xway_gphy_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; struct xway_gphy_priv *priv = platform_get_drvdata(pdev); - int ret; - - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); iowrite32be(0, priv->membase); clk_disable_unprepare(priv->gphy_clk_gate); - ret = 
unregister_reboot_notifier(&priv->gphy_reboot_nb); - if (ret) - dev_warn(dev, "Failed to unregister reboot notifier\n"); - return 0; } diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 0dd0adc4e03f393cf8188774d28fdcffcdf98ea1..4ddf289ab1699640e0555d6189f64d6c0b0236ad 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -2,6 +2,25 @@ # QCOM Soc drivers # menu "Qualcomm SoC drivers" +config QCOM_MEM_OFFLINE + bool "Dynamic Memory Region Offline driver" + help + Add support for DDR Self-Refresh power management through the dynamic + memory offline framework. This driver interfaces between the memory + hotplug subsystem and AOP which hot adds or removes memory blocks and + controls the start/stop of self-refresh of these DDR regions. This + helps reduce power consumption during idle mode of the system. + If unsure, say N + +config OVERRIDE_MEMORY_LIMIT + bool "Override memory limit set by the kernel boot parameter" + depends on QCOM_MEM_OFFLINE + help + Override any memory limit set by the kernel boot parameter with + limit set by mem-offline dt entry so that memory offline framework + can initialize remaining memory with movable pages for memory + hot-plugging. + If unsure, say N config QCOM_CPUSS_DUMP bool "CPU Subsystem Dumping support" @@ -75,6 +94,14 @@ config QCOM_SM6150_LLCC data required to configure LLCC so that clients can start using the LLCC slices. +config QCOM_SDMMAGPIE_LLCC + tristate "Qualcomm Technologies, Inc. SDMMAGPIE LLCC driver" + depends on QCOM_LLCC + help + Say yes here to enable the LLCC driver for SDMMAGPIE. This is provides + data required to configure LLCC so that clients can start using the + LLCC slices. + config QCOM_LLCC_AMON tristate "Qualcomm Technologies, Inc. LLCC Activity Monitor(AMON) driver" depends on QCOM_LLCC @@ -93,6 +120,16 @@ config QCOM_LLCC_AMON_PANIC deadlock detection mode AMON will trigger an interrupt if some LLCC request ages out. 
+config QCOM_LLCC_PERFMON + tristate "Qualcomm Technologies, Inc. LLCC Perfmon driver" + depends on QCOM_LLCC + help + This option enables driver for LLCC Performance monitor block. Using + this various events in different LLCC sub block ports can be monitored. + This is used for performance and debug activity and exports SYSFS + interface. SYSFS interface used for configure and dump the LLCC + performance events. + config QCOM_PM bool "Qualcomm Power Management" depends on ARCH_QCOM && !ARM64 @@ -112,6 +149,26 @@ config QCOM_QMI_HELPERS clients and this helpers provide the common functionality needed for doing this from a kernel driver. +config QCOM_QMI_DFC + bool "Enable burst mode flow control" + depends on QCOM_QMI_HELPERS + depends on RMNET + help + Say y here to enable support for burst mode data flow control. + DFC client provides an interface to the modem dfc service and + does burst mode flow control. It enables the flow on receiving flow + status indication and disables flows while grant size is reached. + If unsure or not use burst mode flow control, say 'N'. + +config QCOM_QMI_POWER_COLLAPSE + bool "Enable power collapse feature" + depends on QCOM_QMI_DFC + help + Say y here to enable support for power collapse. + It is to register/unregister the flow status indication callback + based on detected flow status. + If unsure or not use power collapse feature, say 'N'. + config QCOM_SMEM tristate "Qualcomm Shared Memory Manager (SMEM)" depends on ARCH_QCOM @@ -557,7 +614,7 @@ config MSM_JTAGV8 config QCOM_QDSS_BRIDGE bool "Configure bridge driver for QTI/Qualcomm Technologies, Inc. MDM" - depends on MSM_MHI + depends on MHI_BUS help The driver will help route diag traffic from modem side over the QDSS sub-system to USB on APSS side. 
The driver acts as a bridge between the diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index e51110f3dac13a1da265465710c2a615c274ffa7..4a8af4747d08e637b43f99af9cf342ddbdbc63c7 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -8,11 +8,15 @@ obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o obj-$(CONFIG_QCOM_SM8150_LLCC) += llcc-sm8150.o obj-$(CONFIG_QCOM_SDMSHRIKE_LLCC) += llcc-sdmshrike.o obj-$(CONFIG_QCOM_SM6150_LLCC) += llcc-sm6150.o +obj-$(CONFIG_QCOM_LLCC_PERFMON) += llcc_perfmon.o +obj-$(CONFIG_QCOM_SDMMAGPIE_LLCC) += llcc-sdmmagpie.o obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o obj-$(CONFIG_QCOM_PM) += spm.o obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o qmi_helpers-y += qmi_encdec.o qmi_helpers-y += qmi_interface.o +obj-$(CONFIG_QCOM_QMI_DFC) += qmi_rmnet.o +obj-$(CONFIG_QCOM_QMI_DFC) += dfc_qmi.o obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o obj-$(CONFIG_QCOM_SMEM) += smem.o obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o @@ -85,3 +89,4 @@ ifdef CONFIG_DEBUG_FS obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd-debug.o endif obj-$(CONFIG_QCOM_SMP2P_SLEEPSTATE) += smp2p_sleepstate.o +obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c new file mode 100644 index 0000000000000000000000000000000000000000..dea5f5c07c50129b3335818235973bb0573c073c --- /dev/null +++ b/drivers/soc/qcom/dfc_qmi.c @@ -0,0 +1,952 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include "qmi_rmnet_i.h" + +#define DFC_MAX_BEARERS_V01 16 +#define DFC_MAX_QOS_ID_V01 2 +#define DEFAULT_FLOW_ID 0 + +/* bearer list update result */ +#define NO_BEARER 0 +#define NO_CHANGE 1 +#define UPDATED 2 + +struct qmap_header { + uint8_t pad_len:6; + uint8_t reserved_bit:1; + uint8_t cd_bit:1; + uint8_t mux_id; + __be16 pkt_len; +} __aligned(1); + +struct dfc_ack_cmd { + struct qmap_header header; + uint8_t command_name; + uint8_t cmd_type:2; + uint8_t reserved:6; + uint16_t reserved2; + uint32_t transaction_id; + uint8_t qos_ver:2; + uint8_t reserved3:6; + uint8_t qos_type:2; + uint8_t reserved4:6; + uint16_t dfc_seq; + uint8_t reserved5[3]; + uint8_t bearer_id; +} __aligned(1); + +struct dfc_qos_ids { + uint32_t qos_id_valid; + uint32_t qos_id; +}; + +struct dfc_work { + struct work_struct work; + struct net_device *dev; + uint8_t bearer_id; + uint8_t ack_req; + uint16_t seq; + uint8_t mux_id; +}; + +struct dfc_qmi_data { + void *rmnet_port; + struct workqueue_struct *dfc_wq; + struct work_struct svc_arrive; + struct qmi_handle handle; + struct sockaddr_qrtr ssctl; + int modem; +}; + +struct dfc_svc_ind { + struct work_struct work; + struct dfc_qmi_data *data; + void *dfc_info; +}; + +#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE +struct dfc_ind_reg { + struct work_struct work; + struct dfc_qmi_data *data; + int reg; +}; +static void dfc_ind_reg_dereg(struct work_struct *work); +#endif + +static void dfc_svc_init(struct work_struct *work); +static void dfc_do_burst_flow_control(struct work_struct *work); +static void dfc_disable_flow(struct work_struct *work); + +/* **************************************************** */ +#define DFC_SERVICE_ID_V01 0x4E +#define DFC_SERVICE_VERS_V01 0x01 +#define DFC_TIMEOUT_MS 10000 + +#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020 +#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020 +#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN 11 +#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 
7 + +#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001 +#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001 +#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 4 +#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7 + +#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022 +#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 471 + +struct dfc_bind_client_req_msg_v01 { + uint8_t ep_id_valid; + struct data_ep_id_type_v01 ep_id; +}; + +struct dfc_bind_client_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +struct dfc_indication_register_req_msg_v01 { + uint8_t report_flow_status_valid; + uint8_t report_flow_status; +}; + +struct dfc_indication_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +enum dfc_ip_type_enum_v01 { + DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + DFC_IPV4_TYPE_V01 = 0x4, + DFC_IPV6_TYPE_V01 = 0x6, + DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 +}; + +struct dfc_qos_id_type_v01 { + uint32_t qos_id; + enum dfc_ip_type_enum_v01 ip_type; +}; + +struct dfc_flow_status_info_type_v01 { + uint8_t subs_id; + uint8_t mux_id; + uint8_t bearer_id; + uint32_t num_bytes; + uint16_t seq_num; + uint8_t qos_ids_len; + struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01]; +}; + +static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct dfc_qos_id_type_v01, + qos_id), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum dfc_ip_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct dfc_qos_id_type_v01, + ip_type), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 
1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + subs_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + mux_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + bearer_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + num_bytes), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + seq_num), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + qos_ids_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_QOS_ID_V01, + .elem_size = sizeof(struct dfc_qos_id_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + qos_ids), + .ei_array = dfc_qos_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +/** + * Indication Message; Gives the flow status to control points + * that have registered for this event reporting. 
+ */ +struct dfc_flow_status_ind_msg_v01 { + uint8_t flow_status_valid; + uint8_t flow_status_len; + struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01]; + uint8_t eod_ack_reqd_valid; + uint8_t eod_ack_reqd; +}; + +static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct dfc_bind_client_req_msg_v01, + ep_id_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct data_ep_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct dfc_bind_client_req_msg_v01, + ep_id), + .ei_array = data_ep_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct dfc_bind_client_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_indication_register_req_msg_v01, + report_flow_status_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_indication_register_req_msg_v01, + report_flow_status), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info 
dfc_indication_register_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + dfc_indication_register_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + flow_status_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + flow_status_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(struct + dfc_flow_status_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + flow_status), + .ei_array = dfc_flow_status_info_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + eod_ack_reqd_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + eod_ack_reqd), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static int +dfc_bind_client_req(struct qmi_handle *dfc_handle, + struct sockaddr_qrtr *ssctl, struct svc_info *svc) +{ + struct dfc_bind_client_resp_msg_v01 *resp; + struct 
dfc_bind_client_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) { + kfree(req); + return -ENOMEM; + } + /* Prepare req and response message */ + req->ep_id_valid = 1; + req->ep_id.ep_type = svc->ep_type; + req->ep_id.iface_id = svc->iface_id; + + ret = qmi_txn_init(dfc_handle, &txn, + dfc_bind_client_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("Fail to init txn for bind client resp %d\n", ret); + goto out; + } + + ret = qmi_send_request(dfc_handle, ssctl, &txn, + QMI_DFC_BIND_CLIENT_REQ_V01, + QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN, + dfc_bind_client_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + pr_err("Fail to send bind client req %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS); + if (ret < 0) { + pr_err("bind client resp wait failed ret %d\n", ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("QMI bind client request rejected, result:%d err:%d\n", + resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + +static int +dfc_indication_register_req(struct qmi_handle *dfc_handle, + struct sockaddr_qrtr *ssctl, uint8_t reg) +{ + struct dfc_indication_register_resp_msg_v01 *resp; + struct dfc_indication_register_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) { + kfree(req); + return -ENOMEM; + } + /* Prepare req and response message */ + req->report_flow_status_valid = 1; + req->report_flow_status = reg; + + ret = qmi_txn_init(dfc_handle, &txn, + dfc_indication_register_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("%s() Failed init txn for resp %d\n", __func__, ret); + goto out; + } + + ret = qmi_send_request(dfc_handle, ssctl, &txn, + 
QMI_DFC_INDICATION_REGISTER_REQ_V01, + QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN, + dfc_indication_register_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + pr_err("%s() Fail to send indication register req %d\n", + __func__, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS); + if (ret < 0) { + pr_err("%s() resp wait failed ret:%d\n", __func__, ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("%s() rejected, result:%d error:%d\n", + __func__, resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + +static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi) +{ + int rc; + + rc = dfc_bind_client_req(&data->handle, &data->ssctl, + &qmi->fc_info[data->modem].svc); + if (rc < 0) + return rc; + + return dfc_indication_register_req(&data->handle, &data->ssctl, 1); +} + +static int dfc_disable_bearer_flows(struct net_device *dev, uint8_t bearer_id) +{ + struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); + struct list_head *p; + struct rmnet_flow_map *itm; + int rc = 0; + + if (!qos) + return 0; + + list_for_each(p, &qos->flow_head) { + itm = list_entry(p, struct rmnet_flow_map, list); + + if (unlikely(!itm)) + return 0; + + if (itm->bearer_id == bearer_id) { + rtnl_lock(); + tc_qdisc_flow_control(dev, itm->tcm_handle, 0); + rtnl_unlock(); + rc++; + } + } + return rc; +} + +static int dfc_update_fc_map(struct qos_info *qos, uint8_t ack_req, + struct dfc_flow_status_info_type_v01 *fc_info) +{ + struct rmnet_bearer_map *itm = NULL; + unsigned long flags; + int rc = NO_BEARER; + + write_lock_irqsave(&qos->flow_map_lock, flags); + itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id); + if (itm) { + if ((itm->grant_size == fc_info->num_bytes) && + (itm->counter > 0)) { + /*flow is enabled and grant_size is the same*/ + rc = NO_CHANGE; + } else { + itm->grant_size = fc_info->num_bytes; + itm->seq = 
fc_info->seq_num; + itm->ack_req = ack_req; + rc = UPDATED; + } + itm->counter = 0; + } + write_unlock_irqrestore(&qos->flow_map_lock, flags); + return rc; +} + +static int dfc_do_fc(struct net_device *dev, uint32_t flow_id, + int ip_type, int enable) +{ + struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); + struct rmnet_flow_map *itm = NULL; + int len = 0; + + if (!qos) + return 0; + + itm = qmi_rmnet_get_flow_map(qos, flow_id, ip_type); + if (itm) { + rtnl_lock(); + len = tc_qdisc_flow_control(dev, itm->tcm_handle, enable); + rtnl_unlock(); + } + return len; +} + +static void dfc_disable_flow(struct work_struct *work) +{ + struct dfc_work *data = (struct dfc_work *)work; + int rc = dfc_disable_bearer_flows(data->dev, data->bearer_id); + + pr_debug("%s() %d flows disabled\n", __func__, rc); + kfree(data); +} + +static void dfc_do_flow_controls(struct net_device *dev, + struct dfc_flow_status_info_type_v01 *flow) +{ + int i; + int enable = (flow->num_bytes > 0) ? 1 : 0; + int qdisc_len; + + for (i = 0; i < flow->qos_ids_len; i++) { + /* do flow control per specified flow */ + if (flow->qos_ids[i].ip_type == DFC_IPV4_TYPE_V01) { + qdisc_len = dfc_do_fc(dev, flow->qos_ids[i].qos_id, + AF_INET, enable); + pr_debug("%s() qdisc_len=%d\n", __func__, qdisc_len); + } else if (flow->qos_ids[i].ip_type == DFC_IPV6_TYPE_V01) { + qdisc_len = dfc_do_fc(dev, flow->qos_ids[i].qos_id, + AF_INET6, enable); + } else { + pr_err("%s() ip type[%d] not supported\n", + __func__, flow->qos_ids[i].ip_type); + } + } +} + +static void dfc_do_burst_flow_control(struct work_struct *work) +{ + struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work; + struct dfc_flow_status_ind_msg_v01 *ind = + (struct dfc_flow_status_ind_msg_v01 *)svc_ind->dfc_info; + struct net_device *dev; + struct qos_info *qos; + struct dfc_flow_status_info_type_v01 *flow_status; + uint8_t ack_req = ind->eod_ack_reqd_valid ? 
ind->eod_ack_reqd : 0; + int i, rc; + + for (i = 0; i < ind->flow_status_len; i++) { + flow_status = &ind->flow_status[i]; + dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port, + flow_status->mux_id); + if (!dev) + goto clean_out; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + continue; + + rc = dfc_update_fc_map(qos, ack_req, flow_status); + if (rc == NO_BEARER) { + pr_debug("%s: num_bytes[%u]\n", + __func__, flow_status->num_bytes); + qos->default_grant = flow_status->num_bytes; + continue; + } else if (rc == NO_CHANGE) { + continue; + } else { + if ((flow_status->num_bytes > 0) || + (flow_status->bearer_id != 0xFF)) + dfc_do_flow_controls(dev, flow_status); + else + netif_stop_queue(dev); + + } + } + +clean_out: + kfree(ind); + kfree(svc_ind); +} + +static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data, + handle); + struct dfc_flow_status_ind_msg_v01 *ind_msg; + struct dfc_svc_ind *svc_ind; + + if (qmi != &dfc->handle) { + pr_err("Wrong client\n"); + return; + } + + ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data; + if (ind_msg->flow_status_valid) { + if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) { + pr_err("Invalid fc info len: %d\n", + ind_msg->flow_status_len); + return; + } + + svc_ind = kmalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC); + if (!svc_ind) + return; + + INIT_WORK((struct work_struct *)svc_ind, + dfc_do_burst_flow_control); + svc_ind->dfc_info = kmalloc(sizeof(*ind_msg), GFP_ATOMIC); + if (!svc_ind->dfc_info) { + kfree(svc_ind); + return; + } + + memcpy(svc_ind->dfc_info, ind_msg, sizeof(*ind_msg)); + svc_ind->data = dfc; + queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind); + } +} + +static void dfc_svc_init(struct work_struct *work) +{ + int rc = 0; + struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data, + svc_arrive); + struct qmi_info *qmi; + + qmi = (struct 
qmi_info *)rmnet_get_qmi_pt(data->rmnet_port); + if (!qmi) { + qmi_handle_release(&data->handle); + return; + } + + /* Request indication from modem service */ + rc = dfc_init_service(data, qmi); + if (rc < 0) { + qmi_handle_release(&data->handle); + return; + } + + qmi->fc_info[data->modem].dfc_client = (void *)data; + pr_debug("Connection established with the DFC Service\n"); +} + +static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data, + handle); + + data->ssctl.sq_family = AF_QIPCRTR; + data->ssctl.sq_node = svc->node; + data->ssctl.sq_port = svc->port; + + queue_work(data->dfc_wq, &data->svc_arrive); + + return 0; +} + +static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data, + handle); + struct qmi_info *qmi_pt; + int modem; + + pr_debug("Connection with DFC service lost\n"); + qmi_pt = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port); + if (qmi_pt) { + for (modem = 0; modem < 2; modem++) { + if (qmi_pt->fc_info[modem].dfc_client == (void *)data) + qmi_pt->fc_info[modem].dfc_client = NULL; + break; + } + } + destroy_workqueue(data->dfc_wq); + kfree(data); +} + +static struct qmi_ops server_ops = { + .new_server = dfc_svc_arrive, + .del_server = dfc_svc_exit, +}; + +static struct qmi_msg_handler qmi_indication_handler[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_DFC_FLOW_STATUS_IND_V01, + .ei = dfc_flow_status_ind_v01_ei, + .decoded_size = QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN, + .fn = dfc_clnt_ind_cb, + }, + {}, +}; + +/* **************************************************** */ +int dfc_qmi_client_init(void *port, int modem) +{ + struct qmi_info *qmi = rmnet_get_qmi_pt(port); + struct dfc_qmi_data *data; + int rc = 0; + + if (!qmi) + return -EINVAL; + + data = kmalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + memset(data, 0, 
sizeof(struct dfc_qmi_data)); + data->rmnet_port = port; + data->modem = modem; + + data->dfc_wq = create_singlethread_workqueue("dfc_wq"); + if (!data->dfc_wq) { + pr_err("%s Could not create workqueue\n", __func__); + kfree(data); + return -ENOMEM; + } + INIT_WORK(&data->svc_arrive, dfc_svc_init); + rc = qmi_handle_init(&data->handle, + QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN, + &server_ops, qmi_indication_handler); + if (rc < 0) { + pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc); + kfree(data); + return rc; + } + + rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01, + DFC_SERVICE_VERS_V01, + qmi->fc_info[modem].svc.instance); + if (rc < 0) { + pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc); + qmi_handle_release(&data->handle); + } + + return rc; +} + +void dfc_qmi_client_exit(void *dfc_data) +{ + struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; + + if (!data) + return; + + qmi_handle_release(&data->handle); +} + +void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, + struct sk_buff *skb) +{ + struct dfc_work *svc_check; + struct rmnet_bearer_map *bearer; + struct rmnet_flow_map *itm; + unsigned long flags; + int ip_type; + + if (!qos || !skb) + return; + + ip_type = (ip_hdr(skb)->version == IP_VER_6) ? 
AF_INET6 : AF_INET; + write_lock_irqsave(&qos->flow_map_lock, flags); + itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type); + + if (!itm) { + write_unlock_irqrestore(&qos->flow_map_lock, flags); + } else { + bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id); + if (unlikely(!bearer)) { + write_unlock_irqrestore(&qos->flow_map_lock, flags); + return; + } + bearer->counter += skb->len; + if (bearer->counter < bearer->grant_size) { + write_unlock_irqrestore(&qos->flow_map_lock, flags); + } else { + bearer->counter = 0; + write_unlock_irqrestore(&qos->flow_map_lock, flags); + + svc_check = kmalloc(sizeof(struct dfc_work), + GFP_ATOMIC); + if (!svc_check) + return; + + INIT_WORK((struct work_struct *)svc_check, + dfc_disable_flow); + svc_check->dev = dev; + svc_check->bearer_id = bearer->bearer_id; + svc_check->ack_req = bearer->ack_req; + svc_check->seq = bearer->seq; + svc_check->mux_id = qos->mux_id; + schedule_work((struct work_struct *)svc_check); + } + } +} + +#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE +static void dfc_ind_reg_dereg(struct work_struct *work) +{ + struct dfc_ind_reg *ind_reg = (struct dfc_ind_reg *)work; + + dfc_indication_register_req(&ind_reg->data->handle, + &ind_reg->data->ssctl, ind_reg->reg); + kfree(ind_reg); +} + +int dfc_reg_unreg_fc_ind(void *dfc_data, int reg) +{ + struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; + struct dfc_ind_reg *ind_reg; + + if (!data) + return -EINVAL; + + ind_reg = kmalloc(sizeof(struct dfc_ind_reg), GFP_ATOMIC); + if (!ind_reg) + return -ENOMEM; + + INIT_WORK((struct work_struct *)ind_reg, dfc_ind_reg_dereg); + ind_reg->data = data; + ind_reg->reg = reg; + schedule_work((struct work_struct *)ind_reg); + return 0; +} +#endif /*defed CONFIG_QCOM_QMI_POWER_COLLAPSE*/ diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 759db12ba16497db71104552e6f3269188931b5c..c8abef8630700f0575b3e89188b1147b5d905477 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -602,8 
+602,9 @@ bool icnss_is_fw_down(void) { if (!penv) return false; - else - return test_bit(ICNSS_FW_DOWN, &penv->state); + + return test_bit(ICNSS_FW_DOWN, &penv->state) || + test_bit(ICNSS_PD_RESTART, &penv->state); } EXPORT_SYMBOL(icnss_is_fw_down); diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c index 55ae54514dccf0d7a56b85df28525bf6fb0dfdac..d3815ab251f5bc460c6b0a832a39dd424b90c3fa 100644 --- a/drivers/soc/qcom/icnss_qmi.c +++ b/drivers/soc/qcom/icnss_qmi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -10,6 +10,8 @@ * GNU General Public License for more details. */ +#define pr_fmt(fmt) "icnss_qmi: " fmt + #include #include #include @@ -58,6 +60,11 @@ void icnss_ignore_fw_timeout(bool ignore) void icnss_ignore_fw_timeout(bool ignore) { } #endif +#define icnss_qmi_fatal_err(_fmt, ...) 
do { \ + icnss_pr_err("fatal: "_fmt, ##__VA_ARGS__); \ + ICNSS_QMI_ASSERT(); \ + } while (0) + int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv) { int ret; @@ -89,7 +96,7 @@ int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv) ret = qmi_txn_init(&priv->qmi, &txn, wlfw_msa_info_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for MSA Mem info resp %d\n", + icnss_qmi_fatal_err("Fail to init txn for MSA Mem info resp %d\n", ret); goto out; } @@ -100,16 +107,17 @@ int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv) wlfw_msa_info_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send MSA Mem info req %d\n", ret); + icnss_qmi_fatal_err("Fail to send MSA Mem info req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("MSA Mem info resp wait failed ret %d\n", ret); + icnss_qmi_fatal_err("MSA Mem info resp wait failed ret %d\n", + ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI MSA Mem info request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI MSA Mem info request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; goto out; @@ -119,7 +127,7 @@ int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv) resp->mem_region_info_len); if (resp->mem_region_info_len > QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01) { - icnss_pr_err("Invalid memory region length received: %d\n", + icnss_qmi_fatal_err("Invalid memory region length received: %d\n", resp->mem_region_info_len); ret = -EINVAL; goto out; @@ -148,7 +156,6 @@ int wlfw_msa_mem_info_send_sync_msg(struct icnss_priv *priv) kfree(resp); kfree(req); priv->stats.msa_info_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -180,7 +187,7 @@ int wlfw_msa_ready_send_sync_msg(struct icnss_priv *priv) ret = qmi_txn_init(&priv->qmi, &txn, wlfw_msa_ready_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init 
txn for MSA Mem Ready resp %d\n", + icnss_qmi_fatal_err("Fail to init txn for MSA Mem Ready resp %d\n", ret); goto out; } @@ -191,17 +198,17 @@ int wlfw_msa_ready_send_sync_msg(struct icnss_priv *priv) wlfw_msa_ready_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send MSA Mem Ready req %d\n", ret); + icnss_qmi_fatal_err("Fail to send MSA Mem Ready req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("MSA Mem Ready resp wait failed with ret %d\n", + icnss_qmi_fatal_err("MSA Mem Ready resp wait failed with ret %d\n", ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI MSA Mem Ready request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI MSA Mem Ready request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; goto out; @@ -217,7 +224,6 @@ int wlfw_msa_ready_send_sync_msg(struct icnss_priv *priv) kfree(resp); kfree(req); priv->stats.msa_ready_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -262,7 +268,7 @@ int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) ret = qmi_txn_init(&priv->qmi, &txn, wlfw_ind_register_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for Ind Register resp %d\n", + icnss_qmi_fatal_err("Fail to init txn for Ind Register resp %d\n", ret); goto out; } @@ -273,17 +279,17 @@ int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) wlfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send Ind Register req %d\n", ret); + icnss_qmi_fatal_err("Fail to send Ind Register req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("Ind Register resp wait failed with ret %d\n", + icnss_qmi_fatal_err("Ind Register resp wait failed with ret %d\n", ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI Ind Register request 
rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI Ind Register request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; goto out; @@ -299,7 +305,6 @@ int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv) kfree(resp); kfree(req); priv->stats.ind_register_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -329,7 +334,8 @@ int wlfw_cap_send_sync_msg(struct icnss_priv *priv) ret = qmi_txn_init(&priv->qmi, &txn, wlfw_cap_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for Capability resp %d\n", ret); + icnss_qmi_fatal_err("Fail to init txn for Capability resp %d\n", + ret); goto out; } @@ -339,20 +345,21 @@ int wlfw_cap_send_sync_msg(struct icnss_priv *priv) wlfw_cap_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send Capability req %d\n", ret); + icnss_qmi_fatal_err("Fail to send Capability req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("Capability resp wait failed with ret %d\n", ret); + icnss_qmi_fatal_err("Capability resp wait failed with ret %d\n", + ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI Capability request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI Capability request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; if (resp->resp.error == QMI_ERR_PLAT_CCPM_CLK_INIT_FAILED) - icnss_pr_err("RF card not present\n"); + icnss_qmi_fatal_err("RF card not present\n"); goto out; } @@ -394,7 +401,6 @@ int wlfw_cap_send_sync_msg(struct icnss_priv *priv) kfree(resp); kfree(req); priv->stats.cap_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -438,7 +444,7 @@ int wlfw_wlan_mode_send_sync_msg(struct icnss_priv *priv, ret = qmi_txn_init(&priv->qmi, &txn, wlfw_wlan_mode_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for Mode resp %d\n", ret); + icnss_qmi_fatal_err("Fail 
to init txn for Mode resp %d\n", ret); goto out; } @@ -448,16 +454,16 @@ int wlfw_wlan_mode_send_sync_msg(struct icnss_priv *priv, wlfw_wlan_mode_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send Mode req %d\n", ret); + icnss_qmi_fatal_err("Fail to send Mode req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("Mode resp wait failed with ret %d\n", ret); + icnss_qmi_fatal_err("Mode resp wait failed with ret %d\n", ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI Mode request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI Mode request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; goto out; @@ -473,7 +479,6 @@ int wlfw_wlan_mode_send_sync_msg(struct icnss_priv *priv, kfree(resp); kfree(req); priv->stats.mode_req_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -507,7 +512,8 @@ int wlfw_wlan_cfg_send_sync_msg(struct icnss_priv *priv, ret = qmi_txn_init(&priv->qmi, &txn, wlfw_wlan_cfg_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for Config resp %d\n", ret); + icnss_qmi_fatal_err("Fail to init txn for Config resp %d\n", + ret); goto out; } @@ -517,16 +523,17 @@ int wlfw_wlan_cfg_send_sync_msg(struct icnss_priv *priv, wlfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send Config req %d\n", ret); + icnss_qmi_fatal_err("Fail to send Config req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("Config resp wait failed with ret %d\n", ret); + icnss_qmi_fatal_err("Config resp wait failed with ret %d\n", + ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI Config request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI Config request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = 
-resp->resp.result; goto out; @@ -542,7 +549,6 @@ int wlfw_wlan_cfg_send_sync_msg(struct icnss_priv *priv, kfree(resp); kfree(req); priv->stats.cfg_req_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -576,7 +582,7 @@ int wlfw_ini_send_sync_msg(struct icnss_priv *priv, uint8_t fw_log_mode) ret = qmi_txn_init(&priv->qmi, &txn, wlfw_ini_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for INI resp %d\n", ret); + icnss_qmi_fatal_err("Fail to init txn for INI resp %d\n", ret); goto out; } @@ -586,16 +592,16 @@ int wlfw_ini_send_sync_msg(struct icnss_priv *priv, uint8_t fw_log_mode) wlfw_ini_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send INI req %d\n", ret); + icnss_qmi_fatal_err("Fail to send INI req %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("INI resp wait failed with ret %d\n", ret); + icnss_qmi_fatal_err("INI resp wait failed with ret %d\n", ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI INI request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI INI request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; goto out; @@ -611,7 +617,6 @@ int wlfw_ini_send_sync_msg(struct icnss_priv *priv, uint8_t fw_log_mode) kfree(resp); kfree(req); priv->stats.ini_req_err++; - ICNSS_QMI_ASSERT(); return ret; } @@ -783,7 +788,7 @@ int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv) ret = qmi_txn_init(&priv->qmi, &txn, wlfw_rejuvenate_ack_resp_msg_v01_ei, resp); if (ret < 0) { - icnss_pr_err("Fail to init txn for Rejuvenate Ack resp %d\n", + icnss_qmi_fatal_err("Fail to init txn for Rejuvenate Ack resp %d\n", ret); goto out; } @@ -794,17 +799,18 @@ int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv) wlfw_rejuvenate_ack_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); - icnss_pr_err("Fail to send Rejuvenate Ack req %d\n", ret); + 
icnss_qmi_fatal_err("Fail to send Rejuvenate Ack req %d\n", + ret); goto out; } ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); if (ret < 0) { - icnss_pr_err("Rejuvenate Ack resp wait failed with ret %d\n", + icnss_qmi_fatal_err("Rejuvenate Ack resp wait failed with ret %d\n", ret); goto out; } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - icnss_pr_err("QMI Rejuvenate Ack request rejected, result:%d error:%d\n", + icnss_qmi_fatal_err("QMI Rejuvenate Ack request rejected, result:%d error:%d\n", resp->resp.result, resp->resp.error); ret = -resp->resp.result; goto out; @@ -820,7 +826,6 @@ int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv) kfree(resp); kfree(req); priv->stats.rejuvenate_ack_err++; - ICNSS_QMI_ASSERT(); return ret; } diff --git a/drivers/soc/qcom/llcc-sdmmagpie.c b/drivers/soc/qcom/llcc-sdmmagpie.c new file mode 100644 index 0000000000000000000000000000000000000000..7e772cf7151cab4453632cf1ccb9b5d5b017b2ea --- /dev/null +++ b/drivers/soc/qcom/llcc-sdmmagpie.c @@ -0,0 +1,100 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +/* + * SCT entry contains the following parameters + * name: Name of the client's use case for which the llcc slice is used + * uid: Unique id for the client's use case + * slice_id: llcc slice id for each client + * max_cap: The maximum capacity of the cache slice provided in KB + * priority: Priority of the client used to select victim line for replacement + * fixed_size: Determines if the slice has a fixed capacity + * bonus_ways: Bonus ways to be used by any slice, bonus way is used only if + * it's not a reserved way. + * res_ways: Reserved ways for the cache slice, the reserved ways cannot be used + * by any other client than the one it's assigned to. + * cache_mode: Each slice operates as a cache, this controls the mode of the + * slice normal or TCM + * probe_target_ways: Determines what ways to probe for access hit. When + * configured to 1 only bonus and reserved ways are probed. + * when configured to 0 all ways in llcc are probed. + * dis_cap_alloc: Disable capacity based allocation for a client + * retain_on_pc: If this bit is set and client has maintained active vote + * then the ways assigned to this client are not flushed on power + * collapse. 
+ * activate_on_init: Activate the slice immediately after the SCT is programmed + */ +#define SCT_ENTRY(n, uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \ + { \ + .name = n, \ + .usecase_id = uid, \ + .slice_id = sid, \ + .max_cap = mc, \ + .priority = p, \ + .fixed_size = fs, \ + .bonus_ways = bway, \ + .res_ways = rway, \ + .cache_mode = cmod, \ + .probe_target_ways = ptw, \ + .dis_cap_alloc = dca, \ + .retain_on_pc = rp, \ + .activate_on_init = a, \ + } + +static struct llcc_slice_config sdmmagpie_data[] = { + SCT_ENTRY("cpuss", 1, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1), + SCT_ENTRY("modem", 8, 8, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modemhw", 9, 9, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("mmuhwt", 13, 13, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 0, 1), +}; + +static int sdmmagpie_qcom_llcc_probe(struct platform_device *pdev) +{ + return qcom_llcc_probe(pdev, sdmmagpie_data, + ARRAY_SIZE(sdmmagpie_data)); +} + +static const struct of_device_id sdmmagpie_qcom_llcc_of_match[] = { + { .compatible = "qcom,sdmmagpie-llcc", }, + { }, +}; + +static struct platform_driver sdmmagpie_qcom_llcc_driver = { + .driver = { + .name = "sdmmagpie-llcc", + .owner = THIS_MODULE, + .of_match_table = sdmmagpie_qcom_llcc_of_match, + }, + .probe = sdmmagpie_qcom_llcc_probe, + .remove = qcom_llcc_remove, +}; + +static int __init sdmmagpie_init_qcom_llcc_init(void) +{ + return platform_driver_register(&sdmmagpie_qcom_llcc_driver); +} +module_init(sdmmagpie_init_qcom_llcc_init); + +static void __exit sdmmagpie_exit_qcom_llcc_exit(void) +{ + platform_driver_unregister(&sdmmagpie_qcom_llcc_driver); +} +module_exit(sdmmagpie_exit_qcom_llcc_exit); + +MODULE_DESCRIPTION("Qualcomm Technologies Inc sdmmagpie LLCC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/llcc-sm8150.c b/drivers/soc/qcom/llcc-sm8150.c index 5a397a2f310915fc487166cca5acbeebd26798ab..5ad72b7fc92d1ae7ded1ed7264b6bab461ff35fc 100644 --- a/drivers/soc/qcom/llcc-sm8150.c +++ 
b/drivers/soc/qcom/llcc-sm8150.c @@ -60,11 +60,11 @@ static struct llcc_slice_config sm8150_data[] = { SCT_ENTRY("cpuss", 1, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1), SCT_ENTRY("vidsc0", 2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("vidsc1", 3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("rotator", 4, 4, 1024, 2, 1, 0xFFF, 0x0, 2, 0, 0, 1, 0), SCT_ENTRY("voice", 5, 5, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("audio", 6, 6, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("modemhp_grow", 7, 7, 1024, 2, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("modem", 8, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("modemhw", 9, 9, 1024, 1, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("compute", 10, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("gpuhtw", 11, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("gpu", 12, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0), diff --git a/drivers/soc/qcom/llcc_events.h b/drivers/soc/qcom/llcc_events.h new file mode 100644 index 0000000000000000000000000000000000000000..89769b038e48aee8245b75b440ad221a73030c69 --- /dev/null +++ b/drivers/soc/qcom/llcc_events.h @@ -0,0 +1,301 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SOC_QCOM_LLCC_EVENTS_H_ +#define _SOC_QCOM_LLCC_EVENTS_H_ + +enum event_port_select { + EVENT_PORT_FEAC, + EVENT_PORT_FERC, + EVENT_PORT_FEWC, + EVENT_PORT_BEAC, + EVENT_PORT_BERC, + EVENT_PORT_TRP, + EVENT_PORT_DRP, + EVENT_PORT_PMGR, + EVENT_PORT_TENURE, + EVENT_PORT_TLAT, +}; + +enum feac_events { + FEAC_ANY_ACCESS, + FEAC_READ_INCR, + FEAC_WRITE_INCR, + FEAC_WRITE_ORDERED, + FEAC_READE_EXCL, + FEAC_WRITE_EXCL, + FEAC_CMO, + FEAC_CMO_CLEAN, + FEAC_CMO_INVAL, + FEAC_CMO_CLEANINVAL, + FEAC_CMO_DCPLD, + FEAC_READ_NOALLOC, + FEAC_WRITE_NOALLOC, + FEAC_PREFETCH, + FEAC_RD_BYTES, + FEAC_RD_BEATS, + FEAC_WR_BYTES, + FEAC_WR_BEATS, + FEAC_FC_READ, + FEAC_EWD_ACCESS, + FEAC_TCM_ACCESS, + FEAC_GM_HIT, + FEAC_GM_MISS, + FEAC_GM_UNAVAILABLE, + FEAC_XPU_ERROR, + FEAC_READ_HAZARD, + FEAC_WRITE_HAZARD, + FEAC_GRANULE_READ, + FEAC_GRANULE_WRITE, + FEAC_RIFB_ALLOC, + FEAC_WIFB_ALLOC, + FEAC_RIFB_DEALLOC, + FEAC_WIFB_DEALLOC, + FEAC_RESERVED, + FEAC_RESERVED1, + FEAC_FEAC2TRP_LP_TX, + FEAC_TRP_LP_BUSY, + FEAC_FEAC2TRP_HP_TX, + FEAC_TRP_HP_BUSY, + FEAC_FEAC2FEWC_TX, + FEAC_BEAC_LP_BUSY, + FEAC_BEAC_HP_BUSY, + FEAC_RIFB_FULL, + FEAC_WIFB_FULL, + FEAC_RD_CRDT_TX, + FEAC_WR_CRDT_TX, + FEAC_PROMOTION, + FEAC_FEAC2TRP_LP_PRESSURE, + FEAC_FEAC2TRP_HP_PRESSURE, + FEAC_FEAC2FEWC_PRESSURE, + FEAC_FEAC2BEAC_LP_PRESSURE, + FEAC_FEAC2BEAC_HP_PRESSURE, + FEAC_WR_THROUGH, +}; + +enum ferc_events { + FERC_BERC_CMD, + FERC_BERC_BEAT, + FERC_DRP_CMD, + FERC_DRP_BEAT, + FERC_RD_CTRL_RSP_TX, + FERC_WR_CTRL_RSP_TX, + FERC_RD_DATA_TX, + FERC_MISS_TRUMPS_HIT, + FERC_HIT_TRUMPS_WRSP, + FERC_RD_INTRA_RSP_IDLE, +}; + +enum fewc_events { + FEWC_WR_CMD, + FEWC_WR_DATA_BEAT, + FEWC_WR_LAST, + FEWC_WBUF_DEALLOC, + FEWC_WR_HIT, + FEWC_WR_MISS, + FEWC_NC_RMW, + FEWC_WR_DOWNGRADE, + FEWC_BEAC_WR_CMD, + FEWC_BEAC_WR_BEAT, + FEWC_BEAC_RD_CMD, + FEWC_BERC_FILL_BEAT, + FEWC_DRP_WR_CMD, + FEWC_DRP_WR_BEAT, + FEWC_DRP_RD_BEAT, + FEWC_TRP_TAG_LOOKUP, + FEWC_TRP_TAG_UPDATE, + FEWC_TRP_UNSTALL, + 
FEWC_WBUFFS_FULL, + FEWC_DRP_BUSY, + FEWC_BEAC_WR_BUSY, + FEWC_BEAC_RD_BUSY, + FEWC_TRP_TAG_LOOKUP_BUSY, + FEWC_TRP_TAG_UPDATE_BUSY, + FEWC_C_RMW, + FEWC_NC_ALLOC_RMW, + FEWC_NC_NO_ALLOC_RMW, + FEWC_NC_RMW_DEALLOC, + FEWC_C_RMW_DEALLOC, + FEWC_STALLED_BY_EVICT, +}; + +enum beac_events { + BEAC_RD_TX, + BEAC_WR_TX, + BEAC_RD_GRANULE, + BEAC_WR_GRANULE, + BEAC_WR_BEAT_TX, + BEAC_RD_CRDT_ZERO, + BEAC_WR_CRDT_ZERO, + BEAC_WDATA_CRDT_ZERO, + BEAC_IFCMD_CRDT_ZERO, + BEAC_IFWDATA_CRDT_ZERO, + BEAC_PCT_ENTRY_ALLOC, + BEAC_PCT_ENTRY_FREE, + BEAC_PCT_FULL, + BEAC_RD_PROMOTION_TX, + BEAC_WR_PROMOTION_TX, + BEAC_RD_PRESSURE_TX, + BEAC_WR_PRESSURE_TX, +}; + +enum berc_events { + BERC_RD_CMD, + BERC_ERROR_CMD, + BERC_PCT_ENTRY_DEALLOC, + BERC_RD_RSP_RX, + BERC_RD_RSP_BEAT_RX, + BERC_RD_LA_RX, + BERC_UNSTALL_RX, + BERC_TX_RD_CMD, + BERC_TX_ERR_CMD, + BERC_TX_RD_BEAT, + BERC_TX_ERR_BEAT, + BERC_RESERVED, + BERC_RESERVED1, + BERC_CMO_RX, + BERC_CMO_TX, + BERC_DRP_WR_TX, + BERC_DRP_WR_BEAT_TX, + BERC_FEWC_WR_TX, + BERC_FEWC_WR_BEAT_TX, + BERC_LBUFFS_FULL, + BERC_DRP_BUSY, + BERC_FEWC_BUSY, + BERC_LBUFF_STALLED, +}; + +enum trp_events { + TRP_ANY_ACCESS, + TRP_INCR_RD, + TRP_INCR_WR, + TRP_ANY_HIT, + TRP_RD_HIT, + TRP_WR_HIT, + TRP_RD_MISS, + TRP_WR_MISS, + TRP_RD_HIT_MISS, + TRP_WR_HIT_MISS, + TRP_EVICT, + TRP_GRANULE_EVICT, + TRP_RD_EVICT, + TRP_WR_EVICT, + TRP_LINE_FILL, + TRP_GRANULE_FILL, + TRP_WSC_WRITE, + TRP_WSC_EVICT, + TRP_SUBCACHE_ACT, + TRP_SUBCACHE_DEACT, + TRP_RD_DEACTIVE_SUBCACHE, + TRP_WR_DEACTIVE_SUBCACHE, + TRP_INVALID_LINE_ALLOC, + TRP_DEACTIVE_LINE_ALLOC, + TRP_SELF_EVICTION_ALLOC, + TRP_UC_SUBCACHE_ALLOC, + TRP_FC_SELF_EVICTION_ALLOC, + TRP_LP_SUBCACHE_VICTIM, + TRP_OC_SUBCACHE_VICTIM, + TRP_MRU_ROLLOVER, + TRP_NC_DOWNGRADE, + TRP_TAGRAM_CORR_ERR, + TRP_TAGRAM_UNCORR_ERR, + TRP_RD_MISS_FC, + TRP_CPU_WRITE_EWD_LINE, + TRP_CLIENT_WRITE_EWD_LINE, + TRP_CLIENT_READ_EWD_LINE, + TRP_CMO_I_EWD_LINE, + TRP_CMO_I_DIRTY_LINE, + TRP_DRP_RD_NOTIFICATION, + 
TRP_DRP_WR_NOTIFICATION, + TRP_LINEFILL_TAG_UPDATE, + TRP_FEWC_TAG_UPDATE, + TRP_ET_FULL, + TRP_NAWT_FULL, + TRP_HITQ_FULL, + TRP_ET_ALLOC, + TRP_ET_DEALLOC, + TRP_NAWT_ALLOC, + TRP_NAWT_DEALLOC, + TRP_RD_REPLAY, + TRP_WR_ECC_RD, + TRP_ET_LP_FULL, + TRP_ET_HP_FULL, + TRP_SOEH, +}; + +enum drp_events { + DRP_TRP_RD_NOTIFICATION, + DRP_TRP_WR_NOTIFICATION, + DRP_BIST_WR_NOTIFICATION, + DRP_DRIE_WR_NOTIFICATION, + DRP_ECC_CORR_ERR, + DRP_ECC_UNCORR_ERR, + DRP_FERC_RD_TX, + DRP_FEWC_RD_TX, + DRP_EVICT_LINE_TX, + DRP_EVICT_GRANULE_TX, + DRP_BIST_TX, + DRP_FERC_RD_BEAT, + DRP_FEWC_RD_BEAT, + DRP_BIST_RD_BEAT, + DRP_EVICT_RD_BEAT, + DRP_BERC_WR_BEAT, + DRP_FEWC_WR_BEAT, + DRP_BIST_WR_BEAT, + DRP_DRIE_WR_BEAT, + DRP_BERC_UNSTALL, + DRP_FEWC_UNSTALL, + DRP_LB_RD, + DRP_LB_WR, + DRP_BANK_CONFLICT, + DRP_FILL_TRUMPS_RD, + DRP_RD_TRUMPS_WR, + DRP_LB_SLP_RET, + DRP_LB_SLP_NRET, + DRP_LB_WAKEUP, + DRP_TRP_EARLY_WAKEUP, + DRP_PCB_IDLE, + DRP_EVICT_RDFIFO_FULL, + DRP_FEWC_RDFIFO_FULL, + DRP_FERC_RDFIFO_FULL, + DRP_FERC_RD, + DRP_FEWC_RD, + DRP_LINE_EVICT, + DRP_GRANULE_EVICT, + DRP_BIST_RD, + DRP_FEWC_WR, + DRP_LINE_FILL, + DRP_GRANULE_FILL, + DRP_BIST_WR, + DRP_DRIE_WR, +}; + +enum pmgr_events { + PMGR_Q_RUN_STATE, + PMGR_Q_DENIED_STATE, + PMGR_Q_STOPEED_TO_Q_RUN, + PMGR_Q_RUN_TO_Q_FENCED, + PMGR_Q_RUN_TO_Q_DENIED, + PMGR_Q_DENIED_TO_Q_RUN, + PMGR_Q_FENCED_TO_Q_STOPPED, + PMGR_Q_FENCED_TO_Q_DENIED, +}; + +enum filter_type { + SCID, + MID, + PROFILING_TAG, + WAY_ID, + UNKNOWN, +}; + +#endif /* _SOC_QCOM_LLCC_EVENTS_H_ */ diff --git a/drivers/soc/qcom/llcc_perfmon.c b/drivers/soc/qcom/llcc_perfmon.c new file mode 100644 index 0000000000000000000000000000000000000000..3a1cef853d3b8c9ea4cc57147218992c41bae007 --- /dev/null +++ b/drivers/soc/qcom/llcc_perfmon.c @@ -0,0 +1,1205 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "llcc_events.h" +#include "llcc_perfmon.h" + +#define LLCC_PERFMON_NAME "llcc_perfmon" +#define LLCC_PERFMON_COUNTER_MAX 16 +#define MAX_NUMBER_OF_PORTS 8 +#define NUM_CHANNELS 16 +#define MAX_STRING_SIZE 20 +#define DELIM_CHAR " " + +/** + * struct llcc_perfmon_counter_map - llcc perfmon counter map info + * @port_sel: Port selected for configured counter + * @event_sel: Event selected for configured counter + * @counter_dump: Cumulative counter dump + */ +struct llcc_perfmon_counter_map { + unsigned int port_sel; + unsigned int event_sel; + unsigned long long counter_dump; +}; + +struct llcc_perfmon_private; +/** + * struct event_port_ops - event port operation + * @event_config: Counter config support for port & event + * @event_enable: Counter enable support for port + * @event_filter_config: Port filter config support + */ +struct event_port_ops { + void (*event_config)(struct llcc_perfmon_private *, + unsigned int, unsigned int, bool); + void (*event_enable)(struct llcc_perfmon_private *, bool); + void (*event_filter_config)(struct llcc_perfmon_private *, + enum filter_type, unsigned long, bool); +}; + +/** + * struct llcc_perfmon_private - llcc perfmon private + * @llcc_map: llcc register address space map + * @broadcast_off: Offset of llcc broadcast address space + * @bank_off: Offset of llcc banks + 
* @num_banks: Number of banks supported + * @port_ops: struct event_port_ops + * @configured: Mapping of configured event counters + * @configured_counters: + * Count of configured counters. + * @enables_port: Port enabled for perfmon configuration + * @filtered_ports: Port filter enabled + * @port_configd: Number of perfmon port configuration supported + * @mutex: mutex to protect this structure + * @hrtimer: hrtimer instance for timer functionality + * @expires: timer expire time in nano seconds + */ +struct llcc_perfmon_private { + struct regmap *llcc_map; + unsigned int broadcast_off; + unsigned int bank_off[NUM_CHANNELS]; + unsigned int num_banks; + struct event_port_ops *port_ops[MAX_NUMBER_OF_PORTS]; + struct llcc_perfmon_counter_map configured[LLCC_PERFMON_COUNTER_MAX]; + unsigned int configured_counters; + unsigned int enables_port; + unsigned int filtered_ports; + unsigned int port_configd; + struct mutex mutex; + struct hrtimer hrtimer; + ktime_t expires; +}; + +static inline void llcc_bcast_write(struct llcc_perfmon_private *llcc_priv, + unsigned int offset, uint32_t val) +{ + regmap_write(llcc_priv->llcc_map, llcc_priv->broadcast_off + offset, + val); +} + +static inline void llcc_bcast_read(struct llcc_perfmon_private *llcc_priv, + unsigned int offset, uint32_t *val) +{ + regmap_read(llcc_priv->llcc_map, llcc_priv->broadcast_off + offset, + val); +} + +static void llcc_bcast_modify(struct llcc_perfmon_private *llcc_priv, + unsigned int offset, uint32_t val, uint32_t mask) +{ + uint32_t readval; + + llcc_bcast_read(llcc_priv, offset, &readval); + readval &= ~mask; + readval |= val & mask; + llcc_bcast_write(llcc_priv, offset, readval); +} + +static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv) +{ + uint32_t val; + unsigned int i, j; + unsigned long long total; + + if (!llcc_priv->configured_counters) + return; + + llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP); + for (i = 0; i < llcc_priv->configured_counters; i++) { + 
total = 0; + for (j = 0; j < llcc_priv->num_banks; j++) { + regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j] + + LLCC_COUNTER_n_VALUE(i), &val); + total += val; + } + + llcc_priv->configured[i].counter_dump += total; + } +} + +static ssize_t perfmon_counter_dump_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + uint32_t val; + unsigned int i, j; + unsigned long long total; + ssize_t cnt = 0, print; + + if (llcc_priv->configured_counters == 0) { + pr_err("counters not configured\n"); + return cnt; + } + + if (llcc_priv->expires) { + perfmon_counter_dump(llcc_priv); + for (i = 0; i < llcc_priv->configured_counters - 1; i++) { + print = snprintf(buf, MAX_STRING_SIZE, "Port %02d,", + llcc_priv->configured[i].port_sel); + buf += print; + cnt += print; + print = snprintf(buf, MAX_STRING_SIZE, "Event %02d,", + llcc_priv->configured[i].event_sel); + buf += print; + cnt += print; + + print = snprintf(buf, MAX_STRING_SIZE, "0x%016llx\n", + llcc_priv->configured[i].counter_dump); + buf += print; + cnt += print; + llcc_priv->configured[i].counter_dump = 0; + } + + print = snprintf(buf, MAX_STRING_SIZE, "CYCLE COUNT, ,"); + buf += print; + cnt += print; + print = snprintf(buf, MAX_STRING_SIZE, "0x%016llx\n", + llcc_priv->configured[i].counter_dump); + buf += print; + cnt += print; + llcc_priv->configured[i].counter_dump = 0; + hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires); + } else { + llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP); + + for (i = 0; i < llcc_priv->configured_counters - 1; i++) { + print = snprintf(buf, MAX_STRING_SIZE, "Port %02d,", + llcc_priv->configured[i].port_sel); + buf += print; + cnt += print; + print = snprintf(buf, MAX_STRING_SIZE, "Event %02d,", + llcc_priv->configured[i].event_sel); + buf += print; + cnt += print; + total = 0; + for (j = 0; j < llcc_priv->num_banks; j++) { + regmap_read(llcc_priv->llcc_map, + llcc_priv->bank_off[j] 
+ + LLCC_COUNTER_n_VALUE(i), + &val); + print = snprintf(buf, MAX_STRING_SIZE, + "0x%08x,", val); + buf += print; + cnt += print; + total += val; + } + + print = snprintf(buf, MAX_STRING_SIZE, "0x%09llx\n", + total); + buf += print; + cnt += print; + } + + print = snprintf(buf, MAX_STRING_SIZE, "CYCLE COUNT, ,"); + buf += print; + cnt += print; + total = 0; + for (j = 0; j < llcc_priv->num_banks; j++) { + regmap_read(llcc_priv->llcc_map, + llcc_priv->bank_off[j] + + LLCC_COUNTER_n_VALUE(i), &val); + print = snprintf(buf, MAX_STRING_SIZE, "0x%08x,", val); + buf += print; + cnt += print; + total += val; + } + + print = snprintf(buf, MAX_STRING_SIZE, "0x%09llx\n", total); + buf += print; + cnt += print; + } + + return cnt; +} + +static ssize_t perfmon_configure_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + struct event_port_ops *port_ops; + unsigned int j; + unsigned long port_sel, event_sel; + uint32_t val; + char *token, *delim = DELIM_CHAR; + + mutex_lock(&llcc_priv->mutex); + if (llcc_priv->configured_counters) { + pr_err("Counters configured already, remove & try again\n"); + mutex_unlock(&llcc_priv->mutex); + return -EINVAL; + } + + llcc_priv->configured_counters = 0; + j = 0; + token = strsep((char **)&buf, delim); + + while (token != NULL) { + if (kstrtoul(token, 10, &port_sel)) + break; + + if (port_sel >= llcc_priv->port_configd) + break; + + token = strsep((char **)&buf, delim); + if (token == NULL) + break; + + if (kstrtoul(token, 10, &event_sel)) + break; + + token = strsep((char **)&buf, delim); + if (event_sel >= EVENT_NUM_MAX) { + pr_err("unsupported event num %ld\n", event_sel); + continue; + } + + llcc_priv->configured[j].port_sel = port_sel; + llcc_priv->configured[j].event_sel = event_sel; + port_ops = llcc_priv->port_ops[port_sel]; + pr_info("counter %d configured for event %ld from port %ld\n", + j, event_sel, port_sel); + 
port_ops->event_config(llcc_priv, event_sel, j++, true); + if (!(llcc_priv->enables_port & (1 << port_sel))) + if (port_ops->event_enable) + port_ops->event_enable(llcc_priv, true); + + llcc_priv->enables_port |= (1 << port_sel); + + /* Last perfmon counter for cycle counter */ + if (llcc_priv->configured_counters++ == + (LLCC_PERFMON_COUNTER_MAX - 2)) + break; + } + + /* configure clock event */ + val = COUNT_CLOCK_EVENT | CLEAR_ON_ENABLE | CLEAR_ON_DUMP; + llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), val); + + llcc_priv->configured_counters++; + mutex_unlock(&llcc_priv->mutex); + return count; +} + +static ssize_t perfmon_remove_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + struct event_port_ops *port_ops; + unsigned int j, counter_remove = 0; + unsigned long port_sel, event_sel; + char *token, *delim = DELIM_CHAR; + + mutex_lock(&llcc_priv->mutex); + if (!llcc_priv->configured_counters) { + pr_err("Counters not configured\n"); + mutex_unlock(&llcc_priv->mutex); + return -EINVAL; + } + + j = 0; + token = strsep((char **)&buf, delim); + + while (token != NULL) { + if (kstrtoul(token, 10, &port_sel)) + break; + + if (port_sel >= llcc_priv->port_configd) + break; + + token = strsep((char **)&buf, delim); + if (token == NULL) + break; + + if (kstrtoul(token, 10, &event_sel)) + break; + + token = strsep((char **)&buf, delim); + if (event_sel >= EVENT_NUM_MAX) { + pr_err("unsupported event num %ld\n", event_sel); + continue; + } + + /* put dummy values */ + llcc_priv->configured[j].port_sel = MAX_NUMBER_OF_PORTS; + llcc_priv->configured[j].event_sel = 100; + port_ops = llcc_priv->port_ops[port_sel]; + pr_info("removed counter %d for event %ld from port %ld\n", + j, event_sel, port_sel); + + port_ops->event_config(llcc_priv, event_sel, j++, false); + if (llcc_priv->enables_port & (1 << port_sel)) + if (port_ops->event_enable) + 
port_ops->event_enable(llcc_priv, false); + + llcc_priv->enables_port &= ~(1 << port_sel); + + /* Last perfmon counter for cycle counter */ + if (counter_remove++ == (LLCC_PERFMON_COUNTER_MAX - 2)) + break; + } + + /* remove clock event */ + llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), 0); + + llcc_priv->configured_counters = 0; + mutex_unlock(&llcc_priv->mutex); + return count; +} + +static enum filter_type find_filter_type(char *filter) +{ + enum filter_type ret = UNKNOWN; + + if (!strcmp(filter, "SCID")) + ret = SCID; + else if (!strcmp(filter, "MID")) + ret = MID; + else if (!strcmp(filter, "PROFILING_TAG")) + ret = PROFILING_TAG; + else if (!strcmp(filter, "WAY_ID")) + ret = WAY_ID; + + return ret; +} + +static ssize_t perfmon_filter_config_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + unsigned long port, val; + struct event_port_ops *port_ops; + char *token, *delim = DELIM_CHAR; + enum filter_type filter = UNKNOWN; + + mutex_lock(&llcc_priv->mutex); + + token = strsep((char **)&buf, delim); + if (token != NULL) + filter = find_filter_type(token); + + if (filter == UNKNOWN) { + pr_err("filter configuration failed, Unsupported filter\n"); + goto filter_config_free; + } + + token = strsep((char **)&buf, delim); + if (token == NULL) { + pr_err("filter configuration failed, Wrong input\n"); + goto filter_config_free; + } + + if (kstrtoul(token, 10, &val)) { + pr_err("filter configuration failed, Wrong input\n"); + goto filter_config_free; + } + + if ((filter == SCID) && (val >= SCID_MAX)) { + pr_err("filter configuration failed, SCID above MAX value\n"); + goto filter_config_free; + } + + + while (token != NULL) { + token = strsep((char **)&buf, delim); + if (token == NULL) + break; + + if (kstrtoul(token, 10, &port)) + break; + + llcc_priv->filtered_ports |= 1 << port; + port_ops = llcc_priv->port_ops[port]; + if 
(port_ops->event_filter_config) + port_ops->event_filter_config(llcc_priv, filter, val, + true); + } + + mutex_unlock(&llcc_priv->mutex); + return count; + +filter_config_free: + mutex_unlock(&llcc_priv->mutex); + return -EINVAL; +} + +static ssize_t perfmon_filter_remove_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + struct event_port_ops *port_ops; + unsigned long port, val; + char *token, *delim = DELIM_CHAR; + enum filter_type filter = UNKNOWN; + + mutex_lock(&llcc_priv->mutex); + token = strsep((char **)&buf, delim); + if (token != NULL) + filter = find_filter_type(token); + + if (filter == UNKNOWN) { + pr_err("filter configuration failed, Unsupported filter\n"); + goto filter_remove_free; + } + + token = strsep((char **)&buf, delim); + if (token == NULL) { + pr_err("filter configuration failed, Wrong input\n"); + goto filter_remove_free; + } + + if (kstrtoul(token, 10, &val)) { + pr_err("filter configuration failed, Wrong input\n"); + goto filter_remove_free; + } + + if ((filter == SCID) && (val >= SCID_MAX)) { + pr_err("filter configuration failed, SCID above MAX value\n"); + goto filter_remove_free; + } + + while (token != NULL) { + token = strsep((char **)&buf, delim); + if (token == NULL) + break; + + if (kstrtoul(token, 10, &port)) + break; + + llcc_priv->filtered_ports &= ~(1 << port); + port_ops = llcc_priv->port_ops[port]; + if (port_ops->event_filter_config) + port_ops->event_filter_config(llcc_priv, filter, val, + false); + } + + mutex_unlock(&llcc_priv->mutex); + return count; + +filter_remove_free: + mutex_unlock(&llcc_priv->mutex); + return count; +} + +static ssize_t perfmon_start_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + uint32_t val = 0, mask; + unsigned long start; + + if (kstrtoul(buf, 10, &start)) + return 
-EINVAL; + + mutex_lock(&llcc_priv->mutex); + if (start) { + if (!llcc_priv->configured_counters) + pr_err("start failed. perfmon not configured\n"); + + val = MANUAL_MODE | MONITOR_EN; + if (llcc_priv->expires) { + if (hrtimer_is_queued(&llcc_priv->hrtimer)) + hrtimer_forward_now(&llcc_priv->hrtimer, + llcc_priv->expires); + else + hrtimer_start(&llcc_priv->hrtimer, + llcc_priv->expires, + HRTIMER_MODE_REL_PINNED); + } + + } else { + if (llcc_priv->expires) + hrtimer_cancel(&llcc_priv->hrtimer); + + if (!llcc_priv->configured_counters) + pr_err("stop failed. perfmon not configured\n"); + } + + mask = PERFMON_MODE_MONITOR_MODE_MASK | PERFMON_MODE_MONITOR_EN_MASK; + llcc_bcast_modify(llcc_priv, PERFMON_MODE, val, mask); + + + mutex_unlock(&llcc_priv->mutex); + return count; +} + +static ssize_t perfmon_ns_periodic_dump_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + + if (kstrtos64(buf, 10, &llcc_priv->expires)) + return -EINVAL; + + mutex_lock(&llcc_priv->mutex); + if (!llcc_priv->expires) { + hrtimer_cancel(&llcc_priv->hrtimer); + mutex_unlock(&llcc_priv->mutex); + return count; + } + + if (hrtimer_is_queued(&llcc_priv->hrtimer)) + hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires); + else + hrtimer_start(&llcc_priv->hrtimer, llcc_priv->expires, + HRTIMER_MODE_REL_PINNED); + + mutex_unlock(&llcc_priv->mutex); + return count; +} + +static ssize_t perfmon_scid_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev); + uint32_t val; + unsigned int i, j, offset; + ssize_t cnt = 0, print; + unsigned long total; + + for (i = 0; i < SCID_MAX; i++) { + total = 0; + offset = TRP_SCID_n_STATUS(i); + + for (j = 0; j < llcc_priv->num_banks; j++) { + regmap_read(llcc_priv->llcc_map, + llcc_priv->bank_off[j] + offset, &val); + val = (val & 
TRP_SCID_STATUS_CURRENT_CAP_MASK) + >> TRP_SCID_STATUS_CURRENT_CAP_SHIFT; + total += val; + } + + llcc_bcast_read(llcc_priv, offset, &val); + if (val & TRP_SCID_STATUS_ACTIVE_MASK) + print = snprintf(buf, MAX_STRING_SIZE, "SCID %02d %10s", + i, "ACTIVE"); + else + print = snprintf(buf, MAX_STRING_SIZE, "SCID %02d %10s", + i, "DEACTIVE"); + + buf += print; + cnt += print; + print = snprintf(buf, MAX_STRING_SIZE, ",0x%08lx\n", total); + buf += print; + cnt += print; + } + + return cnt; +} + +static DEVICE_ATTR_RO(perfmon_counter_dump); +static DEVICE_ATTR_WO(perfmon_configure); +static DEVICE_ATTR_WO(perfmon_remove); +static DEVICE_ATTR_WO(perfmon_filter_config); +static DEVICE_ATTR_WO(perfmon_filter_remove); +static DEVICE_ATTR_WO(perfmon_start); +static DEVICE_ATTR_RO(perfmon_scid_status); +static DEVICE_ATTR_WO(perfmon_ns_periodic_dump); + +static struct attribute *llcc_perfmon_attrs[] = { + &dev_attr_perfmon_counter_dump.attr, + &dev_attr_perfmon_configure.attr, + &dev_attr_perfmon_remove.attr, + &dev_attr_perfmon_filter_config.attr, + &dev_attr_perfmon_filter_remove.attr, + &dev_attr_perfmon_start.attr, + &dev_attr_perfmon_scid_status.attr, + &dev_attr_perfmon_ns_periodic_dump.attr, + NULL, +}; + +static struct attribute_group llcc_perfmon_group = { + .attrs = llcc_perfmon_attrs, +}; + +static void perfmon_counter_config(struct llcc_perfmon_private *llcc_priv, + unsigned int port, unsigned int event_counter_num) +{ + uint32_t val; + + val = (port & PERFMON_PORT_SELECT_MASK) | + ((event_counter_num << EVENT_SELECT_SHIFT) & + PERFMON_EVENT_SELECT_MASK) | CLEAR_ON_ENABLE | CLEAR_ON_DUMP; + llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(event_counter_num), + val); +} + +static void feac_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC)) + mask |= 
FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, FEAC_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_FEAC, counter_num); +} + +static void feac_event_enable(struct llcc_perfmon_private *llcc_priv, + bool enable) +{ + uint32_t val = 0, mask; + + if (enable) { + val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | + (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN; + + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC)) + val |= (FILTER_0 << FEAC_SCALING_FILTER_SEL_SHIFT) | + FEAC_SCALING_FILTER_EN; + } + + mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK + | PROF_CFG_EN_MASK; + + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC)) + mask |= FEAC_SCALING_FILTER_SEL_MASK | + FEAC_SCALING_FILTER_EN_MASK; + + llcc_bcast_modify(llcc_priv, FEAC_PROF_CFG, val, mask); +} + +static void feac_event_filter_config(struct llcc_perfmon_private *llcc_priv, + enum filter_type filter, unsigned long match, bool enable) +{ + uint32_t val = 0, mask; + + if (filter == SCID) { + if (enable) + val = (match << SCID_MATCH_SHIFT) + | SCID_MASK_MASK; + + mask = SCID_MATCH_MASK | SCID_MASK_MASK; + llcc_bcast_modify(llcc_priv, FEAC_PROF_FILTER_0_CFG6, val, + mask); + } else if (filter == MID) { + if (enable) + val = (match << MID_MATCH_SHIFT) + | MID_MASK_MASK; + + mask = MID_MATCH_MASK | MID_MASK_MASK; + llcc_bcast_modify(llcc_priv, FEAC_PROF_FILTER_0_CFG5, val, + mask); + } else { + pr_err("unknown filter/not supported\n"); + } +} + +static struct event_port_ops feac_port_ops = { + .event_config = feac_event_config, + .event_enable = feac_event_enable, + .event_filter_config = feac_event_filter_config, +}; + +static void ferc_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned 
int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = event_type << EVENT_SEL_SHIFT; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, FERC_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_FERC, counter_num); +} + +static void ferc_event_enable(struct llcc_perfmon_private *llcc_priv, + bool enable) +{ + uint32_t val = 0, mask; + + if (enable) + val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | + (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN; + + mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK + | PROF_CFG_EN_MASK; + llcc_bcast_modify(llcc_priv, FERC_PROF_CFG, val, mask); +} + +static void ferc_event_filter_config(struct llcc_perfmon_private *llcc_priv, + enum filter_type filter, unsigned long match, bool enable) +{ + uint32_t val = 0, mask; + + if (filter != PROFILING_TAG) { + pr_err("unknown filter/not supported\n"); + return; + } + + if (enable) + val = (match << PROFTAG_MATCH_SHIFT) | + FILTER_0_MASK << PROFTAG_MASK_SHIFT; + + mask = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK; + llcc_bcast_modify(llcc_priv, FERC_PROF_FILTER_0_CFG0, val, mask); +} + +static struct event_port_ops ferc_port_ops = { + .event_config = ferc_event_config, + .event_enable = ferc_event_enable, + .event_filter_config = ferc_event_filter_config, +}; + +static void fewc_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = 
(event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, FEWC_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_FEWC, counter_num); +} + +static void fewc_event_filter_config(struct llcc_perfmon_private *llcc_priv, + enum filter_type filter, unsigned long match, bool enable) +{ + uint32_t val = 0, mask; + + if (filter != PROFILING_TAG) { + pr_err("unknown filter/not supported\n"); + return; + } + + if (enable) + val = (match << PROFTAG_MATCH_SHIFT) | + FILTER_0_MASK << PROFTAG_MASK_SHIFT; + + mask = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK; + llcc_bcast_modify(llcc_priv, FEWC_PROF_FILTER_0_CFG0, val, mask); +} + +static struct event_port_ops fewc_port_ops = { + .event_config = fewc_event_config, + .event_filter_config = fewc_event_filter_config, +}; + +static void beac_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, BEAC_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_BEAC, counter_num); +} + +static void beac_event_enable(struct llcc_perfmon_private *llcc_priv, + bool enable) +{ + uint32_t val = 0, mask; + + if (enable) + val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | + (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN; + + mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK + | 
PROF_CFG_EN_MASK; + llcc_bcast_modify(llcc_priv, BEAC_PROF_CFG, val, mask); +} + +static void beac_event_filter_config(struct llcc_perfmon_private *llcc_priv, + enum filter_type filter, unsigned long match, bool enable) +{ + uint32_t val = 0, mask; + + if (filter != PROFILING_TAG) { + pr_err("unknown filter/not supported\n"); + return; + } + + if (enable) + val = (match << BEAC_PROFTAG_MATCH_SHIFT) + | FILTER_0_MASK << BEAC_PROFTAG_MASK_SHIFT; + + mask = BEAC_PROFTAG_MASK_MASK | BEAC_PROFTAG_MATCH_MASK; + llcc_bcast_modify(llcc_priv, BEAC_PROF_FILTER_0_CFG5, val, mask); + + if (enable) + val = match << BEAC_MC_PROFTAG_SHIFT; + + mask = BEAC_MC_PROFTAG_MASK; + llcc_bcast_modify(llcc_priv, BEAC_PROF_CFG, val, mask); +} + +static struct event_port_ops beac_port_ops = { + .event_config = beac_event_config, + .event_enable = beac_event_enable, + .event_filter_config = beac_event_filter_config, +}; + +static void berc_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, BERC_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_BERC, counter_num); +} + +static void berc_event_enable(struct llcc_perfmon_private *llcc_priv, + bool enable) +{ + uint32_t val = 0, mask; + + if (enable) + val = (BYTE_SCALING << BYTE_SCALING_SHIFT) | + (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN; + + mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK + | PROF_CFG_EN_MASK; + llcc_bcast_modify(llcc_priv, BERC_PROF_CFG, val, mask); +} + 
+static void berc_event_filter_config(struct llcc_perfmon_private *llcc_priv, + enum filter_type filter, unsigned long match, bool enable) +{ + uint32_t val = 0, mask; + + if (filter != PROFILING_TAG) { + pr_err("unknown filter/not supported\n"); + return; + } + + if (enable) + val = (match << PROFTAG_MATCH_SHIFT) | + FILTER_0_MASK << PROFTAG_MASK_SHIFT; + + mask = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK; + llcc_bcast_modify(llcc_priv, BERC_PROF_FILTER_0_CFG0, val, mask); +} + +static struct event_port_ops berc_port_ops = { + .event_config = berc_event_config, + .event_enable = berc_event_enable, + .event_filter_config = berc_event_filter_config, +}; + +static void trp_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_TRP, counter_num); +} + +static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv, + enum filter_type filter, unsigned long match, bool enable) +{ + uint32_t val = 0, mask; + + if (filter == SCID) { + if (enable) + val = (match << TRP_SCID_MATCH_SHIFT) + | TRP_SCID_MASK_MASK; + + mask = TRP_SCID_MATCH_MASK | TRP_SCID_MASK_MASK; + } else if (filter == WAY_ID) { + if (enable) + val = (match << TRP_WAY_ID_MATCH_SHIFT) + | TRP_WAY_ID_MASK_MASK; + + mask = TRP_WAY_ID_MATCH_MASK | + TRP_WAY_ID_MASK_MASK; + } else if (filter == PROFILING_TAG) { + if (enable) + val = (match << TRP_PROFTAG_MATCH_SHIFT) + | FILTER_0_MASK << 
TRP_PROFTAG_MASK_SHIFT; + + mask = TRP_PROFTAG_MATCH_MASK | TRP_PROFTAG_MASK_MASK; + } else { + pr_err("unknown filter/not supported\n"); + return; + } + + llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG1, val, mask); +} + +static struct event_port_ops trp_port_ops = { + .event_config = trp_event_config, + .event_filter_config = trp_event_filter_config, +}; + +static void drp_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, DRP_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_DRP, counter_num); +} + +static void drp_event_enable(struct llcc_perfmon_private *llcc_priv, + bool enable) +{ + uint32_t val = 0, mask; + + if (enable) + val = (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN; + + mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_EN_MASK; + llcc_bcast_modify(llcc_priv, DRP_PROF_CFG, val, mask); +} + +static struct event_port_ops drp_port_ops = { + .event_config = drp_event_config, + .event_enable = drp_event_enable, +}; + +static void pmgr_event_config(struct llcc_perfmon_private *llcc_priv, + unsigned int event_type, unsigned int event_counter_num, + bool enable) +{ + uint32_t val = 0, mask, counter_num = 0; + + mask = EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR)) + mask |= FILTER_SEL_MASK | FILTER_EN_MASK; + + if (enable) { + val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK; + if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR)) + val |= (FILTER_0 << FILTER_SEL_SHIFT) | 
FILTER_EN; + + counter_num = event_counter_num; + } + + llcc_bcast_modify(llcc_priv, PMGR_PROF_EVENT_n_CFG(event_counter_num), + val, mask); + perfmon_counter_config(llcc_priv, EVENT_PORT_PMGR, counter_num); +} + +static struct event_port_ops pmgr_port_ops = { + .event_config = pmgr_event_config, +}; + +static void llcc_register_event_port(struct llcc_perfmon_private *llcc_priv, + struct event_port_ops *ops, unsigned int event_port_num) +{ + if (llcc_priv->port_configd >= MAX_NUMBER_OF_PORTS) { + pr_err("Register port Failure!"); + return; + } + + llcc_priv->port_configd = llcc_priv->port_configd + 1; + llcc_priv->port_ops[event_port_num] = ops; +} + +static enum hrtimer_restart llcc_perfmon_timer_handler(struct hrtimer *hrtimer) +{ + struct llcc_perfmon_private *llcc_priv = container_of(hrtimer, + struct llcc_perfmon_private, hrtimer); + + perfmon_counter_dump(llcc_priv); + hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires); + return HRTIMER_RESTART; +} + +static int llcc_perfmon_probe(struct platform_device *pdev) +{ + int result = 0; + struct llcc_perfmon_private *llcc_priv; + struct device *dev = &pdev->dev; + uint32_t val; + + llcc_priv = devm_kzalloc(&pdev->dev, sizeof(*llcc_priv), GFP_KERNEL); + if (llcc_priv == NULL) + return -ENOMEM; + + llcc_priv->llcc_map = syscon_node_to_regmap(dev->parent->of_node); + if (IS_ERR(llcc_priv->llcc_map)) + return PTR_ERR(llcc_priv->llcc_map); + + result = of_property_read_u32(pdev->dev.parent->of_node, + "qcom,llcc-broadcast-off", &llcc_priv->broadcast_off); + if (result) { + pr_err("Invalid qcom,broadcast-off entry\n"); + return result; + } + + llcc_bcast_read(llcc_priv, LLCC_COMMON_STATUS0, &val); + + llcc_priv->num_banks = (val & LB_CNT_MASK) >> LB_CNT_SHIFT; + result = of_property_read_variable_u32_array(pdev->dev.parent->of_node, + "qcom,llcc-banks-off", (u32 *)&llcc_priv->bank_off, + 1, llcc_priv->num_banks); + if (result < 0) { + pr_err("Invalid qcom,llcc-banks-off entry\n"); + return result; + } + + 
result = sysfs_create_group(&pdev->dev.kobj, &llcc_perfmon_group); + if (result) { + pr_err("Unable to create sysfs version group\n"); + return result; + } + + mutex_init(&llcc_priv->mutex); + platform_set_drvdata(pdev, llcc_priv); + llcc_register_event_port(llcc_priv, &feac_port_ops, EVENT_PORT_FEAC); + llcc_register_event_port(llcc_priv, &ferc_port_ops, EVENT_PORT_FERC); + llcc_register_event_port(llcc_priv, &fewc_port_ops, EVENT_PORT_FEWC); + llcc_register_event_port(llcc_priv, &beac_port_ops, EVENT_PORT_BEAC); + llcc_register_event_port(llcc_priv, &berc_port_ops, EVENT_PORT_BERC); + llcc_register_event_port(llcc_priv, &trp_port_ops, EVENT_PORT_TRP); + llcc_register_event_port(llcc_priv, &drp_port_ops, EVENT_PORT_DRP); + llcc_register_event_port(llcc_priv, &pmgr_port_ops, EVENT_PORT_PMGR); + hrtimer_init(&llcc_priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + llcc_priv->hrtimer.function = llcc_perfmon_timer_handler; + llcc_priv->expires = 0; + return 0; +} + +static int llcc_perfmon_remove(struct platform_device *pdev) +{ + struct llcc_perfmon_private *llcc_priv = platform_get_drvdata(pdev); + + while (hrtimer_active(&llcc_priv->hrtimer)) + hrtimer_cancel(&llcc_priv->hrtimer); + + mutex_destroy(&llcc_priv->mutex); + sysfs_remove_group(&pdev->dev.kobj, &llcc_perfmon_group); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id of_match_llcc[] = { + { + .compatible = "qcom,llcc-perfmon", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_match_llcc); + +static struct platform_driver llcc_perfmon_driver = { + .probe = llcc_perfmon_probe, + .remove = llcc_perfmon_remove, + .driver = { + .name = LLCC_PERFMON_NAME, + .of_match_table = of_match_llcc, + } +}; +module_platform_driver(llcc_perfmon_driver); + +MODULE_DESCRIPTION("QCOM LLCC PMU MONITOR"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/llcc_perfmon.h b/drivers/soc/qcom/llcc_perfmon.h new file mode 100644 index 
0000000000000000000000000000000000000000..c423e9bff36d59c8d12debe0cdc71fa351e7545a --- /dev/null +++ b/drivers/soc/qcom/llcc_perfmon.h @@ -0,0 +1,197 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _SOC_QCOM_LLCC_PERFMON_H_ +#define _SOC_QCOM_LLCC_PERFMON_H_ + +#define LLCC_COMMON_STATUS0 (0x3000C) +/* FEAC */ +#define FEAC_PROF_FILTER_0_CFG5 (0x037014) +#define FEAC_PROF_FILTER_0_CFG6 (0x037018) +#define FEAC_PROF_EVENT_n_CFG(n) (0x037060 + 4 * n) +#define FEAC_PROF_CFG (0x0370A0) + +/* FERC */ +#define FERC_PROF_FILTER_0_CFG0 (0x03B000) +#define FERC_PROF_EVENT_n_CFG(n) (0x03B020 + 4 * n) +#define FERC_PROF_CFG (0x03B060) + +/* FEWC */ +#define FEWC_PROF_FILTER_0_CFG0 (0x033000) +#define FEWC_PROF_EVENT_n_CFG(n) (0x033020 + 4 * n) + +/* BEAC */ +#define BEAC_PROF_FILTER_0_CFG5 (0x049014) +#define BEAC_PROF_EVENT_n_CFG(n) (0x049040 + 4 * n) +#define BEAC_PROF_CFG (0x049080) + +/* BERC */ +#define BERC_PROF_FILTER_0_CFG0 (0x039000) +#define BERC_PROF_EVENT_n_CFG(n) (0x039020 + 4 * n) +#define BERC_PROF_CFG (0x039060) + +/* TRP */ +#define TRP_PROF_FILTER_0_CFG1 (0x024004) +#define TRP_PROF_EVENT_n_CFG(n) (0x024020 + 4 * n) +#define TRP_SCID_n_STATUS(n) (0x000004 + 0x1000 * n) + +/* DRP */ +#define DRP_PROF_EVENT_n_CFG(n) (0x044010 + 4 * n) +#define DRP_PROF_CFG (0x044050) + +/* PMGR */ +#define PMGR_PROF_EVENT_n_CFG(n) (0x03F000 + 4 * n) + +#define PERFMON_COUNTER_n_CONFIG(n) (0x031020 + 4 * n) +#define PERFMON_MODE (0x03100C) +#define PERFMON_DUMP (0x031010) +#define 
BROADCAST_COUNTER_n_VALUE(n) (0x031060 + 4 * n) + +#define LLCC_COUNTER_n_VALUE(n) (0x031060 + 4 * n) + +#define EVENT_NUM_MAX (64) +#define SCID_MAX (32) + +/* Perfmon */ +#define CLEAR_ON_ENABLE BIT(31) +#define CLEAR_ON_DUMP BIT(30) +#define FREEZE_ON_SATURATE BIT(29) +#define CHAINING_EN BIT(28) +#define COUNT_CLOCK_EVENT BIT(24) + +#define EVENT_SELECT_SHIFT (16) +#define PERFMON_EVENT_SELECT_MASK GENMASK(EVENT_SELECT_SHIFT + 4,\ + EVENT_SELECT_SHIFT) +#define PORT_SELECT_SHIFT (0) +#define PERFMON_PORT_SELECT_MASK GENMASK(PORT_SELECT_SHIFT + 3,\ + PORT_SELECT_SHIFT) + +#define MANUAL_MODE (0) +#define TIMED_MODE (1) +#define TRIGGER_MODE (2) +#define MONITOR_EN_SHIFT (15) +#define MONITOR_EN BIT(MONITOR_EN_SHIFT) +#define PERFMON_MODE_MONITOR_EN_MASK GENMASK(MONITOR_EN_SHIFT + 0,\ + MONITOR_EN_SHIFT) +#define MONITOR_MODE_SHIFT (0) +#define PERFMON_MODE_MONITOR_MODE_MASK GENMASK(MONITOR_MODE_SHIFT + 0,\ + MONITOR_MODE_SHIFT) + +#define MONITOR_DUMP BIT(0) + +/* COMMON */ +#define BYTE_SCALING (1024) +#define BEAT_SCALING (32) +#define LB_CNT_SHIFT (28) +#define LB_CNT_MASK GENMASK(LB_CNT_SHIFT + 3, \ + LB_CNT_SHIFT) + +#define BYTE_SCALING_SHIFT (16) +#define PROF_CFG_BYTE_SCALING_MASK GENMASK(BYTE_SCALING_SHIFT + 11,\ + BYTE_SCALING_SHIFT) +#define BEAT_SCALING_SHIFT (8) +#define PROF_CFG_BEAT_SCALING_MASK GENMASK(BEAT_SCALING_SHIFT + 7,\ + BEAT_SCALING_SHIFT) +#define PROF_EN_SHIFT (0) +#define PROF_EN BIT(PROF_EN_SHIFT) +#define PROF_CFG_EN_MASK GENMASK(PROF_EN_SHIFT + 0,\ + PROF_EN_SHIFT) + +#define FILTER_EN_SHIFT (31) +#define FILTER_EN BIT(FILTER_EN_SHIFT) +#define FILTER_EN_MASK GENMASK(FILTER_EN_SHIFT + 0,\ + FILTER_EN_SHIFT) +#define FILTER_0 (0) +#define FILTER_0_MASK GENMASK(FILTER_0 + 0, \ + FILTER_0) +#define FILTER_1 (1) +#define FILTER_1_MASK GENMASK(FILTER_1 + 0, \ + FILTER_1) + +#define FILTER_SEL_SHIFT (16) +#define FILTER_SEL_MASK GENMASK(FILTER_SEL_SHIFT + 0,\ + FILTER_SEL_SHIFT) +#define EVENT_SEL_SHIFT (0) +#define EVENT_SEL_MASK 
GENMASK(EVENT_SEL_SHIFT + 5,\ + EVENT_SEL_SHIFT) + +#define MID_MASK_SHIFT (16) +#define MID_MASK_MASK GENMASK(MID_MASK_SHIFT + 15, \ + MID_MASK_SHIFT) +#define MID_MATCH_SHIFT (0) +#define MID_MATCH_MASK GENMASK(MID_MATCH_SHIFT + 15, \ + MID_MATCH_SHIFT) +#define SCID_MASK_SHIFT (16) +#define SCID_MASK_MASK GENMASK(SCID_MASK_SHIFT + 15, \ + SCID_MASK_SHIFT) +#define SCID_MATCH_SHIFT (0) +#define SCID_MATCH_MASK GENMASK(SCID_MATCH_SHIFT + 15, \ + SCID_MATCH_SHIFT) +#define PROFTAG_MASK_SHIFT (2) +#define PROFTAG_MASK_MASK GENMASK(PROFTAG_MASK_SHIFT + 1,\ + PROFTAG_MASK_SHIFT) +#define PROFTAG_MATCH_SHIFT (0) +#define PROFTAG_MATCH_MASK GENMASK(PROFTAG_MATCH_SHIFT + 1,\ + PROFTAG_MATCH_SHIFT) +/* FEAC */ +#define FEAC_SCALING_FILTER_SEL_SHIFT (2) +#define FEAC_SCALING_FILTER_SEL_MASK GENMASK(FEAC_SCALING_FILTER_SEL_SHIFT \ + + 0, \ + FEAC_SCALING_FILTER_SEL_SHIFT) +#define FEAC_SCALING_FILTER_EN_SHIFT (1) +#define FEAC_SCALING_FILTER_EN BIT(FEAC_SCALING_FILTER_EN_SHIFT) +#define FEAC_SCALING_FILTER_EN_MASK GENMASK(FEAC_SCALING_FILTER_EN_SHIFT \ + + 0, \ + FEAC_SCALING_FILTER_EN_SHIFT) +/* BEAC */ +#define BEAC_PROFTAG_MASK_SHIFT (14) +#define BEAC_PROFTAG_MASK_MASK GENMASK(BEAC_PROFTAG_MASK_SHIFT + 1,\ + BEAC_PROFTAG_MASK_SHIFT) +#define BEAC_PROFTAG_MATCH_SHIFT (12) +#define BEAC_PROFTAG_MATCH_MASK GENMASK(BEAC_PROFTAG_MATCH_SHIFT + 1,\ + BEAC_PROFTAG_MATCH_SHIFT) +#define BEAC_MC_PROFTAG_SHIFT (1) +#define BEAC_MC_PROFTAG_MASK GENMASK(BEAC_MC_PROFTAG_SHIFT + 1,\ + BEAC_MC_PROFTAG_SHIFT) +/* TRP */ +#define TRP_SCID_MATCH_SHIFT (0) +#define TRP_SCID_MATCH_MASK GENMASK(TRP_SCID_MATCH_SHIFT + 4,\ + TRP_SCID_MATCH_SHIFT) +#define TRP_SCID_MASK_SHIFT (8) +#define TRP_SCID_MASK_MASK GENMASK(TRP_SCID_MASK_SHIFT + 4,\ + TRP_SCID_MASK_SHIFT) +#define TRP_WAY_ID_MATCH_SHIFT (16) +#define TRP_WAY_ID_MATCH_MASK GENMASK(TRP_WAY_ID_MATCH_SHIFT + 3,\ + TRP_WAY_ID_MATCH_SHIFT) +#define TRP_WAY_ID_MASK_SHIFT (20) +#define TRP_WAY_ID_MASK_MASK GENMASK(TRP_WAY_ID_MASK_SHIFT + 3,\ + 
TRP_WAY_ID_MASK_SHIFT) +#define TRP_PROFTAG_MATCH_SHIFT (24) +#define TRP_PROFTAG_MATCH_MASK GENMASK(TRP_PROFTAG_MATCH_SHIFT + 1,\ + TRP_PROFTAG_MATCH_SHIFT) +#define TRP_PROFTAG_MASK_SHIFT (28) +#define TRP_PROFTAG_MASK_MASK GENMASK(TRP_PROFTAG_MASK_SHIFT + 1,\ + TRP_PROFTAG_MASK_SHIFT) + +#define TRP_SCID_STATUS_ACTIVE_SHIFT (0) +#define TRP_SCID_STATUS_ACTIVE_MASK GENMASK( \ + TRP_SCID_STATUS_ACTIVE_SHIFT \ + + 0, \ + TRP_SCID_STATUS_ACTIVE_SHIFT) +#define TRP_SCID_STATUS_DEACTIVE_SHIFT (1) +#define TRP_SCID_STATUS_CURRENT_CAP_SHIFT (16) +#define TRP_SCID_STATUS_CURRENT_CAP_MASK GENMASK( \ + TRP_SCID_STATUS_CURRENT_CAP_SHIFT \ + + 13, \ + TRP_SCID_STATUS_CURRENT_CAP_SHIFT) + +#endif /* _SOC_QCOM_LLCC_PERFMON_H_ */ diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c new file mode 100644 index 0000000000000000000000000000000000000000..325864a948aff2cf306e704ce345bd10fb1cb932 --- /dev/null +++ b/drivers/soc/qcom/mem-offline.c @@ -0,0 +1,397 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AOP_MSG_ADDR_MASK 0xffffffff +#define AOP_MSG_ADDR_HIGH_SHIFT 32 +#define MAX_LEN 96 + +static unsigned long start_section_nr, end_section_nr; +static struct kobject *kobj; +static unsigned int offline_granule, sections_per_block; +#define MODULE_CLASS_NAME "mem-offline" +#define BUF_LEN 100 + +struct section_stat { + unsigned long success_count; + unsigned long fail_count; + unsigned long avg_time; + unsigned long best_time; + unsigned long worst_time; + unsigned long total_time; + unsigned long last_recorded_time; +}; + +enum memory_states { + MEMORY_ONLINE, + MEMORY_OFFLINE, + MAX_STATE, +}; + +static struct mem_offline_mailbox { + struct mbox_client cl; + struct mbox_chan *mbox; +} mailbox; + +static struct section_stat *mem_info; + +void record_stat(unsigned long sec, ktime_t delay, int mode) +{ + unsigned int total_sec = end_section_nr - start_section_nr + 1; + unsigned int blk_nr = (sec - start_section_nr + mode * total_sec) / + sections_per_block; + + if (sec > end_section_nr) + return; + + if (delay < mem_info[blk_nr].best_time || !mem_info[blk_nr].best_time) + mem_info[blk_nr].best_time = delay; + + if (delay > mem_info[blk_nr].worst_time) + mem_info[blk_nr].worst_time = delay; + + ++mem_info[blk_nr].success_count; + if (mem_info[blk_nr].fail_count) + --mem_info[blk_nr].fail_count; + + mem_info[blk_nr].total_time += delay; + + mem_info[blk_nr].avg_time = + mem_info[blk_nr].total_time / mem_info[blk_nr].success_count; + + mem_info[blk_nr].last_recorded_time = delay; +} + +static int aop_send_msg(unsigned long addr, bool online) +{ + struct qmp_pkt pkt; + char mbox_msg[MAX_LEN]; + unsigned long addr_low, addr_high; + + addr_low = addr & AOP_MSG_ADDR_MASK; + addr_high = (addr >> AOP_MSG_ADDR_HIGH_SHIFT) & AOP_MSG_ADDR_MASK; + + snprintf(mbox_msg, MAX_LEN, + "{class: ddr, event: pasr, addr_hi: 0x%08lx, addr_lo: 
0x%08lx, refresh: %s}", + addr_high, addr_low, online ? "on" : "off"); + + pkt.size = MAX_LEN; + pkt.data = mbox_msg; + return mbox_send_message(mailbox.mbox, &pkt); +} + +static int mem_event_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_notify *mn = arg; + unsigned long start, end, sec_nr; + static ktime_t cur; + ktime_t delay = 0; + phys_addr_t start_addr, end_addr; + unsigned int idx = end_section_nr - start_section_nr + 1; + + start = SECTION_ALIGN_DOWN(mn->start_pfn); + end = SECTION_ALIGN_UP(mn->start_pfn + mn->nr_pages); + + if ((start != mn->start_pfn) || (end != mn->start_pfn + mn->nr_pages)) { + WARN("mem-offline: %s pfn not aligned to section\n", __func__); + pr_err("mem-offline: start pfn = %lu end pfn = %lu\n", + mn->start_pfn, mn->start_pfn + mn->nr_pages); + return -EINVAL; + } + + start_addr = __pfn_to_phys(start); + end_addr = __pfn_to_phys(end); + sec_nr = pfn_to_section_nr(start); + switch (action) { + case MEM_GOING_ONLINE: + pr_debug("mem-offline: MEM_GOING_ONLINE : start = 0x%lx end = 0x%lx", + start_addr, end_addr); + ++mem_info[(sec_nr - start_section_nr + MEMORY_ONLINE * + idx) / sections_per_block].fail_count; + cur = ktime_get(); + + if (aop_send_msg(__pfn_to_phys(start), true)) + pr_err("PASR: AOP online request addr:0x%llx failed\n", + __pfn_to_phys(start)); + + break; + case MEM_ONLINE: + pr_info("mem-offline: Onlined memory block mem%lu\n", sec_nr); + delay = ktime_ms_delta(ktime_get(), cur); + record_stat(sec_nr, delay, MEMORY_ONLINE); + cur = 0; + break; + case MEM_GOING_OFFLINE: + pr_debug("mem-offline: MEM_GOING_OFFLINE : start = 0x%lx end = 0x%lx", + start_addr, end_addr); + ++mem_info[(sec_nr - start_section_nr + MEMORY_OFFLINE * + idx) / sections_per_block].fail_count; + cur = ktime_get(); + break; + case MEM_OFFLINE: + pr_info("mem-offline: Offlined memory block mem%lu\n", sec_nr); + + if (aop_send_msg(__pfn_to_phys(start), false)) + pr_err("PASR: AOP offline request addr:0x%llx 
failed\n", + __pfn_to_phys(start)); + + delay = ktime_ms_delta(ktime_get(), cur); + record_stat(sec_nr, delay, MEMORY_OFFLINE); + cur = 0; + break; + case MEM_CANCEL_ONLINE: + pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%lx end = 0x%lx", + start_addr, end_addr); + break; + default: + break; + } + return NOTIFY_OK; +} + +static int mem_online_remaining_blocks(void) +{ + unsigned long memblock_end_pfn = __phys_to_pfn(memblock_end_of_DRAM()); + unsigned long ram_end_pfn = __phys_to_pfn(bootloader_memory_limit); + unsigned long block_size, memblock, pfn; + unsigned int nid; + phys_addr_t phys_addr; + int fail = 0; + + block_size = memory_block_size_bytes(); + sections_per_block = block_size / MIN_MEMORY_BLOCK_SIZE; + + start_section_nr = pfn_to_section_nr(memblock_end_pfn); + end_section_nr = pfn_to_section_nr(ram_end_pfn); + + if (start_section_nr == end_section_nr) { + pr_err("mem-offline: System booted with no zone movable memory blocks. Cannot perform memory offlining\n"); + return -EINVAL; + } + for (memblock = start_section_nr; memblock <= end_section_nr; + memblock += sections_per_block) { + pfn = section_nr_to_pfn(memblock); + phys_addr = __pfn_to_phys(pfn); + + if (phys_addr & (((PAGES_PER_SECTION * sections_per_block) + << PAGE_SHIFT) - 1)) { + fail = 1; + pr_warn("mem-offline: PFN of mem%lu block not aligned to section start. 
Not adding this memory block\n", + memblock); + continue; + } + nid = memory_add_physaddr_to_nid(phys_addr); + if (add_memory(nid, phys_addr, + MIN_MEMORY_BLOCK_SIZE * sections_per_block)) { + pr_warn("mem-offline: Adding memory block mem%lu failed\n", + memblock); + fail = 1; + } + } + return fail; +} + +static ssize_t show_mem_offline_granule(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return snprintf(buf, BUF_LEN, "%lu\n", (unsigned long)offline_granule * + SZ_1M); +} + +static ssize_t show_mem_perf_stats(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + + unsigned int blk_start = start_section_nr / sections_per_block; + unsigned int blk_end = end_section_nr / sections_per_block; + unsigned int idx = blk_end - blk_start + 1; + unsigned int char_count = 0; + unsigned int i, j; + + for (j = 0; j < MAX_STATE; j++) { + char_count += snprintf(buf + char_count, BUF_LEN, + "\n\t%s\n\t\t\t", j == 0 ? "ONLINE" : "OFFLINE"); + for (i = blk_start; i <= blk_end; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%s%d\t\t", "mem", i); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + char_count += snprintf(buf + char_count, BUF_LEN, + "\tLast recd time:\t"); + for (i = 0; i <= blk_end - blk_start; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%lums\t\t", mem_info[i+j*idx].last_recorded_time); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + char_count += snprintf(buf + char_count, BUF_LEN, + "\tAvg time:\t"); + for (i = 0; i <= blk_end - blk_start; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%lums\t\t", mem_info[i+j*idx].avg_time); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + char_count += snprintf(buf + char_count, BUF_LEN, + "\tBest time:\t"); + for (i = 0; i <= blk_end - blk_start; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%lums\t\t", mem_info[i+j*idx].best_time); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + char_count 
+= snprintf(buf + char_count, BUF_LEN, + "\tWorst time:\t"); + for (i = 0; i <= blk_end - blk_start; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%lums\t\t", mem_info[i+j*idx].worst_time); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + char_count += snprintf(buf + char_count, BUF_LEN, + "\tSuccess count:\t"); + for (i = 0; i <= blk_end - blk_start; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%lu\t\t", mem_info[i+j*idx].success_count); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + char_count += snprintf(buf + char_count, BUF_LEN, + "\tFail count:\t"); + for (i = 0; i <= blk_end - blk_start; i++) + char_count += snprintf(buf + char_count, BUF_LEN, + "%lu\t\t", mem_info[i+j*idx].fail_count); + char_count += snprintf(buf + char_count, BUF_LEN, "\n"); + } + return char_count; +} + +static struct kobj_attribute perf_stats_attr = + __ATTR(perf_stats, 0444, show_mem_perf_stats, NULL); + +static struct kobj_attribute offline_granule_attr = + __ATTR(offline_granule, 0444, show_mem_offline_granule, NULL); + +static struct attribute *mem_root_attrs[] = { + &perf_stats_attr.attr, + &offline_granule_attr.attr, + NULL, +}; + +static struct attribute_group mem_attr_group = { + .attrs = mem_root_attrs, +}; + +static int mem_sysfs_init(void) +{ + unsigned int total_blks = (end_section_nr - start_section_nr + 1) / + sections_per_block; + + if (start_section_nr == end_section_nr) + return -EINVAL; + + kobj = kobject_create_and_add(MODULE_CLASS_NAME, kernel_kobj); + if (!kobj) + return -ENOMEM; + + if (sysfs_create_group(kobj, &mem_attr_group)) + kobject_put(kobj); + + mem_info = kzalloc(sizeof(*mem_info) * total_blks * MAX_STATE, + GFP_KERNEL); + if (!mem_info) + return -ENOMEM; + + return 0; +} + +static int mem_parse_dt(struct platform_device *pdev) +{ + const unsigned int *val; + struct device_node *node = pdev->dev.of_node; + + val = of_get_property(node, "granule", NULL); + if (!val && !*val) { + pr_err("mem-offine: 
granule property not found in DT\n"); + return -EINVAL; + } + offline_granule = be32_to_cpup(val); + if (!offline_granule && !(offline_granule & (offline_granule - 1)) && + offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) { + pr_err("mem-offine: invalid granule property\n"); + return -EINVAL; + } + + mailbox.cl.dev = &pdev->dev; + mailbox.cl.tx_block = true; + mailbox.cl.tx_tout = 1000; + mailbox.cl.knows_txdone = false; + + mailbox.mbox = mbox_request_channel(&mailbox.cl, 0); + if (IS_ERR(mailbox.mbox)) { + pr_err("mem-offline: failed to get mailbox channel %pK %d\n", + mailbox.mbox, PTR_ERR(mailbox.mbox)); + return PTR_ERR(mailbox.mbox); + } + + return 0; +} + +static struct notifier_block hotplug_memory_callback_nb = { + .notifier_call = mem_event_callback, + .priority = 0, +}; + +static int mem_offline_driver_probe(struct platform_device *pdev) +{ + if (mem_parse_dt(pdev)) + return -ENODEV; + + if (mem_online_remaining_blocks()) + pr_err("mem-offline: !!ERROR!! Auto onlining some memory blocks failed. 
System could run with less RAM\n"); + + if (mem_sysfs_init()) + return -ENODEV; + + if (register_hotmemory_notifier(&hotplug_memory_callback_nb)) { + pr_err("mem-offline: Registering memory hotplug notifier failed\n"); + return -ENODEV; + } + pr_info("mem-offline: Added memory blocks ranging from mem%lu - mem%lu\n", + start_section_nr, end_section_nr); + return 0; +} + +static const struct of_device_id mem_offline_match_table[] = { + {.compatible = "qcom,mem-offline"}, + {} +}; + +static struct platform_driver mem_offline_driver = { + .probe = mem_offline_driver_probe, + .driver = { + .name = "mem_offline", + .of_match_table = mem_offline_match_table, + .owner = THIS_MODULE, + }, +}; + +static int __init mem_module_init(void) +{ + return platform_driver_register(&mem_offline_driver); +} + +subsys_initcall(mem_module_init); diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c index d85c506fb7968d6e07697b126a5c10cb9b550cae..d229900b8e3ea28d34d86efcb60557f99298ee43 100644 --- a/drivers/soc/qcom/minidump_log.c +++ b/drivers/soc/qcom/minidump_log.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -46,7 +46,7 @@ static void __init register_log_buf(void) static void register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size, u32 cpu) { - struct page *sp_page = vmalloc_to_page((const void *) sp); + struct page *sp_page; struct vm_struct *stack_vm_area = task_stack_vm_area(current); ksp_entry->virt_addr = sp; @@ -92,17 +92,44 @@ static void __init register_kernel_sections(void) } } +static inline bool in_stack_range(u64 sp, u64 base_addr, unsigned int + stack_size) +{ + u64 min_addr = base_addr; + u64 max_addr = base_addr + stack_size; + + return (min_addr <= sp && sp < max_addr); +} + +static unsigned int calculate_copy_pages(u64 sp, struct vm_struct *stack_area) +{ + u64 tsk_stack_base = (u64) stack_area->addr; + u64 offset; + unsigned int stack_pages, copy_pages; + + if (in_stack_range(sp, tsk_stack_base, get_vm_area_size(stack_area))) { + offset = sp - tsk_stack_base; + stack_pages = get_vm_area_size(stack_area) / PAGE_SIZE; + copy_pages = stack_pages - (offset / PAGE_SIZE); + } else { + copy_pages = 0; + } + return copy_pages; +} + void dump_stack_minidump(u64 sp) { struct md_region ksp_entry, ktsk_entry; u32 cpu = smp_processor_id(); struct vm_struct *stack_vm_area; - unsigned int stack_pages, i, copy_pages; - u64 base_addr; + unsigned int i, copy_pages; if (is_idle_task(current)) return; + if (sp < KIMAGE_VADDR || sp > -256UL) + sp = current_stack_pointer; + /* * Since stacks are now allocated with vmalloc, the translation to * physical address is not a simple linear transformation like it is @@ -113,18 +140,16 @@ void dump_stack_minidump(u64 sp) */ stack_vm_area = task_stack_vm_area(current); if (stack_vm_area) { - sp = PAGE_ALIGN(sp); - scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d", - cpu); - base_addr = (u64) stack_vm_area->addr; - stack_pages = get_vm_area_size(stack_vm_area) >> 
PAGE_SHIFT; - copy_pages = stack_pages - ((sp - base_addr) / PAGE_SIZE); + sp &= ~(PAGE_SIZE - 1); + copy_pages = calculate_copy_pages(sp, stack_vm_area); for (i = 0; i < copy_pages; i++) { + scnprintf(ksp_entry.name, sizeof(ksp_entry.name), + "KSTACK%d_%d", cpu, i); register_stack_entry(&ksp_entry, sp, PAGE_SIZE, cpu); sp += PAGE_SIZE; } } else { - sp &= (THREAD_SIZE - 1); + sp &= ~(THREAD_SIZE - 1); scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d", cpu); register_stack_entry(&ksp_entry, sp, THREAD_SIZE, cpu); diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile index b33835657e1f5a63a60180072bb00631fb0154d5..c2ef70c8162c989acb9ec9330ed125cfde852de5 100644 --- a/drivers/soc/qcom/msm_bus/Makefile +++ b/drivers/soc/qcom/msm_bus/Makefile @@ -10,7 +10,7 @@ ifdef CONFIG_QCOM_BUS_CONFIG_RPMH msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o msm_bus_proxy_client.o obj-$(CONFIG_OF) += msm_bus_of_rpmh.o else - obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o \ + obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \ msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o obj-$(CONFIG_OF) += msm_bus_of_adhoc.o endif diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c index c0a439873f8a07023ca393f20c4f6cdfacfaf33c..371ca87f3956c0c69431c8437740db44cb95be8d 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -73,26 +73,26 @@ static void copy_remaining_nodes(struct list_head *edge_list, struct list_head * "util" file for these common func/macros. 
* */ -uint64_t msm_bus_div64(unsigned int w, uint64_t bw) +uint64_t msm_bus_div64(uint64_t num, unsigned int base) { - uint64_t *b = &bw; + uint64_t *n = # - if ((bw > 0) && (bw < w)) + if ((num > 0) && (num < base)) return 1; - switch (w) { + switch (base) { case 0: WARN(1, "AXI: Divide by 0 attempted\n"); - case 1: return bw; - case 2: return (bw >> 1); - case 4: return (bw >> 2); - case 8: return (bw >> 3); - case 16: return (bw >> 4); - case 32: return (bw >> 5); + case 1: return num; + case 2: return (num >> 1); + case 4: return (num >> 2); + case 8: return (num >> 3); + case 16: return (num >> 4); + case 32: return (num >> 5); } - do_div(*b, w); - return *b; + do_div(*n, base); + return *n; } int msm_bus_device_match_adhoc(struct device *dev, void *id) @@ -342,47 +342,49 @@ static int getpath(struct device *src_dev, int dest, const char *cl_name) } } - unsigned int i; - /* Setup the new edge list */ - list_for_each_entry(bus_node, &traverse_list, link) { - /* Setup list of black-listed nodes */ - setup_bl_list(bus_node, &black_list); - for (i = 0; i < bus_node->node_info->num_connections; - i++) { - bool skip; - struct msm_bus_node_device_type *node_conn; - - node_conn = - to_msm_bus_node( + if (!found) { + unsigned int i; + /* Setup the new edge list */ + list_for_each_entry(bus_node, &traverse_list, link) { + /* Setup list of black-listed nodes */ + setup_bl_list(bus_node, &black_list); + + for (i = 0; i < + bus_node->node_info->num_connections; i++) { + bool skip; + struct msm_bus_node_device_type + *node_conn; + node_conn = to_msm_bus_node( bus_node->node_info->dev_connections[i]); - - if (node_conn->node_info->is_traversed) { - MSM_BUS_ERR("Circ Path %d\n", - node_conn->node_info->id); - goto reset_traversed; - } - skip = chk_bl_list(&black_list, + if ( + node_conn->node_info->is_traversed) { + MSM_BUS_ERR("Circ Path %d\n", + node_conn->node_info->id); + goto reset_traversed; + } + skip = chk_bl_list(&black_list, bus_node->node_info->connections[i]); - 
if (!skip) { - list_add_tail(&node_conn->link, - &edge_list); + if (!skip) { + list_add_tail( + &node_conn->link, &edge_list); node_conn->node_info->is_traversed = - true; + true; + } } } - } - /* Keep tabs of the previous search list */ - search_node = kzalloc(sizeof(struct bus_search_type), - GFP_KERNEL); - INIT_LIST_HEAD(&search_node->node_list); - list_splice_init(&traverse_list, - &search_node->node_list); - /* Add the previous search list to a route list */ - list_add_tail(&search_node->link, &route_list); - /* Advancing the list depth */ - depth_index++; - list_splice_init(&edge_list, &traverse_list); + /* Keep tabs of the previous search list */ + search_node = kzalloc(sizeof(struct bus_search_type), + GFP_KERNEL); + INIT_LIST_HEAD(&search_node->node_list); + list_splice_init(&traverse_list, + &search_node->node_list); + /* Add the previous search list to a route list */ + list_add_tail(&search_node->link, &route_list); + /* Advancing the list depth */ + depth_index++; + list_splice_init(&edge_list, &traverse_list); + } } reset_traversed: copy_remaining_nodes(&edge_list, &traverse_list, &route_list); @@ -447,19 +449,18 @@ static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev, if (util_fact && (util_fact != 100)) { sum_ab *= util_fact; - sum_ab = msm_bus_div64(100, sum_ab); + sum_ab = msm_bus_div64(sum_ab, 100); } if (vrail_comp && (vrail_comp != 100)) { max_ib *= 100; - max_ib = msm_bus_div64(vrail_comp, max_ib); + max_ib = msm_bus_div64(max_ib, vrail_comp); } /* Account for multiple channels if any */ if (bus_dev->node_info->agg_params.num_aggports > 1) - sum_ab = msm_bus_div64( - bus_dev->node_info->agg_params.num_aggports, - sum_ab); + sum_ab = msm_bus_div64(sum_ab, + bus_dev->node_info->agg_params.num_aggports); if (!bus_dev->node_info->agg_params.buswidth) { MSM_BUS_WARN("No bus width found for %d. 
Using default\n", @@ -468,8 +469,8 @@ static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev, } bw_max_hz = max(max_ib, sum_ab); - bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth, - bw_max_hz); + bw_max_hz = msm_bus_div64(bw_max_hz, + bus_dev->node_info->agg_params.buswidth); return bw_max_hz; } @@ -512,19 +513,18 @@ static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev, if (util_fact && (util_fact != 100)) { sum_ab *= util_fact; - sum_ab = msm_bus_div64(100, sum_ab); + sum_ab = msm_bus_div64(sum_ab, 100); } if (vrail_comp && (vrail_comp != 100)) { max_ib *= 100; - max_ib = msm_bus_div64(vrail_comp, max_ib); + max_ib = msm_bus_div64(max_ib, vrail_comp); } /* Account for multiple channels if any */ if (bus_dev->node_info->agg_params.num_aggports > 1) - sum_ab = msm_bus_div64( - bus_dev->node_info->agg_params.num_aggports, - sum_ab); + sum_ab = msm_bus_div64(sum_ab, + bus_dev->node_info->agg_params.num_aggports); if (!bus_dev->node_info->agg_params.buswidth) { MSM_BUS_WARN("No bus width found for %d. 
Using default\n", @@ -533,8 +533,8 @@ static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev, } bw_max_hz = max(max_ib, sum_ab); - bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth, - bw_max_hz); + bw_max_hz = msm_bus_div64(bw_max_hz, + bus_dev->node_info->agg_params.buswidth); return bw_max_hz; } @@ -868,7 +868,7 @@ static void unregister_client_adhoc(uint32_t cl) } curr = client->curr; - if (curr >= pdata->num_usecases) { + if ((curr < 0) || (curr >= pdata->num_usecases)) { MSM_BUS_ERR("Invalid index Defaulting curr to 0"); curr = 0; } @@ -1106,75 +1106,6 @@ static int update_client_paths(struct msm_bus_client *client, bool log_trns, return ret; } -static int query_client_paths(struct msm_bus_client *client, bool log_trns, - unsigned int idx) -{ - int lnode, src, dest, cur_idx; - uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw; - int i, ret = 0; - struct msm_bus_scale_pdata *pdata; - struct device *src_dev; - - if (!client) { - MSM_BUS_ERR("Client handle Null"); - ret = -ENXIO; - goto exit_update_client_paths; - } - - pdata = client->pdata; - if (!pdata) { - MSM_BUS_ERR("Client pdata Null"); - ret = -ENXIO; - goto exit_update_client_paths; - } - - cur_idx = client->curr; - client->curr = idx; - for (i = 0; i < pdata->usecase->num_paths; i++) { - src = pdata->usecase[idx].vectors[i].src; - dest = pdata->usecase[idx].vectors[i].dst; - - lnode = client->src_pnode[i]; - src_dev = client->src_devs[i]; - req_clk = client->pdata->usecase[idx].vectors[i].ib; - req_bw = client->pdata->usecase[idx].vectors[i].ab; - if (cur_idx < 0) { - curr_clk = 0; - curr_bw = 0; - } else { - curr_clk = - client->pdata->usecase[cur_idx].vectors[i].ib; - curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab; - MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__, - curr_bw, curr_clk); - } - - if (pdata->active_only) { - slp_clk = 0; - slp_bw = 0; - } else { - slp_clk = req_clk; - slp_bw = req_bw; - } - - ret = update_path(src_dev, dest, req_clk, 
req_bw, slp_clk, - slp_bw, curr_clk, curr_bw, lnode, pdata->active_only); - - if (ret) { - MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n", - __func__, ret, pdata->active_only); - goto exit_update_client_paths; - } - - if (log_trns) - getpath_debug(src, lnode, pdata->active_only); - } - commit_data(); -exit_update_client_paths: - return ret; -} - - static int update_context(uint32_t cl, bool active_only, unsigned int ctx_idx) { @@ -1347,8 +1278,8 @@ static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib) commit_data(); cl->cur_act_ib = ib; cl->cur_act_ab = ab; - cl->cur_slp_ib = slp_ib; - cl->cur_slp_ab = slp_ab; + cl->cur_dual_ib = slp_ib; + cl->cur_dual_ab = slp_ab; if (log_transaction) getpath_debug(cl->mas, cl->first_hop, cl->active_only); @@ -1373,18 +1304,18 @@ static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab, if ((cl->cur_act_ib == act_ib) && (cl->cur_act_ab == act_ab) && - (cl->cur_slp_ib == slp_ib) && - (cl->cur_slp_ab == slp_ab)) { + (cl->cur_dual_ib == slp_ib) && + (cl->cur_dual_ab == slp_ab)) { MSM_BUS_ERR("No change in vote"); goto exit_change_context; } if (!slp_ab && !slp_ib) cl->active_only = true; - msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_slp_ib); - ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, slp_ab, - cl->cur_act_ab, cl->cur_act_ab, cl->first_hop, - cl->active_only); + msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib); + ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, + slp_ab, cl->cur_act_ab, cl->cur_act_ab, + cl->first_hop, cl->active_only); if (ret) { MSM_BUS_ERR("%s: Update path failed! 
%d active_only %d\n", __func__, ret, cl->active_only); @@ -1393,8 +1324,8 @@ static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab, commit_data(); cl->cur_act_ib = act_ib; cl->cur_act_ab = act_ab; - cl->cur_slp_ib = slp_ib; - cl->cur_slp_ab = slp_ab; + cl->cur_dual_ib = slp_ib; + cl->cur_dual_ab = slp_ab; trace_bus_update_request_end(cl->name); exit_change_context: rt_mutex_unlock(&msm_bus_adhoc_lock); @@ -1416,6 +1347,7 @@ static void unregister_adhoc(struct msm_bus_client_handle *cl) cl->first_hop, cl->active_only); commit_data(); msm_bus_dbg_remove_client(cl); + kfree(cl->name); kfree(cl); exit_unregister_client: rt_mutex_unlock(&msm_bus_adhoc_lock); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c index 95f61aaecaafd1a76eeddc6a941f23b57ffdc87f..4e81163f663424f67e07a046daa183b1a775a539 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -243,7 +243,7 @@ enum bimc_m_bke_health_3 { (M_BKE_GC_GC_BMSK >> \ (M_BKE_GC_GC_SHFT + 1)) -static int bimc_div(int64_t *a, uint32_t b) +static int bimc_div(uint64_t *a, uint32_t b) { if ((*a > 0) && (*a < b)) { *a = 0; @@ -549,8 +549,8 @@ static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev, if (info && info->num_qports && ((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) { - bw = msm_bus_div64(info->num_qports, - dev->node_bw[ACTIVE_CTX].sum_ab); + bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab, + info->num_qports); MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n", info->id, bw); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c index 2ee4516ea5714e9cb2a607ab613dd640d7e2e3b9..61bfba938d62e21cbb3e53a03cd55ec3eac4a26a 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, 2018, Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1192,6 +1192,9 @@ static int msm_bus_device_probe(struct platform_device *pdev) devm_kfree(&pdev->dev, pdata->info); devm_kfree(&pdev->dev, pdata); + + dev_info(&pdev->dev, "Bus scaling driver probe successful\n"); + exit_device_probe: return ret; } diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index 3122e995730eb71f850572238ed18297e5bded82..f16c2bb938b0de07e1fc3426d02781b7e6e76ffc 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -42,6 +42,7 @@ ((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) static int msm_bus_dev_init_qos(struct device *dev, void *data); +static int msm_bus_dev_sbm_config(struct device *dev, bool enable); static struct list_head bcm_query_list_inorder[VCD_MAX_CNT]; static struct msm_bus_node_device_type *cur_rsc; @@ -481,9 +482,11 @@ static int bcm_query_list_add(struct msm_bus_node_device_type *cur_dev) cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]); cur_vcd = cur_bcm->bcmdev->clk_domain; - if (!cur_bcm->query_dirty) + if (!cur_bcm->query_dirty) { list_add_tail(&cur_bcm->query_link, &bcm_query_list_inorder[cur_vcd]); + cur_bcm->query_dirty = true; + } } exit_bcm_query_list_add: @@ -562,6 +565,7 @@ int msm_bus_commit_data(struct list_head *clist) list_for_each_entry_safe(node, node_tmp, clist, link) { bcm_clist_add(node); + msm_bus_dev_sbm_config(&node->dev, false); } if (!cur_rsc) { @@ -654,6 +658,7 @@ int msm_bus_commit_data(struct list_head *clist) list_for_each_entry_safe(node, node_tmp, clist, link) { if (unlikely(node->node_info->defer_qos)) msm_bus_dev_init_qos(&node->dev, NULL); + msm_bus_dev_sbm_config(&node->dev, true); } exit_msm_bus_commit_data: @@ -1005,6 +1010,87 @@ static int msm_bus_dev_init_qos(struct device *dev, void *data) return 
ret; } +static int msm_bus_dev_sbm_config(struct device *dev, bool enable) +{ + int ret = 0, idx = 0; + struct msm_bus_node_device_type *node_dev = NULL; + struct msm_bus_node_device_type *fab_dev = NULL; + + node_dev = to_msm_bus_node(dev); + if (!node_dev) { + MSM_BUS_ERR("%s: Unable to get node device info", __func__); + return -ENXIO; + } + + if (!node_dev->node_info->num_disable_ports) + return 0; + + if ((node_dev->node_bw[DUAL_CTX].sum_ab || + node_dev->node_bw[DUAL_CTX].max_ib || + !node_dev->is_connected) && !enable) + return 0; + else if (((!node_dev->node_bw[DUAL_CTX].sum_ab && + !node_dev->node_bw[DUAL_CTX].max_ib) || + node_dev->is_connected) && enable) + return 0; + + if (enable) { + for (idx = 0; idx < node_dev->num_regs; idx++) { + if (!node_dev->node_regs[idx].reg) + node_dev->node_regs[idx].reg = + devm_regulator_get(dev, + node_dev->node_regs[idx].name); + + if ((IS_ERR_OR_NULL(node_dev->node_regs[idx].reg))) + return -ENXIO; + ret = regulator_enable(node_dev->node_regs[idx].reg); + if (ret) { + MSM_BUS_ERR("%s: Failed to enable reg:%s\n", + __func__, node_dev->node_regs[idx].name); + return ret; + } + } + node_dev->is_connected = true; + } + + fab_dev = to_msm_bus_node(node_dev->node_info->bus_device); + if (!fab_dev) { + MSM_BUS_ERR("%s: Unable to get bus device info for %d", + __func__, + node_dev->node_info->id); + return -ENXIO; + } + + if (fab_dev->fabdev && + fab_dev->fabdev->noc_ops.sbm_config) { + ret = fab_dev->fabdev->noc_ops.sbm_config( + node_dev, + fab_dev->fabdev->qos_base, + fab_dev->fabdev->sbm_offset, + enable); + } + + if (!enable) { + for (idx = 0; idx < node_dev->num_regs; idx++) { + if (!node_dev->node_regs[idx].reg) + node_dev->node_regs[idx].reg = + devm_regulator_get(dev, + node_dev->node_regs[idx].name); + + if ((IS_ERR_OR_NULL(node_dev->node_regs[idx].reg))) + return -ENXIO; + ret = regulator_disable(node_dev->node_regs[idx].reg); + if (ret) { + MSM_BUS_ERR("%s: Failed to disable reg:%s\n", + __func__, 
node_dev->node_regs[idx].name); + return ret; + } + } + node_dev->is_connected = false; + } + return ret; +} + static int msm_bus_fabric_init(struct device *dev, struct msm_bus_node_device_type *pdata) { @@ -1041,6 +1127,7 @@ static int msm_bus_fabric_init(struct device *dev, fabdev->qos_freq = pdata->fabdev->qos_freq; fabdev->bus_type = pdata->fabdev->bus_type; fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg; + fabdev->sbm_offset = pdata->fabdev->sbm_offset; msm_bus_fab_init_noc_ops(node_dev); fabdev->qos_base = devm_ioremap(dev, @@ -1293,6 +1380,8 @@ static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata, node_info->num_bcm_devs = pdata_node_info->num_bcm_devs; node_info->num_rsc_devs = pdata_node_info->num_rsc_devs; node_info->num_qports = pdata_node_info->num_qports; + node_info->num_disable_ports = pdata_node_info->num_disable_ports; + node_info->disable_ports = pdata_node_info->disable_ports; node_info->virt_dev = pdata_node_info->virt_dev; node_info->is_fab_dev = pdata_node_info->is_fab_dev; node_info->is_bcm_dev = pdata_node_info->is_bcm_dev; @@ -1516,6 +1605,9 @@ static struct device *msm_bus_device_init( pdata->qos_bcms[i].vec.vec_b; } } + bus_node->num_regs = pdata->num_regs; + if (bus_node->num_regs) + bus_node->node_regs = pdata->node_regs; bus_dev->of_node = pdata->of_node; diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.h b/drivers/soc/qcom/msm_bus/msm_bus_noc.h index 8735edb7e3a8da979ddee0d0d81f14072dc1bb0a..9c842a0c3fed3b5165e3d43916211956f568a93b 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_noc.h +++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -47,6 +47,7 @@ struct msm_bus_noc_info { uint32_t qos_baseoffset; uint32_t qos_delta; uint32_t *mas_modes; + uint32_t sbm_offset; struct msm_bus_noc_commit cdata[NUM_CTX]; }; diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c index f51939fdc70719abd47fc209740805600ed0f737..c5a70fdb104a42ac656cafcb8b3429a45dc0b4f9 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -410,8 +410,8 @@ static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev, NOC_QOS_MODE_LIMITER))) { struct msm_bus_noc_qos_bw qos_bw; - bw = msm_bus_div64(info->num_qports, - dev->node_bw[ACTIVE_CTX].sum_ab); + bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab, + info->num_qports); for (i = 0; i < info->num_qports; i++) { if (!info->qport) { diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c index 2ae7be324a51af0648566f379a0601413cd5e1f8..a76320f7611c25ea5df2ce0067bb01391fdfdbd2 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -12,6 +12,9 @@ #define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__ +#include +#include +#include #include #include #include @@ -29,9 +32,23 @@ #define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT) #define MIN_SAT_FIELD 1 #define MIN_BW_FIELD 1 +#define READ_TIMEOUT_MS msecs_to_jiffies(1) +#define READ_DELAY_US 10 #define NOC_QOS_REG_BASE(b, o) ((b) + (o)) +/*Sideband Manager Disable Macros*/ +#define DISABLE_SBM_FLAGOUTCLR0_LOW_OFF 0x80 +#define DISABLE_SBM_FLAGOUTCLR0_HIGH_OFF 0x84 +#define DISABLE_SBM_FLAGOUTSET0_LOW_OFF 0x88 +#define DISABLE_SBM_FLAGOUTSET0_HIGH_OFF 0x8C +#define DISABLE_SBM_FLAGOUTSTATUS0_LOW_OFF 0x90 +#define DISABLE_SBM_FLAGOUTSTATUS0_HIGH_OFF 0x94 +#define DISABLE_SBM_SENSEIN0_LOW_OFF 0x100 +#define DISABLE_SBM_SENSEIN0_HIGH_OFF 0x104 + +#define DISABLE_SBM_REG_BASE(b, o, d) ((b) + (o) + (d)) + #define NOC_QOS_MAINCTL_LOWn_ADDR(b, o, n, d) \ (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n)) enum noc_qos_id_mainctl_lown { @@ -361,12 +378,87 @@ static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info, return ret; } +static int msm_bus_noc_sbm_config(struct msm_bus_node_device_type *node_dev, + void __iomem *noc_base, uint32_t sbm_offset, + bool enable) +{ + int ret = 0, idx; + unsigned long j, j_timeout; + uint32_t flagset_offset, flagclr_offset, sense_offset; + + for (idx = 0; idx < node_dev->node_info->num_disable_ports; idx++) { + uint32_t disable_port = node_dev->node_info->disable_ports[idx]; + uint32_t reg_val = 0; + + if (disable_port >= 64) { + return -EINVAL; + } else if (disable_port < 32) { + flagset_offset = DISABLE_SBM_FLAGOUTSET0_LOW_OFF; + flagclr_offset = DISABLE_SBM_FLAGOUTCLR0_LOW_OFF; + sense_offset = DISABLE_SBM_SENSEIN0_LOW_OFF; + } else { + flagset_offset = DISABLE_SBM_FLAGOUTSET0_HIGH_OFF; + flagclr_offset = DISABLE_SBM_FLAGOUTCLR0_HIGH_OFF; + 
sense_offset = DISABLE_SBM_SENSEIN0_HIGH_OFF; + disable_port = disable_port - 32; + } + + if (enable) { + reg_val |= 0x1 << disable_port; + writel_relaxed(reg_val, DISABLE_SBM_REG_BASE(noc_base, + sbm_offset, flagclr_offset)); + /* Ensure SBM reconnect took place */ + wmb(); + + j = jiffies; + j_timeout = j + READ_TIMEOUT_MS; + while (((0x1 << disable_port) & + readl_relaxed(DISABLE_SBM_REG_BASE(noc_base, + sbm_offset, sense_offset)))) { + udelay(READ_DELAY_US); + j = jiffies; + if (time_after(j, j_timeout)) { + MSM_BUS_ERR("%s: SBM enable timeout.\n", + __func__); + goto sbm_timeout; + } + } + } else { + reg_val |= 0x1 << disable_port; + writel_relaxed(reg_val, DISABLE_SBM_REG_BASE(noc_base, + sbm_offset, flagset_offset)); + /* Ensure SBM disconnect took place */ + wmb(); + + j = jiffies; + j_timeout = j + READ_TIMEOUT_MS; + while (!((0x1 << disable_port) & + readl_relaxed(DISABLE_SBM_REG_BASE(noc_base, + sbm_offset, sense_offset)))) { + udelay(READ_DELAY_US); + j = jiffies; + if (time_after(j, j_timeout)) { + MSM_BUS_ERR("%s: SBM disable timeout.\n" + , __func__); + goto sbm_timeout; + } + } + } + } + return ret; + +sbm_timeout: + return -ETIME; + +} + int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev) { if (!bus_dev) return -ENODEV; bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init; + bus_dev->fabdev->noc_ops.sbm_config = msm_bus_noc_sbm_config; return 0; } diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c index 34ba05f35f050decfa768b26782b46c7d4e379a1..f24bc92f16bf5b00ecf7d8246ed380f4a738a500 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_of.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,7 +41,7 @@ static int get_num(const char *const str[], const char *name) return -EINVAL; } -static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, +static struct msm_bus_scale_pdata *get_pdata(struct device *dev, struct device_node *of_node) { struct msm_bus_scale_pdata *pdata = NULL; @@ -51,12 +51,12 @@ static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, const uint32_t *vec_arr = NULL; bool mem_err = false; - if (!pdev) { - pr_err("Error: Null Platform device\n"); + if (!dev) { + pr_err("Error: Null device\n"); return NULL; } - pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata), + pdata = devm_kzalloc(dev, sizeof(struct msm_bus_scale_pdata), GFP_KERNEL); if (!pdata) { mem_err = true; @@ -89,7 +89,7 @@ static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, pdata->alc = of_property_read_bool(of_node, "qcom,msm-bus,alc-voter"); if (pdata->alc) { - usecase_lat = devm_kzalloc(&pdev->dev, + usecase_lat = devm_kzalloc(dev, (sizeof(struct msm_bus_lat_vectors) * pdata->num_usecases), GFP_KERNEL); if (!usecase_lat) { @@ -122,7 +122,7 @@ static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, return pdata; } - usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) * + usecase = devm_kzalloc(dev, (sizeof(struct msm_bus_paths) * pdata->num_usecases), GFP_KERNEL); if (!usecase) { mem_err = true; @@ -149,7 +149,7 @@ static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, for (i = 0; i < num_usecases; i++) { usecase[i].num_paths = num_paths; - usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths * + usecase[i].vectors = devm_kzalloc(dev, num_paths * sizeof(struct msm_bus_vectors), GFP_KERNEL); if (!usecase[i].vectors) { mem_err = true; @@ -206,7 +206,7 @@ struct msm_bus_scale_pdata 
*msm_bus_cl_get_pdata(struct platform_device *pdev) } of_node = pdev->dev.of_node; - pdata = get_pdata(pdev, of_node); + pdata = get_pdata(&pdev->dev, of_node); if (!pdata) { pr_err("client has to provide missing entry for successful registration\n"); return NULL; @@ -216,6 +216,37 @@ struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev) } EXPORT_SYMBOL(msm_bus_cl_get_pdata); +/** + * msm_bus_cl_get_pdata_from_dev() - Generate bus client data from device tree + * provided by clients. + * + * of_node: Device tree node to extract information from + * + * The function returns a valid pointer to the allocated bus-scale-pdata + * if the vectors were correctly read from the client's device node. + * Any error in reading or parsing the device node will return NULL + * to the caller. + */ +struct msm_bus_scale_pdata *msm_bus_cl_get_pdata_from_dev(struct device *dev) +{ + struct device_node *of_node; + struct msm_bus_scale_pdata *pdata = NULL; + + of_node = dev->of_node; + + if (!of_node) + return NULL; + + pdata = get_pdata(dev, of_node); + if (!pdata) { + pr_err("client has to provide missing entry for successful registration\n"); + return NULL; + } + + return pdata; +} +EXPORT_SYMBOL(msm_bus_cl_get_pdata_from_dev); + /** * msm_bus_cl_pdata_from_node() - Generate bus client data from device tree * node provided by clients. 
This function should be used when a client @@ -247,7 +278,7 @@ struct msm_bus_scale_pdata *msm_bus_pdata_from_node( return NULL; } - pdata = get_pdata(pdev, of_node); + pdata = get_pdata(&pdev->dev, of_node); if (!pdata) { pr_err("client has to provide missing entry for successful registration\n"); return NULL; diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c index 3db0bb98ccbde8d2359b9e118003cb82482ac143..a6edab0a720b04aa0f9be85620f02346e15d7af0 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -606,11 +606,6 @@ static int get_bus_node_device_data( } of_node_put(qos_clk_node); } - - if (msmbus_coresight_init_adhoc(pdev, dev_node)) - dev_warn(&pdev->dev, - "Coresight support absent for bus: %d\n", - node_device->node_info->id); } else { node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node, "bus_qos_clk"); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c index c0dff5da45c4950a7741e80f61bd7cd8184dce5a..8be7ba5c82daac92803c6d95be0a375ab97adb36 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -167,6 +167,11 @@ static struct msm_bus_fab_device_type *get_fab_device_info( if (ret) dev_dbg(&pdev->dev, "Bus base offset is missing\n"); + ret = of_property_read_u32(dev_node, "qcom,sbm-offset", + &fab_dev->sbm_offset); + if (ret) + dev_dbg(&pdev->dev, "sbm disable offset is missing\n"); + ret = of_property_read_u32(dev_node, "qcom,qos-off", &fab_dev->qos_off); if (ret) @@ -375,6 +380,25 @@ static struct msm_bus_node_info_type *get_node_info_data( node_info->qport = get_arr(pdev, dev_node, "qcom,qport", &node_info->num_qports); + node_info->num_disable_ports = of_property_count_elems_of_size(dev_node, + "qcom,disable-ports", sizeof(uint32_t)); + + if (node_info->num_disable_ports < 0) { + node_info->num_disable_ports = 0; + dev_dbg(&pdev->dev, "no disable ports\n"); + } + + if (node_info->num_disable_ports) { + node_info->disable_ports = devm_kcalloc(&pdev->dev, + node_info->num_disable_ports, sizeof(uint32_t), + GFP_KERNEL); + if (!node_info->disable_ports) + return NULL; + ret = of_property_read_u32_array(dev_node, "qcom,disable-ports", + node_info->disable_ports, + node_info->num_disable_ports); + } + if (of_get_property(dev_node, "qcom,connections", &size)) { node_info->num_connections = size / sizeof(int); node_info->connections = devm_kzalloc(&pdev->dev, size, @@ -498,10 +522,12 @@ static int get_bus_node_device_data( { bool enable_only; bool setrate_only; - int num_elems = 0, num_bcms = 0, i = 0, ret = 0; + int num_elems = 0, num_bcms = 0, i = 0, ret = 0, num_regs = 0; uint32_t *vec_arr = NULL; struct qos_bcm_type *qos_bcms = NULL; struct device_node *qos_clk_node = NULL; + const char *reg_name; + struct property *prop; node_device->node_info = get_node_info_data(dev_node, pdev); if (IS_ERR_OR_NULL(node_device->node_info)) { @@ -672,6 +698,24 @@ static int get_bus_node_device_data( 
scnprintf(node_device->clk[DUAL_CTX].reg_name, MAX_REG_NAME, "%c", '\0'); + num_regs = of_property_count_strings(dev_node, + "node-reg-names"); + if (num_regs < 0) + node_device->num_regs = 0; + else { + i = 0; + node_device->num_regs = num_regs; + node_device->node_regs = devm_kzalloc(&pdev->dev, + (num_regs * sizeof(struct node_regulator)), + GFP_KERNEL); + + of_property_for_each_string(dev_node, "node-reg-names", + prop, reg_name) { + scnprintf(node_device->node_regs[i].name, + MAX_REG_NAME, "%s", reg_name); + i++; + } + } } return 0; } diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c index 7159c1789cce7fba4dd7ad5c8dd7bc9e30b91ce9..cc36ddd272b3dc99c533a6725ec78334f24c1a2e 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -159,6 +159,7 @@ static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration cd->mas_arb[i].hw_id, cd->mas_arb[i].bw); break; + } cd->mas_arb[i].dirty = false; } @@ -179,8 +180,9 @@ static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration cd->slv_arb[i].hw_id, cd->slv_arb[i].bw); break; - cd->slv_arb[i].dirty = false; } + cd->slv_arb[i].dirty = false; + } return status; } diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h index 9476249d97a02547c8c74e54aab5354a98731504..ffa6f92177d4d684e334ab844ff131269f6b2408 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h +++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h @@ -52,6 +52,9 @@ struct msm_bus_noc_ops { uint32_t qos_delta, uint32_t qos_freq, int enable_lim, uint64_t lim_bw); bool (*update_bw_reg)(int mode); + int (*sbm_config)(struct msm_bus_node_device_type *node_dev, + void __iomem *noc_base, uint32_t sbm_offset, + bool enable); }; struct nodebw { @@ -76,6 +79,11 @@ struct nodevector { uint64_t query_vec_b; }; +struct node_regulator { + char name[MAX_REG_NAME]; + struct regulator *reg; +}; + struct qos_bcm_type { int qos_bcm_id; struct nodevector vec; @@ -111,6 +119,7 @@ struct msm_bus_fab_device_type { uint32_t base_offset; uint32_t qos_freq; uint32_t qos_off; + uint32_t sbm_offset; struct msm_bus_noc_ops noc_ops; enum msm_bus_hw_sel bus_type; bool bypass_qos_prg; @@ -189,6 +198,8 @@ struct msm_bus_node_info_type { struct rule_update_path_info rule; uint64_t lim_bw; bool defer_qos; + uint32_t *disable_ports; + int num_disable_ports; struct node_agg_params_type agg_params; }; @@ -209,6 +220,8 @@ struct msm_bus_node_device_type { struct nodeclk *node_qos_clks; uint32_t num_qos_bcms; struct qos_bcm_type *qos_bcms; + uint32_t num_regs; + struct node_regulator *node_regs; unsigned int ap_owned; struct device_node 
*of_node; struct device dev; @@ -217,6 +230,7 @@ struct msm_bus_node_device_type { bool query_dirty; struct list_head dev_link; struct list_head devlist; + bool is_connected; }; static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d) diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c index f23810ac91ee79b104dd64d34b09d5a773611635..df9b9770f07aeafbbeab21585462217338d701f0 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -93,6 +93,7 @@ static struct rule_node_info *gen_node(u32 id, void *data) if (!node_match) { node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL); + if (!node_match) goto exit_node_match; node_match->id = id; diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index b1afa81e2febf503d35b8579b0867cd4e4c26c3f..421d1233260da3ece3ed8c63516a39b0510997c3 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -56,7 +57,10 @@ #define PIL_NUM_DESC 10 #define MAX_LEN 96 +#define NUM_OF_ENCRYPTED_KEY 3 + static void __iomem *pil_info_base; +static struct md_global_toc *g_md_toc; /** * proxy_timeout - Override for proxy vote timeouts @@ -137,6 +141,70 @@ struct pil_priv { size_t region_size; }; +static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev) +{ + struct md_ss_region __iomem *region_info; + struct ramdump_segment *ramdump_segs, *s; + struct pil_priv *priv = desc->priv; + void __iomem *subsys_segtable_base; + u64 ss_region_ptr = 0; + void __iomem *offset; + int 
ss_mdump_seg_cnt; + int ss_valid_seg_cnt; + int ret, i; + + ss_region_ptr = desc->minidump->md_ss_smem_regions_baseptr; + if (!ramdump_dev) + return -ENODEV; + ss_mdump_seg_cnt = desc->minidump->ss_region_count; + subsys_segtable_base = + ioremap((unsigned long)ss_region_ptr, + ss_mdump_seg_cnt * sizeof(struct md_ss_region)); + region_info = (struct md_ss_region __iomem *)subsys_segtable_base; + if (!region_info) + return -EINVAL; + pr_debug("Segments in minidump 0x%x\n", ss_mdump_seg_cnt); + ramdump_segs = kcalloc(ss_mdump_seg_cnt, + sizeof(*ramdump_segs), GFP_KERNEL); + if (!ramdump_segs) + return -ENOMEM; + + if (desc->subsys_vmid > 0) + ret = pil_assign_mem_to_linux(desc, priv->region_start, + (priv->region_end - priv->region_start)); + + s = ramdump_segs; + ss_valid_seg_cnt = ss_mdump_seg_cnt; + for (i = 0; i < ss_mdump_seg_cnt; i++) { + memcpy(&offset, ®ion_info, sizeof(region_info)); + offset = offset + sizeof(region_info->name) + + sizeof(region_info->seq_num); + if (__raw_readl(offset) == MD_REGION_VALID) { + memcpy(&s->name, ®ion_info, sizeof(region_info)); + offset = offset + sizeof(region_info->md_valid); + s->address = __raw_readl(offset); + offset = offset + + sizeof(region_info->region_base_address); + s->size = __raw_readl(offset); + pr_debug("Minidump : Dumping segment %s with address 0x%lx and size 0x%x\n", + s->name, s->address, (unsigned int)s->size); + } else + ss_valid_seg_cnt--; + s++; + region_info++; + } + ret = do_minidump(ramdump_dev, ramdump_segs, ss_valid_seg_cnt); + kfree(ramdump_segs); + if (ret) + pil_err(desc, "%s: Minidump collection failed for subsys %s rc:%d\n", + __func__, desc->name, ret); + + if (desc->subsys_vmid > 0) + ret = pil_assign_mem_to_subsys(desc, priv->region_start, + (priv->region_end - priv->region_start)); + return ret; +} + /** * pil_do_ramdump() - Ramdump an image * @desc: descriptor from pil_desc_init() @@ -145,13 +213,45 @@ struct pil_priv { * Calls the ramdump API with a list of segments generated from the 
addresses * that the descriptor corresponds to. */ -int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev) +int pil_do_ramdump(struct pil_desc *desc, + void *ramdump_dev, void *minidump_dev) { + struct ramdump_segment *ramdump_segs, *s; struct pil_priv *priv = desc->priv; struct pil_seg *seg; int count = 0, ret; - struct ramdump_segment *ramdump_segs, *s; + if (desc->minidump) { + pr_debug("Minidump : md_ss_toc->md_ss_toc_init is 0x%x\n", + (unsigned int)desc->minidump->md_ss_toc_init); + pr_debug("Minidump : md_ss_toc->md_ss_enable_status is 0x%x\n", + (unsigned int)desc->minidump->md_ss_enable_status); + pr_debug("Minidump : md_ss_toc->encryption_status is 0x%x\n", + (unsigned int)desc->minidump->encryption_status); + pr_debug("Minidump : md_ss_toc->ss_region_count is 0x%x\n", + (unsigned int)desc->minidump->ss_region_count); + pr_debug("Minidump : md_ss_toc->md_ss_smem_regions_baseptr is 0x%x\n", + (unsigned int) + desc->minidump->md_ss_smem_regions_baseptr); + /** + * Collect minidump if SS ToC is valid and segment table + * is initialized in memory and encryption status is set. + */ + if ((desc->minidump->md_ss_smem_regions_baseptr != 0) && + (desc->minidump->md_ss_toc_init == true) && + (desc->minidump->md_ss_enable_status == + MD_SS_ENABLED)) { + if (desc->minidump->encryption_status == + MD_SS_ENCR_DONE) { + pr_debug("Dumping Minidump for %s\n", + desc->name); + return pil_do_minidump(desc, minidump_dev); + } + pr_debug("Minidump aborted for %s\n", desc->name); + return -EINVAL; + } + } + pr_debug("Continuing with full SSR dump for %s\n", desc->name); list_for_each_entry(seg, &priv->segs, list) count++; @@ -1041,9 +1141,11 @@ bool is_timeout_disabled(void) int pil_desc_init(struct pil_desc *desc) { struct pil_priv *priv; - int ret; void __iomem *addr; + void *ss_toc_addr; + int ret; char buf[sizeof(priv->info->name)]; + struct device_node *ofnode = desc->dev->of_node; if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote, "Invalid proxy voting. 
Ignoring\n")) @@ -1066,6 +1168,18 @@ int pil_desc_init(struct pil_desc *desc) strlcpy(buf, desc->name, sizeof(buf)); __iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4); } + if (of_property_read_u32(ofnode, "qcom,minidump-id", + &desc->minidump_id)) + pr_err("minidump-id not found for %s\n", desc->name); + else { + if (g_md_toc && g_md_toc->md_toc_init == true) { + ss_toc_addr = &g_md_toc->md_ss_toc[desc->minidump_id]; + pr_debug("Minidump : ss_toc_addr is %pa and desc->minidump_id is %d\n", + &ss_toc_addr, desc->minidump_id); + memcpy(&desc->minidump, &ss_toc_addr, + sizeof(ss_toc_addr)); + } + } ret = pil_parse_devicetree(desc); if (ret) @@ -1152,6 +1266,7 @@ static int __init msm_pil_init(void) struct device_node *np; struct resource res; int i; + size_t size; np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil"); if (!np) { @@ -1174,6 +1289,14 @@ static int __init msm_pil_init(void) for (i = 0; i < resource_size(&res)/sizeof(u32); i++) writel_relaxed(0, pil_info_base + (i * sizeof(u32))); + /* Get Global minidump ToC*/ + g_md_toc = qcom_smem_get(QCOM_SMEM_HOST_ANY, SBL_MINIDUMP_SMEM_ID, + &size); + pr_debug("Minidump: g_md_toc is %pa\n", &g_md_toc); + if (PTR_ERR(g_md_toc) == -EPROBE_DEFER) { + pr_err("SMEM is not initialized.\n"); + return -EPROBE_DEFER; + } out: return register_pm_notifier(&pil_pm_notifier); } diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h index 4b02853fd6f9846b29be589f5327bb9caed00b8f..b99f0431199a24b25c0833c1abce8d5ea8032a25 100644 --- a/drivers/soc/qcom/peripheral-loader.h +++ b/drivers/soc/qcom/peripheral-loader.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include #include +#include "minidump_private.h" struct device; struct module; @@ -63,6 +64,8 @@ struct pil_desc { bool signal_aop; struct mbox_client cl; struct mbox_chan *mbox; + struct md_ss_toc *minidump; + int minidump_id; }; /** @@ -109,7 +112,8 @@ extern void pil_shutdown(struct pil_desc *desc); extern void pil_free_memory(struct pil_desc *desc); extern void pil_desc_release(struct pil_desc *desc); extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc); -extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev); +extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev, + void *minidump_dev); extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr, size_t size); extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr, @@ -129,7 +133,8 @@ static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc) { return 0; } -static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev) +static inline int pil_do_ramdump(struct pil_desc *desc, + void *ramdump_dev, void *minidump_dev) { return 0; } diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index 443e9e384ea292426ade61f3108be12629c13432..f336cfbdca490105c49f49c77473c31eab9f6d18 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,8 +19,11 @@ #include #include #include -#include +#include +#include +#include #include +#include #include "qdss_bridge.h" #define MODULE_NAME "qdss_bridge" @@ -128,18 +131,7 @@ static void mhi_ch_close(struct qdss_bridge_drvdata *drvdata) { flush_workqueue(drvdata->mhi_wq); qdss_destroy_buf_tbl(drvdata); - mhi_close_channel(drvdata->hdl); -} - -static void mhi_close_work_fn(struct work_struct *work) -{ - struct qdss_bridge_drvdata *drvdata = - container_of(work, - struct qdss_bridge_drvdata, - close_work); - - usb_qdss_close(drvdata->usb_ch); - mhi_ch_close(drvdata); + mhi_unprepare_from_transfer(drvdata->mhi_dev); } static void mhi_read_work_fn(struct work_struct *work) @@ -160,8 +152,8 @@ static void mhi_read_work_fn(struct work_struct *work) if (!entry) break; - err = mhi_queue_xfer(drvdata->hdl, entry->buf, QDSS_BUF_SIZE, - mhi_flags); + err = mhi_queue_transfer(drvdata->mhi_dev, DMA_FROM_DEVICE, + entry->buf, QDSS_BUF_SIZE, mhi_flags); if (err) { pr_err_ratelimited("Unable to read from MHI buffer err:%d", err); @@ -182,18 +174,18 @@ static int mhi_queue_read(struct qdss_bridge_drvdata *drvdata) } static int usb_write(struct qdss_bridge_drvdata *drvdata, - struct mhi_result *result) + unsigned char *buf, size_t len) { int ret = 0; struct qdss_buf_tbl_lst *entry; - entry = qdss_get_buf_tbl_entry(drvdata, result->buf_addr); + entry = qdss_get_buf_tbl_entry(drvdata, buf); if (!entry) return -EINVAL; - entry->usb_req->buf = result->buf_addr; - entry->usb_req->length = result->bytes_xferd; - ret = usb_qdss_data_write(drvdata->usb_ch, entry->usb_req); + entry->usb_req->buf = buf; + entry->usb_req->length = len; + ret = usb_qdss_write(drvdata->usb_ch, entry->usb_req); return ret; } @@ -201,26 +193,50 @@ static int usb_write(struct qdss_bridge_drvdata *drvdata, static void mhi_read_done_work_fn(struct work_struct *work) { 
unsigned char *buf = NULL; - struct mhi_result result; int err = 0; + size_t len = 0; + struct qdss_mhi_buf_tbl_t *tp, *_tp; struct qdss_bridge_drvdata *drvdata = container_of(work, struct qdss_bridge_drvdata, read_done_work); + LIST_HEAD(head); do { - err = mhi_poll_inbound(drvdata->hdl, &result); - if (err) { - pr_debug("MHI poll failed err:%d\n", err); + if (!(drvdata->opened)) break; - } - buf = result.buf_addr; - if (!buf) + spin_lock_bh(&drvdata->lock); + if (list_empty(&drvdata->read_done_list)) { + spin_unlock_bh(&drvdata->lock); break; - err = usb_write(drvdata, &result); - if (err) - qdss_buf_tbl_remove(drvdata, buf); - } while (1); + } + list_splice_tail_init(&drvdata->read_done_list, &head); + spin_unlock_bh(&drvdata->lock); + + list_for_each_entry_safe(tp, _tp, &head, link) { + list_del(&tp->link); + buf = tp->buf; + len = tp->len; + kfree(tp); + if (!buf) + break; + pr_debug("Read from mhi buf %pK len:%zd\n", buf, len); + /* + * The read buffers can come after the MHI channels are + * closed. If the channels are closed at the time of + * read, discard the buffers here and do not forward + * them to the mux layer. 
+ */ + if (drvdata->opened) { + err = usb_write(drvdata, buf, len); + if (err) + qdss_buf_tbl_remove(drvdata, buf); + } else { + qdss_buf_tbl_remove(drvdata, buf); + } + } + list_del_init(&head); + } while (buf); } static void usb_write_done(struct qdss_bridge_drvdata *drvdata, @@ -245,7 +261,7 @@ static void usb_notifier(void *priv, unsigned int event, switch (event) { case USB_QDSS_CONNECT: - usb_qdss_alloc_req(drvdata->usb_ch, poolsize, 0); + usb_qdss_alloc_req(ch, poolsize, 0); mhi_queue_read(drvdata); break; @@ -268,18 +284,15 @@ static int mhi_ch_open(struct qdss_bridge_drvdata *drvdata) if (drvdata->opened) return 0; - - ret = mhi_open_channel(drvdata->hdl); + ret = mhi_prepare_for_transfer(drvdata->mhi_dev); if (ret) { pr_err("Unable to open MHI channel\n"); return ret; } - ret = mhi_get_free_desc(drvdata->hdl); - if (ret <= 0) - return -EIO; - + spin_lock_bh(&drvdata->lock); drvdata->opened = 1; + spin_unlock_bh(&drvdata->lock); return 0; } @@ -312,149 +325,125 @@ static void qdss_bridge_open_work_fn(struct work_struct *work) pr_err("Open work failed with err:%d\n", ret); } -static void mhi_notifier(struct mhi_cb_info *cb_info) +static void qdss_mhi_write_cb(struct mhi_device *mhi_dev, + struct mhi_result *result) { - struct mhi_result *result; - struct qdss_bridge_drvdata *drvdata; - - if (!cb_info) - return; +} - result = cb_info->result; - if (!result) { - pr_err_ratelimited("Failed to obtain MHI result\n"); - return; - } +static void qdss_mhi_read_cb(struct mhi_device *mhi_dev, + struct mhi_result *result) +{ + struct qdss_bridge_drvdata *drvdata = NULL; + struct qdss_mhi_buf_tbl_t *tp; + void *buf = NULL; - drvdata = (struct qdss_bridge_drvdata *)cb_info->result->user_data; - if (!drvdata) { - pr_err_ratelimited("MHI returned invalid drvdata\n"); + drvdata = mhi_dev->priv_data; + if (!drvdata) return; - } - - switch (cb_info->cb_reason) { - case MHI_CB_MHI_ENABLED: - queue_work(drvdata->mhi_wq, &drvdata->open_work); - break; - - case MHI_CB_XFER: - if 
(!drvdata->opened) - break; - + buf = result->buf_addr; + + if (drvdata->opened && + result->transaction_status != -ENOTCONN) { + tp = kmalloc(sizeof(*tp), GFP_ATOMIC); + if (!tp) + return; + tp->buf = buf; + tp->len = result->bytes_xferd; + spin_lock_bh(&drvdata->lock); + list_add_tail(&tp->link, &drvdata->read_done_list); + spin_unlock_bh(&drvdata->lock); queue_work(drvdata->mhi_wq, &drvdata->read_done_work); - break; - - case MHI_CB_MHI_DISABLED: - if (!drvdata->opened) - break; - - drvdata->opened = 0; - queue_work(drvdata->mhi_wq, &drvdata->close_work); - break; - - default: - pr_err_ratelimited("MHI returned invalid cb reason 0x%x\n", - cb_info->cb_reason); - break; + } else { + qdss_buf_tbl_remove(drvdata, buf); } } -static int qdss_mhi_register_ch(struct qdss_bridge_drvdata *drvdata) +static void qdss_mhi_remove(struct mhi_device *mhi_dev) { - struct mhi_client_info_t *client_info; - int ret; - struct mhi_client_info_t *mhi_info; - - client_info = devm_kzalloc(drvdata->dev, sizeof(*client_info), - GFP_KERNEL); - if (!client_info) - return -ENOMEM; + struct qdss_bridge_drvdata *drvdata = NULL; - client_info->mhi_client_cb = mhi_notifier; - drvdata->client_info = client_info; - - mhi_info = client_info; - mhi_info->chan = MHI_CLIENT_QDSS_IN; - mhi_info->dev = drvdata->dev; - mhi_info->node_name = "qcom,mhi"; - mhi_info->user_data = drvdata; - - ret = mhi_register_channel(&drvdata->hdl, mhi_info); - return ret; + if (!mhi_dev) + return; + drvdata = mhi_dev->priv_data; + if (!drvdata) + return; + if (!drvdata->opened) + return; + spin_lock_bh(&drvdata->lock); + drvdata->opened = 0; + spin_unlock_bh(&drvdata->lock); + usb_qdss_close(drvdata->usb_ch); + flush_workqueue(drvdata->mhi_wq); + qdss_destroy_buf_tbl(drvdata); } int qdss_mhi_init(struct qdss_bridge_drvdata *drvdata) { - int ret; - drvdata->mhi_wq = create_singlethread_workqueue(MODULE_NAME); if (!drvdata->mhi_wq) return -ENOMEM; + spin_lock_init(&drvdata->lock); INIT_WORK(&(drvdata->read_work), 
mhi_read_work_fn); INIT_WORK(&(drvdata->read_done_work), mhi_read_done_work_fn); INIT_WORK(&(drvdata->open_work), qdss_bridge_open_work_fn); - INIT_WORK(&(drvdata->close_work), mhi_close_work_fn); INIT_LIST_HEAD(&drvdata->buf_tbl); + INIT_LIST_HEAD(&drvdata->read_done_list); drvdata->opened = 0; - ret = qdss_mhi_register_ch(drvdata); - if (ret) { - destroy_workqueue(drvdata->mhi_wq); - pr_err("Unable to register MHI read channel err:%d\n", ret); - return ret; - } - return 0; } -static int qdss_mhi_probe(struct platform_device *pdev) +static int qdss_mhi_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) { int ret; - struct device *dev = &pdev->dev; struct qdss_bridge_drvdata *drvdata; - drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + drvdata = devm_kzalloc(&mhi_dev->dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) { ret = -ENOMEM; return ret; } - drvdata->dev = &pdev->dev; - platform_set_drvdata(pdev, drvdata); + drvdata->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, drvdata); ret = qdss_mhi_init(drvdata); if (ret) goto err; - + queue_work(drvdata->mhi_wq, &drvdata->open_work); return 0; err: pr_err("Device probe failed err:%d\n", ret); return ret; } -static const struct of_device_id qdss_mhi_table[] = { - {.compatible = "qcom,qdss-mhi"}, - {}, +static const struct mhi_device_id qdss_mhi_match_table[] = { + { .chan = "QDSS" }, + { NULL }, }; -static struct platform_driver qdss_mhi_driver = { +static struct mhi_driver qdss_mhi_driver = { + .id_table = qdss_mhi_match_table, .probe = qdss_mhi_probe, + .remove = qdss_mhi_remove, + .dl_xfer_cb = qdss_mhi_read_cb, + .ul_xfer_cb = qdss_mhi_write_cb, .driver = { .name = MODULE_NAME, .owner = THIS_MODULE, - .of_match_table = qdss_mhi_table, - }, + } }; static int __init qdss_bridge_init(void) { - return platform_driver_register(&qdss_mhi_driver); + return mhi_driver_register(&qdss_mhi_driver); } static void __exit qdss_bridge_exit(void) { - platform_driver_unregister(&qdss_mhi_driver); 
+ mhi_driver_unregister(&qdss_mhi_driver); } module_init(qdss_bridge_init); diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h index 97b9c4099141c10526a3375559dbbda771953e39..60c8b4c63cd2d168736fde27bdd48aea18f98bb5 100644 --- a/drivers/soc/qcom/qdss_bridge.h +++ b/drivers/soc/qcom/qdss_bridge.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -20,9 +20,17 @@ struct qdss_buf_tbl_lst { atomic_t available; }; +struct qdss_mhi_buf_tbl_t { + struct list_head link; + unsigned char *buf; + size_t len; +}; + struct qdss_bridge_drvdata { - struct device *dev; + int alias; bool opened; + spinlock_t lock; + struct mhi_device *mhi_dev; struct work_struct read_work; struct work_struct read_done_work; struct work_struct open_work; @@ -31,6 +39,7 @@ struct qdss_bridge_drvdata { struct mhi_client_handle *hdl; struct mhi_client_info_t *client_info; struct list_head buf_tbl; + struct list_head read_done_list; struct usb_qdss_ch *usb_ch; }; diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c new file mode 100644 index 0000000000000000000000000000000000000000..a6a7b2506538a2f9396ec20203459fa417ea7041 --- /dev/null +++ b/drivers/soc/qcom/qmi_rmnet.c @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include "qmi_rmnet_i.h" + +#define MODEM_0_INSTANCE 0 +#define MODEM_0 0 +#define MODEM_1 1 + +struct qmi_elem_info data_ep_id_type_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum data_ep_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct data_ep_id_type_v01, + ep_type), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct data_ep_id_type_v01, + iface_id), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .elem_len = 0, + .elem_size = 0, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = 0, + .ei_array = NULL, + }, +}; +EXPORT_SYMBOL(data_ep_id_type_v01_ei); + +static void *qmi_rmnet_qmi_init(void) +{ + struct qmi_info *qmi_info; + + qmi_info = kzalloc(sizeof(*qmi_info), GFP_KERNEL); + if (!qmi_info) + return NULL; + + return (void *)qmi_info; +} + +struct rmnet_flow_map * +qmi_rmnet_get_flow_map(struct qos_info *qos, uint32_t flow_id, int ip_type) +{ + struct rmnet_flow_map *itm; + + if (!qos) + return NULL; + + list_for_each_entry(itm, &qos->flow_head, list) { + if (unlikely(!itm)) + return NULL; + + if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type)) + return itm; + } + return NULL; +} + +struct rmnet_bearer_map * +qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id) +{ + struct rmnet_bearer_map *itm; + + if (!qos) + return NULL; + + list_for_each_entry(itm, &qos->bearer_head, list) { + if (unlikely(!itm)) + return NULL; + + if (itm->bearer_id == bearer_id) + return itm; + } + return NULL; +} + +static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm, + struct rmnet_flow_map *new_map) +{ + itm->bearer_id = new_map->bearer_id; + itm->flow_id = 
new_map->flow_id; + itm->ip_type = new_map->ip_type; + itm->tcm_handle = new_map->tcm_handle; +} + +static int qmi_rmnet_add_flow(struct net_device *dev, struct qmi_info *qmi, + struct rmnet_flow_map *new_map) +{ + struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); + struct rmnet_flow_map *itm; + struct rmnet_bearer_map *bearer; + unsigned long flags; + + if (!qos_info) + return -EINVAL; + + pr_debug("%s() bearer[%u], flow[%u], ip[%u]\n", __func__, + new_map->bearer_id, new_map->flow_id, new_map->ip_type); + + write_lock_irqsave(&qos_info->flow_map_lock, flags); + itm = qmi_rmnet_get_flow_map(qos_info, new_map->flow_id, + new_map->ip_type); + if (itm) { + qmi_rmnet_update_flow_map(itm, new_map); + } else { + write_unlock_irqrestore(&qos_info->flow_map_lock, flags); + itm = kzalloc(sizeof(*itm), GFP_KERNEL); + if (!itm) + return -ENOMEM; + + qmi_rmnet_update_flow_map(itm, new_map); + write_lock_irqsave(&qos_info->flow_map_lock, flags); + list_add(&itm->list, &qos_info->flow_head); + } + + bearer = qmi_rmnet_get_bearer_map(qos_info, new_map->bearer_id); + if (bearer) { + bearer->flow_ref++; + } else { + write_unlock_irqrestore(&qos_info->flow_map_lock, flags); + bearer = kzalloc(sizeof(*bearer), GFP_KERNEL); + if (!bearer) + return -ENOMEM; + + bearer->bearer_id = new_map->bearer_id; + bearer->flow_ref = 1; + bearer->grant_size = qos_info->default_grant; + write_lock_irqsave(&qos_info->flow_map_lock, flags); + list_add(&bearer->list, &qos_info->bearer_head); + } + write_unlock_irqrestore(&qos_info->flow_map_lock, flags); + return 0; +} + +static int +qmi_rmnet_del_flow(struct net_device *dev, struct rmnet_flow_map *new_map) +{ + struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); + struct rmnet_flow_map *itm; + struct rmnet_bearer_map *bearer; + unsigned long flags; + int bearer_removed = 0; + + if (!qos_info) { + pr_err("%s() NULL qos info\n", __func__); + return -EINVAL; + } + pr_debug("%s() bearer[%u], flow[%u], ip[%u]\n", 
__func__, + new_map->bearer_id, new_map->flow_id, new_map->ip_type); + + write_lock_irqsave(&qos_info->flow_map_lock, flags); + itm = qmi_rmnet_get_flow_map(qos_info, new_map->flow_id, + new_map->ip_type); + if (itm) + list_del(&itm->list); + + /*clear bearer map*/ + bearer = qmi_rmnet_get_bearer_map(qos_info, new_map->bearer_id); + if (bearer && --bearer->flow_ref == 0) { + list_del(&bearer->list); + bearer_removed = 1; + } + write_unlock_irqrestore(&qos_info->flow_map_lock, flags); + + kfree(itm); + if (bearer_removed) + kfree(bearer); + return 0; +} + +void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb) +{ + void *port = rmnet_get_rmnet_port(dev); + struct qmi_info *qmi = rmnet_get_qmi_pt(port); + struct qos_info *qos = rmnet_get_qos_pt(dev); + + if (!qmi || !qos) + return; + + dfc_qmi_burst_check(dev, qos, skb); +} +EXPORT_SYMBOL(qmi_rmnet_burst_fc_check); + +#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE +int qmi_rmnet_reg_dereg_fc_ind(void *port, int reg) +{ + struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + int rc = 0; + + if (!qmi) { + pr_err("%s - qmi_info is NULL\n", __func__); + return -EINVAL; + } + + if (qmi->fc_info[0].dfc_client) { + rc = dfc_reg_unreg_fc_ind(qmi->fc_info[0].dfc_client, reg); + if (rc < 0) { + pr_err("%s() failed dfc_reg_unreg_fc_ind[0] rc=%d\n", + __func__, rc); + goto out; + } + } + if (qmi->fc_info[1].dfc_client) { + rc = dfc_reg_unreg_fc_ind(qmi->fc_info[1].dfc_client, reg); + if (rc < 0) + pr_err("%s() failed dfc_reg_unreg_fc_ind[1] rc=%d\n", + __func__, rc); + } +out: + return rc; +} +EXPORT_SYMBOL(qmi_rmnet_reg_dereg_fc_ind); +#endif + +void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) +{ + struct tcmsg *tcm = (struct tcmsg *)tcm_pt; + struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + struct rmnet_flow_map new_map; + int idx; + + if (!dev || !port || !tcm_pt) + return; + + switch (tcm->tcm_family) { + case 1: + /* + * flow activate + * tcm->tcm__pad1 
- bearer_id, tcm->tcm_parent - flow_id, + * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle + */ + if (!qmi) + return; + + new_map.bearer_id = tcm->tcm__pad1; + new_map.flow_id = tcm->tcm_parent; + new_map.ip_type = tcm->tcm_ifindex; + new_map.tcm_handle = tcm->tcm_handle; + qmi_rmnet_add_flow(dev, qmi, &new_map); + break; + case 2: + /* + * flow deactivate + * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, + * tcm->tcm_ifindex - ip_type + */ + if (!qmi) + return; + + new_map.bearer_id = tcm->tcm__pad1; + new_map.flow_id = tcm->tcm_parent; + new_map.ip_type = tcm->tcm_ifindex; + qmi_rmnet_del_flow(dev, &new_map); + break; + case 4: + /* + * modem up + * tcm->tcm_handle - instance, tcm->tcm_info - ep_type, + * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags + */ + pr_debug("%s() instance[%u], ep_type[%u], iface[%u]\n", + __func__, tcm->tcm_handle, tcm->tcm_info, + tcm->tcm_parent); + + if (tcm->tcm_ifindex != 1) + return; + + if (tcm->tcm_handle == MODEM_0_INSTANCE) + idx = MODEM_0; + else + idx = MODEM_1; + + if (!qmi) { + qmi = (struct qmi_info *)qmi_rmnet_qmi_init(); + if (!qmi) + return; + qmi->modem_count = 1; + rmnet_init_qmi_pt(port, qmi); + } else if (!qmi->fc_info[idx].dfc_client) { + /* + * dfc_client is per modem, we may receive multiple + * modem up events due to netmagrd restarts so only + * increase modem_count when we need to create a new + * dfc client. 
+ */ + qmi->modem_count++; + } + if (qmi->fc_info[idx].dfc_client == NULL) { + qmi->fc_info[idx].svc.instance = tcm->tcm_handle; + qmi->fc_info[idx].svc.ep_type = tcm->tcm_info; + qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent; + if (dfc_qmi_client_init(port, idx) < 0) + pr_err("%s failed[%d]\n", __func__, idx); + } + break; + case 5: + /* modem down: tcm->tcm_handle - instance*/ + pr_debug("%s() instance[%u]\n", __func__, tcm->tcm_handle); + if (!qmi) + return; + + if (tcm->tcm_handle == MODEM_0_INSTANCE) + idx = MODEM_0; + else + idx = MODEM_1; + + /* + * dfc_client can be deleted by service request before + * modem down event arrival. Decrease modem_count here always + */ + qmi->modem_count--; + if (qmi->fc_info[idx].dfc_client) { + dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client); + qmi->fc_info[idx].dfc_client = NULL; + } + if (qmi->modem_count == 0) { + kfree(qmi); + rmnet_reset_qmi_pt(port); + } + break; + default: + pr_debug("%s(): No handler\n", __func__); + break; + } +} +EXPORT_SYMBOL(qmi_rmnet_change_link); + +void *qmi_rmnet_qos_init(struct net_device *real_dev, uint8_t mux_id) +{ + struct qos_info *qos_info; + + qos_info = kmalloc(sizeof(struct qos_info), GFP_KERNEL); + if (!qos_info) + return NULL; + + qos_info->mux_id = mux_id; + qos_info->real_dev = real_dev; + qos_info->default_grant = 10240; + qos_info->tran_num = 0; + rwlock_init(&qos_info->flow_map_lock); + INIT_LIST_HEAD(&qos_info->flow_head); + INIT_LIST_HEAD(&qos_info->bearer_head); + + return (void *)qos_info; +} +EXPORT_SYMBOL(qmi_rmnet_qos_init); + +void qmi_rmnet_qos_exit(struct net_device *dev) +{ + struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); + + kfree(qos); +} +EXPORT_SYMBOL(qmi_rmnet_qos_exit); diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h new file mode 100644 index 0000000000000000000000000000000000000000..d2047763b6dd468f3a9341309a9c24e890d74f61 --- /dev/null +++ b/drivers/soc/qcom/qmi_rmnet_i.h @@ -0,0 +1,102 @@ +/* + * 
Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_QMI_I_H +#define _RMNET_QMI_I_H + +#include +#include + +#define IP_VER_4 4 +#define IP_VER_6 6 + +#define MAX_MODEM_NUM 2 + +struct rmnet_flow_map { + struct list_head list; + u8 bearer_id; + u32 flow_id; + int ip_type; + u32 tcm_handle; +}; + +struct rmnet_bearer_map { + struct list_head list; + u8 bearer_id; + int flow_ref; + u32 grant_size; + u32 counter; + u16 seq; + u8 ack_req; +}; + +struct svc_info { + u32 instance; + u32 ep_type; + u32 iface_id; +}; + +struct fc_info { + struct svc_info svc; + void *dfc_client; +}; + +struct qos_info { + uint8_t mux_id; + struct net_device *real_dev; + rwlock_t flow_map_lock; + struct list_head flow_head; + struct list_head bearer_head; + uint32_t default_grant; + uint32_t tran_num; +}; + +struct qmi_info { + int modem_count; + struct fc_info fc_info[MAX_MODEM_NUM]; +}; + +enum data_ep_type_enum_v01 { + DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN, + DATA_EP_TYPE_RESERVED_V01 = 0x00, + DATA_EP_TYPE_HSIC_V01 = 0x01, + DATA_EP_TYPE_HSUSB_V01 = 0x02, + DATA_EP_TYPE_PCIE_V01 = 0x03, + DATA_EP_TYPE_EMBEDDED_V01 = 0x04, + DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX +}; + +struct data_ep_id_type_v01 { + + enum data_ep_type_enum_v01 ep_type; + uint32_t iface_id; +}; + +extern struct qmi_elem_info data_ep_id_type_v01_ei[]; + +struct rmnet_flow_map * +qmi_rmnet_get_flow_map(struct qos_info *qos_info, + uint32_t flow_id, int ip_type); + +struct rmnet_bearer_map * 
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id); + +int dfc_qmi_client_init(void *port, int modem); + +void dfc_qmi_client_exit(void *dfc_data); + +void dfc_qmi_burst_check(struct net_device *dev, + struct qos_info *qos, struct sk_buff *skb); + +int dfc_reg_unreg_fc_ind(void *dfc_data, int reg); +#endif /*_RMNET_QMI_I_H*/ diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index 6553ac0620dda1ed07b6329ded90f652825f6771..2259a1a0195f73a02a04c48cfaf35181d37937df 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2011 Google, Inc - * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -25,12 +25,28 @@ DEFINE_MUTEX(secure_buffer_mutex); +struct cp2_mem_chunks { + u32 chunk_list; + u32 chunk_list_size; + u32 chunk_size; +} __attribute__ ((__packed__)); + +struct cp2_lock_req { + struct cp2_mem_chunks chunks; + u32 mem_usage; + u32 lock; +} __attribute__ ((__packed__)); + struct mem_prot_info { phys_addr_t addr; u64 size; }; #define MEM_PROT_ASSIGN_ID 0x16 +#define MEM_PROTECT_LOCK_ID2 0x0A +#define MEM_PROTECT_LOCK_ID2_FLAT 0x11 +#define V2_CHUNK_SIZE SZ_1M +#define FEATURE_ID_CP 12 struct dest_vm_and_perm_info { u32 vm; @@ -42,6 +58,129 @@ struct dest_vm_and_perm_info { static void *qcom_secure_mem; #define QCOM_SECURE_MEM_SIZE (512*1024) +static int secure_buffer_change_chunk(u32 chunks, + u32 nchunks, + u32 chunk_size, + int lock) +{ + struct cp2_lock_req request; + u32 resp; + int ret; + struct scm_desc desc = {0}; + + desc.args[0] = request.chunks.chunk_list = chunks; + desc.args[1] = request.chunks.chunk_list_size = nchunks; + desc.args[2] = request.chunks.chunk_size = chunk_size; + /* Usage is now always 0 */ + 
desc.args[3] = request.mem_usage = 0; + desc.args[4] = request.lock = lock; + desc.args[5] = 0; + desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL, + SCM_VAL); + + kmap_flush_unused(); + kmap_atomic_flush_unused(); + + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, + MEM_PROTECT_LOCK_ID2_FLAT), &desc); + resp = desc.ret[0]; + + return ret; +} + +static int secure_buffer_change_table(struct sg_table *table, int lock) +{ + int i, j; + int ret = -EINVAL; + u32 *chunk_list; + struct scatterlist *sg; + + for_each_sg(table->sgl, sg, table->nents, i) { + int nchunks; + int size = sg->length; + int chunk_list_len; + phys_addr_t chunk_list_phys; + + /* + * This should theoretically be a phys_addr_t but the protocol + * indicates this should be a u32. + */ + u32 base; + u64 tmp = sg_dma_address(sg); + + WARN((tmp >> 32) & 0xffffffff, + "%s: there are ones in the upper 32 bits of the sg at %pK! They will be truncated! Address: 0x%llx\n", + __func__, sg, tmp); + if (unlikely(!size || (size % V2_CHUNK_SIZE))) { + WARN(1, + "%s: chunk %d has invalid size: 0x%x. 
Must be a multiple of 0x%x\n", + __func__, i, size, V2_CHUNK_SIZE); + return -EINVAL; + } + + base = (u32)tmp; + + nchunks = size / V2_CHUNK_SIZE; + chunk_list_len = sizeof(u32)*nchunks; + + chunk_list = kzalloc(chunk_list_len, GFP_KERNEL); + + if (!chunk_list) + return -ENOMEM; + + chunk_list_phys = virt_to_phys(chunk_list); + for (j = 0; j < nchunks; j++) + chunk_list[j] = base + j * V2_CHUNK_SIZE; + + /* + * Flush the chunk list before sending the memory to the + * secure environment to ensure the data is actually present + * in RAM + */ + dmac_flush_range(chunk_list, chunk_list + chunk_list_len); + + ret = secure_buffer_change_chunk(chunk_list_phys, + nchunks, V2_CHUNK_SIZE, lock); + + if (!ret) { + /* + * Set or clear the private page flag to communicate the + * status of the chunk to other entities + */ + if (lock) + SetPagePrivate(sg_page(sg)); + else + ClearPagePrivate(sg_page(sg)); + } + + kfree(chunk_list); + } + + return ret; +} + +int msm_secure_table(struct sg_table *table) +{ + int ret; + + mutex_lock(&secure_buffer_mutex); + ret = secure_buffer_change_table(table, 1); + mutex_unlock(&secure_buffer_mutex); + + return ret; +} + +int msm_unsecure_table(struct sg_table *table) +{ + int ret; + + mutex_lock(&secure_buffer_mutex); + ret = secure_buffer_change_table(table, 0); + mutex_unlock(&secure_buffer_mutex); + + return ret; +} + static struct dest_vm_and_perm_info * populate_dest_info(int *dest_vmids, int nelements, int *dest_perms, size_t *size_in_bytes) @@ -278,6 +417,19 @@ const char *msm_secure_vmid_to_string(int secure_vmid) } } +#define MAKE_CP_VERSION(major, minor, patch) \ + (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF)) + +bool msm_secure_v2_is_supported(void) +{ + /* + * if the version is < 1.1.0 then dynamic buffer allocation is + * not supported + */ + return (scm_get_feat_version(FEATURE_ID_CP) >= + MAKE_CP_VERSION(1, 1, 0)); +} + static int __init alloc_secure_shared_memory(void) { int ret = 0; diff --git 
a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index f51fb2ea72001adaf8aa0e0c78f2d0dd90f41710..ac5979cf56d9d45dd24bef4b9e700f1f3a37656d 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2013, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -104,6 +104,7 @@ struct smp2p_entry { struct irq_domain *domain; DECLARE_BITMAP(irq_enabled, 32); + DECLARE_BITMAP(irq_pending, 32); DECLARE_BITMAP(irq_rising, 32); DECLARE_BITMAP(irq_falling, 32); @@ -142,6 +143,7 @@ struct qcom_smp2p { unsigned local_pid; unsigned remote_pid; + int irq; struct regmap *ipc_regmap; int ipc_offset; int ipc_bit; @@ -170,11 +172,11 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data) struct smp2p_smem_item *in; struct smp2p_entry *entry; struct qcom_smp2p *smp2p = data; + unsigned long status; unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND]; unsigned pid = smp2p->remote_pid; size_t size; int irq_pin; - u32 status; char buf[SMP2P_MAX_ENTRY_NAME]; u32 val; int i; @@ -215,19 +217,22 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data) status = val ^ entry->last_value; entry->last_value = val; + status |= *entry->irq_pending; /* No changes of this entry? 
*/ if (!status) continue; - for_each_set_bit(i, entry->irq_enabled, 32) { - if (!(status & BIT(i))) - continue; - + for_each_set_bit(i, &status, 32) { if ((val & BIT(i) && test_bit(i, entry->irq_rising)) || (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) { irq_pin = irq_find_mapping(entry->domain, i); handle_nested_irq(irq_pin); + + if (test_bit(i, entry->irq_enabled)) + clear_bit(i, entry->irq_pending); + else + set_bit(i, entry->irq_pending); } } } @@ -289,6 +294,8 @@ static int smp2p_irq_map(struct irq_domain *d, irq_set_chip_data(irq, entry); irq_set_nested_thread(irq, 1); irq_set_noprobe(irq); + irq_set_parent(irq, entry->smp2p->irq); + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); return 0; } @@ -440,7 +447,6 @@ static int qcom_smp2p_probe(struct platform_device *pdev) struct device_node *node; struct qcom_smp2p *smp2p; const char *key; - int irq; int ret; smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL); @@ -477,10 +483,10 @@ static int qcom_smp2p_probe(struct platform_device *pdev) return -EINVAL; } - irq = platform_get_irq(pdev, 0); - if (irq < 0) { + smp2p->irq = platform_get_irq(pdev, 0); + if (smp2p->irq < 0) { dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n"); - return irq; + return smp2p->irq; } ret = qcom_smp2p_alloc_outbound_item(smp2p); @@ -519,7 +525,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev) /* Kick the outgoing edge after allocating entries */ qcom_smp2p_kick(smp2p); - ret = devm_request_threaded_irq(&pdev->dev, irq, + ret = devm_request_threaded_irq(&pdev->dev, smp2p->irq, NULL, qcom_smp2p_intr, IRQF_ONESHOT, "smp2p", (void *)smp2p); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index d5f1cf0972afb2bdc70164947d91fe3d21d2efc1..0822f7148969f0dc7439bee0042e810324e113ca 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -319,6 +319,12 @@ static struct msm_soc_info cpu_of_id[] = { /* qcs405 ID */ [352] = {MSM_CPU_QCS405, "QCS405"}, + /* sdxprairie ID */ + 
[357] = {SDX_CPU_SDXPRAIRIE, "SDXPRAIRIE"}, + + /* sdmmagpie ID */ + [365] = {MSM_CPU_SDMMAGPIE, "SDMMAGPIE"}, + /* Uninitialized IDs are not known to run Linux. * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are * considered as unknown CPU. @@ -1190,6 +1196,14 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 352; strlcpy(dummy_socinfo.build_id, "qcs405 - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_sdxprairie()) { + dummy_socinfo.id = 357; + strlcpy(dummy_socinfo.build_id, "sdxprairie - ", + sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_sdmmagpie()) { + dummy_socinfo.id = 365; + strlcpy(dummy_socinfo.build_id, "sdmmagpie - ", + sizeof(dummy_socinfo.build_id)); } else strlcat(dummy_socinfo.build_id, "Dummy socinfo", sizeof(dummy_socinfo.build_id)); diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c index 8774a06da418fb6d5deb0b12729e5a99ee030bc7..2f5bd24a93f2df73abf2b3bef1611943257c889a 100644 --- a/drivers/soc/qcom/subsys-pil-tz.c +++ b/drivers/soc/qcom/subsys-pil-tz.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -864,7 +864,7 @@ static int subsys_ramdump(int enable, const struct subsys_desc *subsys) if (!enable) return 0; - return pil_do_ramdump(&d->desc, d->ramdump_dev); + return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL); } static void subsys_free_memory(const struct subsys_desc *subsys) diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c index 01e99873ae3cf2a2f3ffb4fd4a1cf8d6e3d71a9b..aa694f05f8dbc68d34621b2d85e5f247ca045161 100644 --- a/drivers/soc/qcom/watchdog_v2.c +++ b/drivers/soc/qcom/watchdog_v2.c @@ -96,6 +96,10 @@ struct msm_watchdog_data { bool timer_expired; bool user_pet_complete; + unsigned long long timer_fired; + unsigned long long thread_start; + unsigned long long ping_start[NR_CPUS]; + unsigned long long ping_end[NR_CPUS]; }; /* @@ -374,6 +378,7 @@ static void keep_alive_response(void *info) struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info; cpumask_set_cpu(cpu, &wdog_dd->alive_mask); + wdog_dd->ping_end[cpu] = sched_clock(); /* Make sure alive mask is cleared and set in order */ smp_mb(); } @@ -390,9 +395,11 @@ static void ping_other_cpus(struct msm_watchdog_data *wdog_dd) /* Make sure alive mask is cleared and set in order */ smp_mb(); for_each_cpu(cpu, cpu_online_mask) { - if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu)) + if (!cpu_idle_pc_state[cpu] && !cpu_isolated(cpu)) { + wdog_dd->ping_start[cpu] = sched_clock(); smp_call_function_single(cpu, keep_alive_response, wdog_dd, 1); + } } } @@ -401,6 +408,7 @@ static void pet_task_wakeup(unsigned long data) struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)data; wdog_dd->timer_expired = true; + wdog_dd->timer_fired = sched_clock(); wake_up(&wdog_dd->pet_complete); } @@ -410,7 +418,7 @@ static __ref int watchdog_kthread(void *arg) (struct msm_watchdog_data *)arg; unsigned long delay_time = 0; 
struct sched_param param = {.sched_priority = MAX_RT_PRIO-1}; - int ret; + int ret, cpu; sched_setscheduler(current, SCHED_FIFO, ¶m); while (!kthread_should_stop()) { @@ -419,6 +427,10 @@ static __ref int watchdog_kthread(void *arg) wdog_dd->timer_expired); } while (ret != 0); + wdog_dd->thread_start = sched_clock(); + for_each_cpu(cpu, cpu_present_mask) + wdog_dd->ping_start[cpu] = wdog_dd->ping_end[cpu] = 0; + if (wdog_dd->do_ipi_ping) ping_other_cpus(wdog_dd); diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index d008e5b82db4bc411f889409c4e7e39d11907cbc..df3ccb30bc2dddba0d2d6accccdd6a6c5a7bc53c 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) /* Increment for next fragment */ req->seq++; - data += req->hdr.len; + data += NV_FRAGMENT_SIZE; left -= NV_FRAGMENT_SIZE; } while (left > 0); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index a172ab299e80316ee904a1c9277a36755699e4d4..6573152ce8936e728cfb86a0210ce33eeb4e81d8 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi, static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) { - if (!has_bspi(qspi) || (qspi->bspi_enabled)) + if (!has_bspi(qspi)) return; qspi->bspi_enabled = 1; @@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi) static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) { - if (!has_bspi(qspi) || (!qspi->bspi_enabled)) + if (!has_bspi(qspi)) return; qspi->bspi_enabled = 0; @@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi) static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) { - u32 data = 0; + u32 rd = 0; + u32 wr = 0; - if (qspi->curr_cs == cs) - return; if (qspi->base[CHIP_SELECT]) { - data = bcm_qspi_read(qspi, CHIP_SELECT, 0); - data = (data & ~0xff) | (1 << cs); - 
bcm_qspi_write(qspi, CHIP_SELECT, 0, data); + rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); + wr = (rd & ~0xff) | (1 << cs); + if (rd == wr) + return; + bcm_qspi_write(qspi, CHIP_SELECT, 0, wr); usleep_range(10, 20); } + + dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs); qspi->curr_cs = cs; } @@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); } mspi_cdram = MSPI_CDRAM_CONT_BIT; - mspi_cdram |= (~(1 << spi->chip_select) & - MSPI_CDRAM_PCS); + + if (has_bspi(qspi)) + mspi_cdram &= ~1; + else + mspi_cdram |= (~(1 << spi->chip_select) & + MSPI_CDRAM_PCS); + mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : MSPI_CDRAM_BITSE_BIT); @@ -1247,7 +1255,7 @@ int bcm_qspi_probe(struct platform_device *pdev, qspi->base[MSPI] = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->base[MSPI])) { ret = PTR_ERR(qspi->base[MSPI]); - goto qspi_probe_err; + goto qspi_resource_err; } } else { goto qspi_resource_err; @@ -1258,7 +1266,7 @@ int bcm_qspi_probe(struct platform_device *pdev, qspi->base[BSPI] = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->base[BSPI])) { ret = PTR_ERR(qspi->base[BSPI]); - goto qspi_probe_err; + goto qspi_resource_err; } qspi->bspi_mode = true; } else { diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 94f7b0713281929c5706555af3a9ed8418045670..02a8012a318a8d29d205fd2b0916c0f61f102d6a 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -38,7 +38,7 @@ struct driver_data { /* SSP register addresses */ void __iomem *ioaddr; - u32 ssdr_physical; + phys_addr_t ssdr_physical; /* SSP masks*/ u32 dma_cr1; diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index 758a28ee06d03f682a6453cd47b5a19d89178f65..dc638f080d7fc98720e4045d74cb3e4aaa6f52d2 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -3,4 +3,5 @@ obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \ 
ion_page_pool.o ion_system_heap.o \ ion_carveout_heap.o ion_chunk_heap.o \ ion_system_secure_heap.o ion_cma_heap.o \ - ion_secure_util.o msm/ + ion_secure_util.o ion_cma_secure_heap.o msm/ + diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index f71d430d169e69b7fb8860b03af05c69b39fad85..81c7eb4eab1c4e87f324425c20b6dc3d65093562 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1102,6 +1102,7 @@ struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask, if (!((1 << heap->id) & heap_id_mask)) continue; if (heap->type == ION_HEAP_TYPE_SYSTEM || + heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA || heap->type == (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE) { type_valid = true; diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index c8f84887bb6ba24bcbddd466922a11733d8b3552..a8b9baae205f9207b6bc9644b6c9c92ed9d19d0b 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -43,6 +43,8 @@ #define ION_SECURE_DISPLAY_HEAP_NAME "secure_display" #define ION_AUDIO_HEAP_NAME "audio" +#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED) + /** * Debug feature. 
Make ION allocations DMA * ready to help identify clients who are wrongly @@ -385,9 +387,20 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data); void ion_chunk_heap_destroy(struct ion_heap *heap); #ifdef CONFIG_CMA +struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data); +void ion_secure_cma_heap_destroy(struct ion_heap *heap); + struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data); void ion_cma_heap_destroy(struct ion_heap *heap); #else +static inline struct ion_heap + *ion_secure_cma_heap_create(struct ion_platform_heap *h) +{ + return NULL; +} + +static inline void ion_cma_heap_destroy(struct ion_heap *h) {} + static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h) { return NULL; diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c new file mode 100644 index 0000000000000000000000000000000000000000..59bc0beee9796b3efdf349b7336ec4400e79b484 --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_secure_heap.c @@ -0,0 +1,834 @@ +/* + * drivers/staging/android/ion/ion_cma_secure_heap.c + * + * Copyright (C) Linaro 2012 + * Author: for ST-Ericsson. + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* for ion_heap_ops structure */ +#include "ion.h" + +#define ION_CMA_ALLOCATE_FAILED NULL + +struct ion_secure_cma_non_contig_info { + dma_addr_t phys; + int len; + struct list_head entry; +}; + +struct ion_secure_cma_buffer_info { + dma_addr_t phys; + struct sg_table *table; + bool is_cached; + int len; + struct list_head non_contig_list; + unsigned long ncelems; +}; + +struct ion_cma_alloc_chunk { + void *cpu_addr; + struct list_head entry; + dma_addr_t handle; + unsigned long chunk_size; + atomic_t cnt; +}; + +struct ion_cma_secure_heap { + struct device *dev; + /* + * Protects against races between threads allocating memory/adding to + * pool at the same time. (e.g. thread 1 adds to pool, thread 2 + * allocates thread 1's memory before thread 1 knows it needs to + * allocate more. + * Admittedly this is fairly coarse grained right now but the chance for + * contention on this lock is unlikely right now. This can be changed if + * this ever changes in the future + */ + struct mutex alloc_lock; + /* + * protects the list of memory chunks in this pool + */ + struct mutex chunk_lock; + struct ion_heap heap; + /* + * Bitmap for allocation. This contains the aggregate of all chunks. + */ + unsigned long *bitmap; + /* + * List of all allocated chunks + * + * This is where things get 'clever'. Individual allocations from + * dma_alloc_coherent must be allocated and freed in one chunk. + * We don't just want to limit the allocations to those confined + * within a single chunk (if clients allocate n small chunks we would + * never be able to use the combined size). The bitmap allocator is + * used to find the contiguous region and the parts of the chunks are + * marked off as used. The chunks won't be freed in the shrinker until + * the usage is actually zero. 
+ */ + struct list_head chunks; + int npages; + phys_addr_t base; + struct work_struct work; + unsigned long last_alloc; + struct shrinker shrinker; + atomic_t total_allocated; + atomic_t total_pool_size; + atomic_t total_leaked; + unsigned long heap_size; + unsigned long default_prefetch_size; +}; + +static void ion_secure_pool_pages(struct work_struct *work); + +static int ion_heap_allow_secure_allocation(enum ion_heap_type type) +{ + return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA); +} + +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replace by dma_common_get_sgtable + * as soon as it will avalaible. + */ +static int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt, + dma_addr_t handle, size_t size) +{ + struct page *page = pfn_to_page(PFN_DOWN(handle)); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + sg_dma_address(sgt->sgl) = handle; + return 0; +} + +static int ion_secure_cma_add_to_pool( + struct ion_cma_secure_heap *sheap, + unsigned long len, + bool prefetch) +{ + void *cpu_addr; + dma_addr_t handle; + unsigned long attrs = 0; + int ret = 0; + struct ion_cma_alloc_chunk *chunk; + + trace_ion_secure_cma_add_to_pool_start(len, + atomic_read(&sheap->total_pool_size), + prefetch); + mutex_lock(&sheap->chunk_lock); + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + ret = -ENOMEM; + goto out; + } + + attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_SKIP_ZEROING; + + cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL, + attrs); + + if (!cpu_addr) { + ret = -ENOMEM; + goto out_free; + } + + chunk->cpu_addr = cpu_addr; + chunk->handle = handle; + chunk->chunk_size = len; + atomic_set(&chunk->cnt, 0); + list_add(&chunk->entry, &sheap->chunks); + atomic_add(len, &sheap->total_pool_size); + /* clear the bitmap to indicate this region can be allocated from */ + 
bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT, + len >> PAGE_SHIFT); + goto out; + +out_free: + kfree(chunk); +out: + mutex_unlock(&sheap->chunk_lock); + + trace_ion_secure_cma_add_to_pool_end(len, + atomic_read(&sheap->total_pool_size), + prefetch); + + return ret; +} + +static void ion_secure_pool_pages(struct work_struct *work) +{ + struct ion_cma_secure_heap *sheap = container_of(work, + struct ion_cma_secure_heap, work); + + ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true); +} + +/* + * @s1: start of the first region + * @l1: length of the first region + * @s2: start of the second region + * @l2: length of the second region + * + * Returns the total number of bytes that intersect. + * + * s1 is the region we are trying to clear so s2 may be subsumed by s1 but the + * maximum size to clear should only ever be l1 + * + */ +static unsigned int intersect(unsigned long s1, unsigned long l1, + unsigned long s2, unsigned long l2) +{ + unsigned long base1 = s1; + unsigned long end1 = s1 + l1; + unsigned long base2 = s2; + unsigned long end2 = s2 + l2; + + /* Case 0: The regions don't overlap at all */ + if (!(base1 < end2 && base2 < end1)) + return 0; + + /* Case 1: region 2 is subsumed by region 1 */ + if (base1 <= base2 && end2 <= end1) + return l2; + + /* case 2: region 1 is subsumed by region 2 */ + if (base2 <= base1 && end1 <= end2) + return l1; + + /* case 3: region1 overlaps region2 on the bottom */ + if (base2 < end1 && base2 > base1) + return end1 - base2; + + /* case 4: region 2 overlaps region1 on the bottom */ + if (base1 < end2 && base1 > base2) + return end2 - base1; + + pr_err("Bad math! Did not detect chunks correctly! 
%lx %lx %lx %lx\n", + s1, l1, s2, l2); + WARN_ON(1); + /* retrun max intersection value, so that it will fail later*/ + return (unsigned int)(~0); +} + +int ion_secure_cma_prefetch(struct ion_heap *heap, void *data) +{ + unsigned long len = (unsigned long)data; + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + unsigned long diff; + + if ((int)heap->type != ION_HEAP_TYPE_SECURE_DMA) + return -EINVAL; + + if (len == 0) + len = sheap->default_prefetch_size; + + /* + * Only prefetch as much space as there is left in the pool so + * check against the current free size of the heap. + * This is slightly racy if someone else is allocating at the same + * time. CMA has a restricted size for the heap so worst case + * the prefetch doesn't work because the allocation fails. + */ + diff = sheap->heap_size - atomic_read(&sheap->total_pool_size); + + if (len > diff) + len = diff; + + sheap->last_alloc = len; + trace_ion_prefetching(sheap->last_alloc); + schedule_work(&sheap->work); + + return 0; +} + +static void bad_math_dump(unsigned long len, int total_overlap, + struct ion_cma_secure_heap *sheap, + bool alloc, dma_addr_t paddr) +{ + struct list_head *entry; + + pr_err("Bad math! expected total was %lx actual was %x\n", + len, total_overlap); + pr_err("attempted %s address was %pa len %lx\n", + alloc ? 
"allocation" : "free", &paddr, len); + pr_err("chunks:\n"); + list_for_each(entry, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = + container_of(entry, + struct ion_cma_alloc_chunk, entry); + pr_info("--- pa %pa len %lx\n", + &chunk->handle, chunk->chunk_size); + } + WARN(1, "mismatch in the sizes of secure cma chunks\n"); +} + +static int ion_secure_cma_alloc_from_pool( + struct ion_cma_secure_heap *sheap, + dma_addr_t *phys, + unsigned long len) +{ + dma_addr_t paddr; + unsigned long page_no; + int ret = 0; + int total_overlap = 0; + struct list_head *entry; + + mutex_lock(&sheap->chunk_lock); + + page_no = bitmap_find_next_zero_area(sheap->bitmap, + sheap->npages, 0, + len >> PAGE_SHIFT, 0); + if (page_no >= sheap->npages) { + ret = -ENOMEM; + goto out; + } + bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT); + paddr = sheap->base + (page_no << PAGE_SHIFT); + + list_for_each(entry, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = container_of(entry, + struct ion_cma_alloc_chunk, entry); + int overlap = intersect(chunk->handle, + chunk->chunk_size, paddr, len); + + atomic_add(overlap, &chunk->cnt); + total_overlap += overlap; + } + + if (total_overlap != len) { + bad_math_dump(len, total_overlap, sheap, 1, paddr); + ret = -EINVAL; + goto out; + } + + *phys = paddr; +out: + mutex_unlock(&sheap->chunk_lock); + return ret; +} + +static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap, + struct ion_cma_alloc_chunk *chunk) +{ + unsigned long attrs = 0; + + attrs = DMA_ATTR_NO_KERNEL_MAPPING; + /* This region is 'allocated' and not available to allocate from */ + bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT, + chunk->chunk_size >> PAGE_SHIFT); + dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr, + chunk->handle, attrs); + atomic_sub(chunk->chunk_size, &sheap->total_pool_size); + list_del(&chunk->entry); + kfree(chunk); +} + +static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap 
*sheap, + int max_nr) +{ + struct list_head *entry, *_n; + unsigned long drained_size = 0, skipped_size = 0; + + trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size); + + list_for_each_safe(entry, _n, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = container_of(entry, + struct ion_cma_alloc_chunk, entry); + + if (max_nr < 0) + break; + + if (atomic_read(&chunk->cnt) == 0) { + max_nr -= chunk->chunk_size; + drained_size += chunk->chunk_size; + ion_secure_cma_free_chunk(sheap, chunk); + } else { + skipped_size += chunk->chunk_size; + } + } + + trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size); +} + +int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + + mutex_lock(&sheap->chunk_lock); + __ion_secure_cma_shrink_pool(sheap, INT_MAX); + mutex_unlock(&sheap->chunk_lock); + + return 0; +} + +static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct ion_cma_secure_heap *sheap = container_of(shrinker, + struct ion_cma_secure_heap, shrinker); + int nr_to_scan = sc->nr_to_scan; + + /* + * Allocation path may invoke the shrinker. Proceeding any further + * would cause a deadlock in several places so don't shrink if that + * happens. 
+ */ + if (!mutex_trylock(&sheap->chunk_lock)) + return -EAGAIN; + + __ion_secure_cma_shrink_pool(sheap, nr_to_scan); + + mutex_unlock(&sheap->chunk_lock); + + return atomic_read(&sheap->total_pool_size); +} + +static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct ion_cma_secure_heap *sheap = container_of(shrinker, + struct ion_cma_secure_heap, shrinker); + return atomic_read(&sheap->total_pool_size); +} + +static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap, + dma_addr_t handle, + unsigned long len) +{ + struct list_head *entry, *_n; + int total_overlap = 0; + + mutex_lock(&sheap->chunk_lock); + bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT, + len >> PAGE_SHIFT); + + list_for_each_safe(entry, _n, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = container_of(entry, + struct ion_cma_alloc_chunk, entry); + int overlap = intersect(chunk->handle, + chunk->chunk_size, handle, len); + + /* + * Don't actually free this from the pool list yet, let either + * an explicit drain call or the shrinkers take care of the + * pool. 
+ */ + atomic_sub_return(overlap, &chunk->cnt); + if (atomic_read(&chunk->cnt) < 0) { + WARN(1, "Invalid chunk size of %d\n", + atomic_read(&chunk->cnt)); + goto out; + } + + total_overlap += overlap; + } + + if (atomic_read(&sheap->total_pool_size) < 0) { + WARN(1, "total pool size of %d is unexpected\n", + atomic_read(&sheap->total_pool_size)); + goto out; + } + + if (total_overlap != len) + bad_math_dump(len, total_overlap, sheap, 0, handle); +out: + mutex_unlock(&sheap->chunk_lock); +} + +/* ION CMA heap operations functions */ +static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate( + struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long len, + unsigned long flags) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + struct ion_secure_cma_buffer_info *info; + int ret; + + dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ION_CMA_ALLOCATE_FAILED; + + mutex_lock(&sheap->alloc_lock); + ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len); + + if (ret) { +retry: + ret = ion_secure_cma_add_to_pool(sheap, len, false); + if (ret) { + mutex_unlock(&sheap->alloc_lock); + dev_err(sheap->dev, "Fail to allocate buffer\n"); + goto err; + } + ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len); + if (ret) { + /* + * Lost the race with the shrinker, try again + */ + goto retry; + } + } + mutex_unlock(&sheap->alloc_lock); + + atomic_add(len, &sheap->total_allocated); + info->table = kmalloc(sizeof(*info->table), GFP_KERNEL); + if (!info->table) { + dev_err(sheap->dev, "Fail to allocate sg table\n"); + goto err; + } + + info->len = len; + ion_secure_cma_get_sgtable(sheap->dev, + info->table, info->phys, len); + + /* keep this for memory release */ + buffer->priv_virt = info; + dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer); + return info; + +err: + kfree(info); + return ION_CMA_ALLOCATE_FAILED; +} + 
+static void __ion_secure_cma_free_non_contig(struct ion_cma_secure_heap *sheap, + struct ion_secure_cma_buffer_info + *info) +{ + struct ion_secure_cma_non_contig_info *nc_info, *temp; + + list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, entry) { + ion_secure_cma_free_from_pool(sheap, nc_info->phys, + nc_info->len); + list_del(&nc_info->entry); + kfree(nc_info); + } +} + +static void __ion_secure_cma_free(struct ion_cma_secure_heap *sheap, + struct ion_secure_cma_buffer_info *info, + bool release_memory) +{ + if (release_memory) { + if (info->ncelems) + __ion_secure_cma_free_non_contig(sheap, info); + else + ion_secure_cma_free_from_pool(sheap, info->phys, + info->len); + } + sg_free_table(info->table); + kfree(info->table); + kfree(info); +} + +static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate_non_contig( + struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long len, + unsigned long flags) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + struct ion_secure_cma_buffer_info *info; + int ret; + unsigned long alloc_size = len; + struct ion_secure_cma_non_contig_info *nc_info, *temp; + unsigned long ncelems = 0; + struct scatterlist *sg; + unsigned long total_allocated = 0; + + dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ION_CMA_ALLOCATE_FAILED; + + INIT_LIST_HEAD(&info->non_contig_list); + info->table = kmalloc(sizeof(*info->table), GFP_KERNEL); + if (!info->table) { + dev_err(sheap->dev, "Fail to allocate sg table\n"); + goto err; + } + mutex_lock(&sheap->alloc_lock); + while (total_allocated < len) { + if (alloc_size < SZ_1M) { + pr_err("Cannot allocate less than 1MB\n"); + goto err2; + } + nc_info = kzalloc(sizeof(*nc_info), GFP_KERNEL); + if (!nc_info) + goto err2; + + ret = ion_secure_cma_alloc_from_pool(sheap, &nc_info->phys, + alloc_size); + if (ret) { +retry: + ret = 
ion_secure_cma_add_to_pool(sheap, alloc_size, + false); + if (ret) { + alloc_size = alloc_size / 2; + if (!IS_ALIGNED(alloc_size, SZ_1M)) + alloc_size = round_down(alloc_size, + SZ_1M); + kfree(nc_info); + continue; + } + ret = ion_secure_cma_alloc_from_pool(sheap, + &nc_info->phys, + alloc_size); + if (ret) { + /* + * Lost the race with the shrinker, try again + */ + goto retry; + } + } + nc_info->len = alloc_size; + list_add_tail(&nc_info->entry, &info->non_contig_list); + ncelems++; + total_allocated += alloc_size; + alloc_size = min(alloc_size, len - total_allocated); + } + mutex_unlock(&sheap->alloc_lock); + atomic_add(total_allocated, &sheap->total_allocated); + + nc_info = list_first_entry_or_null(&info->non_contig_list, + struct + ion_secure_cma_non_contig_info, + entry); + if (!nc_info) { + pr_err("%s: Unable to find first entry of non contig list\n", + __func__); + goto err1; + } + info->phys = nc_info->phys; + info->len = total_allocated; + info->ncelems = ncelems; + + ret = sg_alloc_table(info->table, ncelems, GFP_KERNEL); + if (unlikely(ret)) + goto err1; + + sg = info->table->sgl; + list_for_each_entry(nc_info, &info->non_contig_list, entry) { + sg_set_page(sg, phys_to_page(nc_info->phys), nc_info->len, 0); + sg_dma_address(sg) = nc_info->phys; + sg = sg_next(sg); + } + buffer->priv_virt = info; + dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer); + return info; + +err2: + mutex_unlock(&sheap->alloc_lock); +err1: + list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, + entry) { + list_del(&nc_info->entry); + kfree(nc_info); + } + kfree(info->table); +err: + kfree(info); + return ION_CMA_ALLOCATE_FAILED; +} + +static int ion_secure_cma_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long flags) +{ + unsigned long secure_allocation = flags & ION_FLAG_SECURE; + struct ion_secure_cma_buffer_info *buf = NULL; + unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG; + + if 
(!secure_allocation && + !ion_heap_allow_secure_allocation(heap->type)) { + pr_err("%s: non-secure allocation disallowed from heap %s %lx\n", + __func__, heap->name, flags); + return -ENOMEM; + } + + if (ION_IS_CACHED(flags)) { + pr_err("%s: cannot allocate cached memory from secure heap %s\n", + __func__, heap->name); + return -ENOMEM; + } + + if (!IS_ALIGNED(len, SZ_1M)) { + pr_err("%s: length of allocation from %s must be a multiple of 1MB\n", + __func__, heap->name); + return -ENOMEM; + } + trace_ion_secure_cma_allocate_start(heap->name, len, flags); + if (!allow_non_contig) + buf = __ion_secure_cma_allocate(heap, buffer, len, + flags); + else + buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len, + flags); + trace_ion_secure_cma_allocate_end(heap->name, len, flags); + if (buf) { + int ret; + + if (!msm_secure_v2_is_supported()) { + pr_err("%s: securing buffers from clients is not supported on this platform\n", + __func__); + ret = 1; + } else { + trace_ion_cp_secure_buffer_start(heap->name, len, + flags); + ret = msm_secure_table(buf->table); + trace_ion_cp_secure_buffer_end(heap->name, len, + flags); + } + if (ret) { + struct ion_cma_secure_heap *sheap = + container_of(buffer->heap, + struct ion_cma_secure_heap, heap); + + pr_err("%s: failed to secure buffer\n", __func__); + __ion_secure_cma_free(sheap, buf, true); + } + return ret; + } else { + return -ENOMEM; + } +} + +static void ion_secure_cma_free(struct ion_buffer *buffer) +{ + struct ion_cma_secure_heap *sheap = + container_of(buffer->heap, struct ion_cma_secure_heap, heap); + struct ion_secure_cma_buffer_info *info = buffer->priv_virt; + int ret = 0; + + dev_dbg(sheap->dev, "Release buffer %pK\n", buffer); + if (msm_secure_v2_is_supported()) + ret = msm_unsecure_table(info->table); + atomic_sub(buffer->size, &sheap->total_allocated); + if (atomic_read(&sheap->total_allocated) < 0) { + WARN(1, "no memory is allocated from this pool\n"); + return; + } + + /* release memory */ + if (ret) { + 
WARN(1, "Unsecure failed, can't free the memory. Leaking it!"); + atomic_add(buffer->size, &sheap->total_leaked); + } + + __ion_secure_cma_free(sheap, info, ret ? false : true); +} + +static int ion_secure_cma_mmap(struct ion_heap *mapper, + struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + pr_info("%s: mmaping from secure heap %s disallowed\n", + __func__, mapper->name); + return -EINVAL; +} + +static void *ion_secure_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + pr_info("%s: kernel mapping from secure heap %s disallowed\n", + __func__, heap->name); + return ERR_PTR(-EINVAL); +} + +static void ion_secure_cma_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops ion_secure_cma_ops = { + .allocate = ion_secure_cma_allocate, + .free = ion_secure_cma_free, + .map_user = ion_secure_cma_mmap, + .map_kernel = ion_secure_cma_map_kernel, + .unmap_kernel = ion_secure_cma_unmap_kernel, +}; + +struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data) +{ + struct ion_cma_secure_heap *sheap; + int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long); + + sheap = kzalloc(sizeof(*sheap), GFP_KERNEL); + if (!sheap) + return ERR_PTR(-ENOMEM); + + sheap->dev = data->priv; + mutex_init(&sheap->chunk_lock); + mutex_init(&sheap->alloc_lock); + sheap->heap.ops = &ion_secure_cma_ops; + sheap->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA; + sheap->npages = data->size >> PAGE_SHIFT; + sheap->base = data->base; + sheap->heap_size = data->size; + sheap->bitmap = kmalloc(map_size, GFP_KERNEL); + INIT_LIST_HEAD(&sheap->chunks); + INIT_WORK(&sheap->work, ion_secure_pool_pages); + sheap->shrinker.seeks = DEFAULT_SEEKS; + sheap->shrinker.batch = 0; + sheap->shrinker.scan_objects = ion_secure_cma_shrinker; + sheap->shrinker.count_objects = ion_secure_cma_shrinker_count; + sheap->default_prefetch_size = sheap->heap_size; + register_shrinker(&sheap->shrinker); + + if 
(!sheap->bitmap) { + kfree(sheap); + return ERR_PTR(-ENOMEM); + } + + /* + * we initially mark everything in the allocator as being free so that + * allocations can come in later + */ + bitmap_fill(sheap->bitmap, sheap->npages); + + return &sheap->heap; +} + +void ion_secure_cma_heap_destroy(struct ion_heap *heap) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + + kfree(sheap); +} diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 85e5260f6feb065c22a594328ac471b5f95a77c0..a30fa2b2cd461079f97c529cd2b01aa0c6679414 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -335,6 +335,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) heap = ion_chunk_heap_create(heap_data); break; #ifdef CONFIG_CMA + case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA: + heap = ion_secure_cma_heap_create(heap_data); + break; case ION_HEAP_TYPE_DMA: heap = ion_cma_heap_create(heap_data); break; @@ -387,6 +390,9 @@ void ion_heap_destroy(struct ion_heap *heap) ion_chunk_heap_destroy(heap); break; #ifdef CONFIG_CMA + case ION_HEAP_TYPE_SECURE_DMA: + ion_secure_cma_heap_destroy(heap); + break; case ION_HEAP_TYPE_DMA: ion_cma_heap_destroy(heap); break; diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c index 3d4e83fa4516eca448d61d5cf1284f5ace74ee11..472763a29c18efb8de4b3661101f65d824b0dfd5 100644 --- a/drivers/staging/android/ion/ion_secure_util.c +++ b/drivers/staging/android/ion/ion_secure_util.c @@ -103,6 +103,13 @@ int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list, struct scatterlist *sg; int ret, i; + if (source_nelems <= 0) { + pr_err("%s: source_nelems invalid\n", + __func__); + ret = -EINVAL; + goto out; + } + ret = hyp_assign_table(sgt, source_vm_list, source_nelems, &dest_vmid, &dest_perms, 1); if (ret) { @@ -126,6 +133,13 @@ int 
ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list, int i; int ret = 0; + if (dest_nelems <= 0) { + pr_err("%s: dest_nelems invalid\n", + __func__); + ret = -EINVAL; + goto out; + } + dest_perms = kcalloc(dest_nelems, sizeof(*dest_perms), GFP_KERNEL); if (!dest_perms) { ret = -ENOMEM; diff --git a/drivers/staging/android/ion/msm/msm_ion_of.c b/drivers/staging/android/ion/msm/msm_ion_of.c index b8a70087bf2ac3df05888cf3d03a61ff11d0403f..57a705f58b0ffd27532560965c8b62c2d255ad58 100644 --- a/drivers/staging/android/ion/msm/msm_ion_of.c +++ b/drivers/staging/android/ion/msm/msm_ion_of.c @@ -89,6 +89,7 @@ static struct heap_types_info { MAKE_HEAP_TYPE_MAPPING(CARVEOUT), MAKE_HEAP_TYPE_MAPPING(CHUNK), MAKE_HEAP_TYPE_MAPPING(DMA), + MAKE_HEAP_TYPE_MAPPING(SECURE_DMA), MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE), MAKE_HEAP_TYPE_MAPPING(HYP_CMA), }; diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h new file mode 100644 index 0000000000000000000000000000000000000000..181ae1cc2c052f619441d36ec9223fb9f32c5fb4 --- /dev/null +++ b/drivers/staging/android/ion/msm_ion_priv.h @@ -0,0 +1,112 @@ +/* + * drivers/staging/android/ion/msm_ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _MSM_ION_PRIV_H +#define _MSM_ION_PRIV_H + +#include +#include +#include +#include +#include +#include +#include + +struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap); +void ion_iommu_heap_destroy(struct ion_heap *heap); + +struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap); +void ion_cp_heap_destroy(struct ion_heap *heap); + +struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap); +void ion_system_secure_heap_destroy(struct ion_heap *heap); +int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data); +int ion_system_secure_heap_drain(struct ion_heap *heap, void *data); + +struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap); +void ion_cma_secure_heap_destroy(struct ion_heap *heap); + +long msm_ion_custom_ioctl(struct ion_client *client, + unsigned int cmd, + unsigned long arg); + +#ifdef CONFIG_CMA +struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *heap); +void ion_secure_cma_heap_destroy(struct ion_heap *heap); + +int ion_secure_cma_prefetch(struct ion_heap *heap, void *data); + +int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused); + +#else +static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data) +{ + return -ENODEV; +} + +static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) +{ + return -ENODEV; +} + +#endif + +struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *pheap); +void ion_removed_heap_destroy(struct ion_heap *heap); + +#define ION_CP_ALLOCATE_FAIL -1 +#define ION_RESERVED_ALLOCATE_FAIL -1 + +void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base, + unsigned long *size); + +void ion_mem_map_show(struct ion_heap *heap); + +int ion_heap_is_system_secure_heap_type(enum ion_heap_type type); + +int ion_heap_allow_secure_allocation(enum ion_heap_type type); + +int ion_heap_allow_heap_secure(enum ion_heap_type type); + +int 
ion_heap_allow_handle_secure(enum ion_heap_type type); + +int get_secure_vmid(unsigned long flags); + +bool is_secure_vmid_valid(int vmid); + +/** + * Functions to help assign/unassign sg_table for System Secure Heap + */ + +int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid); +int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid); + +/** + * ion_create_chunked_sg_table - helper function to create sg table + * with specified chunk size + * @buffer_base: The starting address used for the sg dma address + * @chunk_size: The size of each entry in the sg table + * @total_size: The total size of the sg table (i.e. the sum of the + * entries). This will be rounded up to the nearest + * multiple of `chunk_size' + */ +struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base, + size_t chunk_size, + size_t total_size); + +void show_ion_usage(struct ion_device *dev); +#endif /* _MSM_ION_PRIV_H */ diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h index cd2102fccde499a9e8fe24c4c07ee8b6cca8575e..a725b84bcf4537b94a27e21c347019fe80fc1b1d 100644 --- a/drivers/staging/android/uapi/msm_ion.h +++ b/drivers/staging/android/uapi/msm_ion.h @@ -14,6 +14,7 @@ enum msm_ion_heap_types { ION_HEAP_TYPE_MSM_START = 6, + ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START, ION_HEAP_TYPE_SYSTEM_SECURE, ION_HEAP_TYPE_HYP_CMA, }; @@ -65,6 +66,12 @@ enum ion_heap_ids { #define ION_FLAGS_CP_MASK 0x7FFF0000 +/** + * Flag to allow non contiguous allocation of memory from secure + * heap + */ +#define ION_FLAG_ALLOW_NON_CONTIG ION_BIT(28) + /** + * Flag to use when allocating to indicate that a heap is secure. 
* Do NOT use BIT macro since it is defined in #ifdef __KERNEL__ diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c index 8e84b2e7f5bda121f31bb6f98038d226b457eb3e..b83d17db06bddd9d7238f842a1c8cc3464d19eef 100644 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c @@ -315,7 +315,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch) } fd = dpaa2_dq_fd(dq); - fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); fq->stats.frames++; fq->consume(priv, ch, fd, &ch->napi); @@ -1888,7 +1888,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, queue.destination.id = fq->channel->dpcon_id; queue.destination.type = DPNI_DEST_DPCON; queue.destination.priority = 1; - queue.user_context = (u64)fq; + queue.user_context = (u64)(uintptr_t)fq; err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_RX, 0, fq->flowid, DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, @@ -1940,7 +1940,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv, queue.destination.id = fq->channel->dpcon_id; queue.destination.type = DPNI_DEST_DPCON; queue.destination.priority = 0; - queue.user_context = (u64)fq; + queue.user_context = (u64)(uintptr_t)fq; err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c index 975dbbb3abd083608b36812820f96739875e300d..7da3eb4ca4be98561ce8718cd42b9fd0259c12f9 100644 --- a/drivers/staging/ks7010/ks_hostif.c +++ b/drivers/staging/ks7010/ks_hostif.c @@ -242,9 +242,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, offset = 0; while (bsize > offset) { - /* DPRINTK(4, "Element ID=%d\n",*bp); */ - switch (*bp) { - case 0: /* ssid */ + switch (*bp) { /* Information Element ID */ + case 
WLAN_EID_SSID: if (*(bp + 1) <= SSID_MAX_SIZE) { ap->ssid.size = *(bp + 1); } else { @@ -254,8 +253,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, } memcpy(ap->ssid.body, bp + 2, ap->ssid.size); break; - case 1: /* rate */ - case 50: /* ext rate */ + case WLAN_EID_SUPP_RATES: + case WLAN_EID_EXT_SUPP_RATES: if ((*(bp + 1) + ap->rate_set.size) <= RATE_SET_MAX_SIZE) { memcpy(&ap->rate_set.body[ap->rate_set.size], @@ -271,9 +270,9 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, (RATE_SET_MAX_SIZE - ap->rate_set.size); } break; - case 3: /* DS parameter */ + case WLAN_EID_DS_PARAMS: break; - case 48: /* RSN(WPA2) */ + case WLAN_EID_RSN: ap->rsn_ie.id = *bp; if (*(bp + 1) <= RSN_IE_BODY_MAX) { ap->rsn_ie.size = *(bp + 1); @@ -284,8 +283,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, } memcpy(ap->rsn_ie.body, bp + 2, ap->rsn_ie.size); break; - case 221: /* WPA */ - if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */ + case WLAN_EID_VENDOR_SPECIFIC: /* WPA */ + if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */ ap->wpa_ie.id = *bp; if (*(bp + 1) <= RSN_IE_BODY_MAX) { ap->wpa_ie.size = *(bp + 1); @@ -300,18 +299,18 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info, } break; - case 2: /* FH parameter */ - case 4: /* CF parameter */ - case 5: /* TIM */ - case 6: /* IBSS parameter */ - case 7: /* Country */ - case 42: /* ERP information */ - case 47: /* Reserve ID 47 Broadcom AP */ + case WLAN_EID_FH_PARAMS: + case WLAN_EID_CF_PARAMS: + case WLAN_EID_TIM: + case WLAN_EID_IBSS_PARAMS: + case WLAN_EID_COUNTRY: + case WLAN_EID_ERP_INFO: break; default: DPRINTK(4, "unknown Element ID=%d\n", *bp); break; } + offset += 2; /* id & size field */ offset += *(bp + 1); /* +size offset */ bp += (*(bp + 1) + 2); /* pointer update */ diff --git a/drivers/staging/ks7010/ks_hostif.h 
b/drivers/staging/ks7010/ks_hostif.h index 5bae8d468e23eb1b717058e0ded2d578f89a9afe..9ac317e4b507c671b0b7ed0d78e60a84f9a69427 100644 --- a/drivers/staging/ks7010/ks_hostif.h +++ b/drivers/staging/ks7010/ks_hostif.h @@ -13,6 +13,7 @@ #define _KS_HOSTIF_H_ #include +#include /* * HOST-MAC I/F events diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index a986737ec010b93b9b0a112d1b4666749159e276..82a499fb23bb1b96c65b67f019b5e1c6ab3834d3 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -190,7 +190,7 @@ struct client_obd { struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */ /* the grant values are protected by loi_list_lock below */ - unsigned long cl_dirty_pages; /* all _dirty_ in pahges */ + unsigned long cl_dirty_pages; /* all _dirty_ in pages */ unsigned long cl_dirty_max_pages; /* allowed w/o rpc */ unsigned long cl_dirty_transit; /* dirty synchronous */ unsigned long cl_avail_grant; /* bytes of credit for ost */ diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index 6e16c930a021a257290e87345c911096649e6619..c2aadb2d1fead08a80f291c7e238600a8291944d 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -2694,7 +2694,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp, if (lsm && !lmm) { int i; - for (i = 1; i < lsm->lsm_md_stripe_count; i++) { + for (i = 0; i < lsm->lsm_md_stripe_count; i++) { /* * For migrating inode, the master stripe and master * object will be the same, so do not need iput, see diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index e1207c227b7999fc1bb523683d8ee4644e257592..c356d00d87a574bdd7102a72a76d80fd31f9a099 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ 
-1528,7 +1528,7 @@ static int osc_enter_cache_try(struct client_obd *cli, if (rc < 0) return 0; - if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages && + if (cli->cl_dirty_pages < cli->cl_dirty_max_pages && atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 46b3f19e0878fa4741b189a76fa8987f599660c7..db3eb7ec5809dc0b4b033a48e0d8941d1b64ddaf 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1702,6 +1702,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev) priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL); priv->oldaddr = kmalloc(16, GFP_KERNEL); + if (!priv->oldaddr) + return -ENOMEM; oldaddr = priv->oldaddr; align = ((long)oldaddr) & 3; if (align) { diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c index 8f2d508183b29a8ab6a049c488174588b49979e3..9030d71a3d0b41fe437022fd1355c637719d3a60 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c @@ -36,6 +36,10 @@ MODULE_PARM_DESC(enable_compat_alsa, static void snd_devm_unregister_child(struct device *dev, void *res) { struct device *childdev = *(struct device **)res; + struct bcm2835_chip *chip = dev_get_drvdata(childdev); + struct snd_card *card = chip->card; + + snd_card_free(card); device_unregister(childdev); } @@ -61,6 +65,13 @@ static int snd_devm_add_child(struct device *dev, struct device *child) return 0; } +static void snd_bcm2835_release(struct device *dev) +{ + struct bcm2835_chip *chip = dev_get_drvdata(dev); + + kfree(chip); +} + static struct device * snd_create_device(struct device *parent, struct device_driver *driver, @@ -76,6 +87,7 @@ snd_create_device(struct device *parent, device_initialize(device); device->parent = 
parent; device->driver = driver; + device->release = snd_bcm2835_release; dev_set_name(device, "%s", name); @@ -86,18 +98,19 @@ snd_create_device(struct device *parent, return device; } -static int snd_bcm2835_free(struct bcm2835_chip *chip) -{ - kfree(chip); - return 0; -} - /* component-destructor * (see "Management of Cards and Components") */ static int snd_bcm2835_dev_free(struct snd_device *device) { - return snd_bcm2835_free(device->device_data); + struct bcm2835_chip *chip = device->device_data; + struct snd_card *card = chip->card; + + /* TODO: free pcm, ctl */ + + snd_device_free(card, chip); + + return 0; } /* chip-specific constructor @@ -122,7 +135,7 @@ static int snd_bcm2835_create(struct snd_card *card, err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err) { - snd_bcm2835_free(chip); + kfree(chip); return err; } @@ -130,31 +143,14 @@ static int snd_bcm2835_create(struct snd_card *card, return 0; } -static void snd_devm_card_free(struct device *dev, void *res) +static struct snd_card *snd_bcm2835_card_new(struct device *dev) { - struct snd_card *snd_card = *(struct snd_card **)res; - - snd_card_free(snd_card); -} - -static struct snd_card *snd_devm_card_new(struct device *dev) -{ - struct snd_card **dr; struct snd_card *card; int ret; - dr = devres_alloc(snd_devm_card_free, sizeof(*dr), GFP_KERNEL); - if (!dr) - return ERR_PTR(-ENOMEM); - ret = snd_card_new(dev, -1, NULL, THIS_MODULE, 0, &card); - if (ret) { - devres_free(dr); + if (ret) return ERR_PTR(ret); - } - - *dr = card; - devres_add(dev, dr); return card; } @@ -271,7 +267,7 @@ static int snd_add_child_device(struct device *device, return PTR_ERR(child); } - card = snd_devm_card_new(child); + card = snd_bcm2835_card_new(child); if (IS_ERR(card)) { dev_err(child, "Failed to create card"); return PTR_ERR(card); @@ -313,7 +309,7 @@ static int snd_add_child_device(struct device *device, return err; } - dev_set_drvdata(child, card); + dev_set_drvdata(child, chip); dev_info(child, 
"card created with %d channels\n", numchans); return 0; diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 4bc7956cefc4afcde8e724f47346c2937e45e3e3..ea3ce4e17b855275ba80dd7adca7ac91fb8b3f46 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -203,9 +203,10 @@ int tee_shm_get_fd(struct tee_shm *shm) if ((shm->flags & req_flags) != req_flags) return -EINVAL; + get_dma_buf(shm->dmabuf); fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC); - if (fd >= 0) - get_dma_buf(shm->dmabuf); + if (fd < 0) + dma_buf_put(shm->dmabuf); return fd; } diff --git a/drivers/thermal/gov_low_limits.c b/drivers/thermal/gov_low_limits.c index 278869c70a46026ca107c643d72454b4617b2db9..d02ea2614895bcf3ba3c9c966de792899d0fef24 100644 --- a/drivers/thermal/gov_low_limits.c +++ b/drivers/thermal/gov_low_limits.c @@ -62,19 +62,30 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n", old_target, (int)instance->target); - if (old_target == instance->target) + if (instance->initialized && old_target == instance->target) continue; - if (old_target == THERMAL_NO_TARGET && + if (!instance->initialized) { + if (instance->target != THERMAL_NO_TARGET) { + trace_thermal_zone_trip(tz, trip, trip_type, + true); + tz->passive += 1; + } + } else { + if (old_target == THERMAL_NO_TARGET && instance->target != THERMAL_NO_TARGET) { - trace_thermal_zone_trip(tz, trip, trip_type, true); - tz->passive += 1; - } else if (old_target != THERMAL_NO_TARGET && + trace_thermal_zone_trip(tz, trip, trip_type, + true); + tz->passive += 1; + } else if (old_target != THERMAL_NO_TARGET && instance->target == THERMAL_NO_TARGET) { - trace_thermal_zone_trip(tz, trip, trip_type, false); - tz->passive -= 1; + trace_thermal_zone_trip(tz, trip, trip_type, + false); + tz->passive -= 1; + } } + instance->initialized = true; instance->cdev->updated = false; /* cdev needs update */ } diff --git a/drivers/thermal/step_wise.c 
b/drivers/thermal/step_wise.c index b2f73c14e8bc58fd80a08db3472a4f269c5d53cd..c32e91114b0bd596fbdb7e70d18fd95665c86c06 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -181,16 +181,26 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) if (instance->initialized && old_target == instance->target) continue; - /* Activate a passive thermal instance */ - if (old_target == THERMAL_NO_TARGET && - instance->target != THERMAL_NO_TARGET) { - update_passive_instance(tz, trip_type, 1); - trace_thermal_zone_trip(tz, trip, trip_type, true); - /* Deactivate a passive thermal instance */ - } else if (old_target != THERMAL_NO_TARGET && - instance->target == THERMAL_NO_TARGET) { - update_passive_instance(tz, trip_type, -1); - trace_thermal_zone_trip(tz, trip, trip_type, false); + if (!instance->initialized) { + if (instance->target != THERMAL_NO_TARGET) { + trace_thermal_zone_trip(tz, trip, trip_type, + true); + update_passive_instance(tz, trip_type, 1); + } + } else { + /* Activate a passive thermal instance */ + if (old_target == THERMAL_NO_TARGET && + instance->target != THERMAL_NO_TARGET) { + trace_thermal_zone_trip(tz, trip, trip_type, + true); + update_passive_instance(tz, trip_type, 1); + /* Deactivate a passive thermal instance */ + } else if (old_target != THERMAL_NO_TARGET && + instance->target == THERMAL_NO_TARGET) { + trace_thermal_zone_trip(tz, trip, trip_type, + false); + update_passive_instance(tz, trip_type, -1); + } } instance->initialized = true; diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c index 6584000ae94bac2e255804043fa5d2047a06a9f2..f2c886e167b3d8e50a91d2d016179d023d80514c 100644 --- a/drivers/thermal/tsens2xxx.c +++ b/drivers/thermal/tsens2xxx.c @@ -31,6 +31,7 @@ #define TSENS_TM_CRITICAL_INT_EN BIT(2) #define TSENS_TM_UPPER_INT_EN BIT(1) #define TSENS_TM_LOWER_INT_EN BIT(0) +#define TSENS_TM_UPPER_LOWER_INT_DISABLE 0xffffffff #define TSENS_TM_SN_UPPER_LOWER_THRESHOLD(n) 
((n) + 0x20) #define TSENS_TM_SN_ADDR_OFFSET 0x4 #define TSENS_TM_UPPER_THRESHOLD_SET(n) ((n) << 12) @@ -525,6 +526,7 @@ static int tsens2xxx_hw_init(struct tsens_device *tmdev) void __iomem *srot_addr; void __iomem *sensor_int_mask_addr; unsigned int srot_val, crit_mask, crit_val; + void __iomem *int_mask_addr; srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4); srot_val = readl_relaxed(srot_addr); @@ -567,6 +569,9 @@ static int tsens2xxx_hw_init(struct tsens_device *tmdev) mb(); } + int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr); + writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr); + writel_relaxed(TSENS_TM_CRITICAL_INT_EN | TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN, TSENS_TM_INT_EN(tmdev->tsens_tm_addr)); diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index e32c51d549c3d3a7e4edc9cbb3421d9ff1285b21..be456ea27ab27985865c8dccbdf7be22a23e7b87 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1874,7 +1874,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) status = serial_port_in(port, UART_LSR); - if (status & (UART_LSR_DR | UART_LSR_BI)) { + if (status & (UART_LSR_DR | UART_LSR_BI) && + iir & UART_IIR_RDI) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); } diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 3e4b717670d7432e32d4b66568900773f6e239ce..59cb62de236bd3689f2781379a47fb1825fdf74c 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -331,7 +331,7 @@ static int altera_uart_startup(struct uart_port *port) /* Enable RX interrupts now */ pp->imr = ALTERA_UART_CONTROL_RRDY_MSK; - writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); + altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG); spin_unlock_irqrestore(&port->lock, flags); @@ -347,7 +347,7 @@ static void altera_uart_shutdown(struct uart_port 
*port) /* Disable all interrupts now */ pp->imr = 0; - writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG); + altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG); spin_unlock_irqrestore(&port->lock, flags); @@ -436,7 +436,7 @@ static void altera_uart_console_putc(struct uart_port *port, int c) ALTERA_UART_STATUS_TRDY_MSK)) cpu_relax(); - writel(c, port->membase + ALTERA_UART_TXDATA_REG); + altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG); } static void altera_uart_console_write(struct console *co, const char *s, @@ -506,13 +506,13 @@ static int __init altera_uart_earlycon_setup(struct earlycon_device *dev, return -ENODEV; /* Enable RX interrupts now */ - writel(ALTERA_UART_CONTROL_RRDY_MSK, - port->membase + ALTERA_UART_CONTROL_REG); + altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK, + ALTERA_UART_CONTROL_REG); if (dev->baud) { unsigned int baudclk = port->uartclk / dev->baud; - writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG); + altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG); } dev->con->write = altera_uart_earlycon_write; diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 77fe306690c4174b775287e23a3340292f15dda6..71e37abb6bcbd0f4928d92d3f28f7353b6f82534 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -596,6 +596,11 @@ static int arc_serial_probe(struct platform_device *pdev) if (dev_id < 0) dev_id = 0; + if (dev_id >= ARRAY_SIZE(arc_uart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", dev_id); + return -EINVAL; + } + uart = &arc_uart_ports[dev_id]; port = &uart->port; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index f0252184291ed4a3ab49ab03ce634e7be8588bf4..7a3db9378fa388818eddbdeb12082ac703686b9e 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -2151,6 +2151,10 @@ static int lpuart_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get alias id, errno %d\n", 
ret); return ret; } + if (ret >= ARRAY_SIZE(lpuart_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", ret); + return -EINVAL; + } sport->port.line = ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sport->port.membase = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 521500c575c825cec28d2282aabfbabb213f6ddd..8deaf2ad8b34ac4bafeb795a5f5a8e4dc5faf345 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2096,6 +2096,12 @@ static int serial_imx_probe(struct platform_device *pdev) else if (ret < 0) return ret; + if (sport->port.line >= ARRAY_SIZE(imx_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", + sport->port.line); + return -EINVAL; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index be94246b6fcca1874161470035b9d9bd91237f88..673c8fd7e34f6b5434c001a9337e563d83992548 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1667,6 +1667,10 @@ static int mxs_auart_probe(struct platform_device *pdev) s->port.line = pdev->id < 0 ? 
0 : pdev->id; else if (ret < 0) return ret; + if (s->port.line >= ARRAY_SIZE(auart_port)) { + dev_err(&pdev->dev, "serial%d out of range\n", s->port.line); + return -EINVAL; + } if (of_id) { pdev->id_entry = of_id->data; diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 8aca18c4cdea4076e8666baff7d7f4953d6a6e1f..bedd4bdec4ab972b9bd323f0c2d3d806811c22f6 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1821,6 +1821,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index); + if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", index); + return -EINVAL; + } ourport = &s3c24xx_serial_ports[index]; ourport->drv_data = s3c24xx_get_driver_data(pdev); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 22f60239026c9ecf7a3077b31f01c9733f4d19b1..8a58ee32ff618ce9f3260521ed1d7689a0c1598c 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -3076,6 +3076,10 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, dev_err(&pdev->dev, "failed to get alias id (%d)\n", id); return NULL; } + if (id >= ARRAY_SIZE(sci_ports)) { + dev_err(&pdev->dev, "serial%d out of range\n", id); + return NULL; + } sp = &sci_ports[id]; *dev_id = id; diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 31a630ae0870efec120ec63cdb15464dc947682a..21c35ad72b99865ad3852490c10905325c67c967 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1115,7 +1115,7 @@ static struct uart_port *cdns_uart_get_port(int id) struct uart_port *port; /* Try the given port id if failed use default method */ - if (cdns_uart_port[id].mapbase != 0) { + if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) { /* Find the next unused port */ for (id = 0; id < CDNS_UART_NR_PORTS; id++) if (cdns_uart_port[id].mapbase 
== 0) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 4149a965516e88f9475b18987c4964b83e7868e6..22952d70b98116bf3518cb5ecbf6a279672dbe51 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -187,6 +187,7 @@ static int acm_wb_alloc(struct acm *acm) wb = &acm->wb[wbn]; if (!wb->use) { wb->use = 1; + wb->len = 0; return wbn; } wbn = (wbn + 1) % ACM_NW; @@ -818,16 +819,18 @@ static int acm_tty_write(struct tty_struct *tty, static void acm_tty_flush_chars(struct tty_struct *tty) { struct acm *acm = tty->driver_data; - struct acm_wb *cur = acm->putbuffer; + struct acm_wb *cur; int err; unsigned long flags; + spin_lock_irqsave(&acm->write_lock, flags); + + cur = acm->putbuffer; if (!cur) /* nothing to do */ - return; + goto out; acm->putbuffer = NULL; err = usb_autopm_get_interface_async(acm->control); - spin_lock_irqsave(&acm->write_lock, flags); if (err < 0) { cur->use = 0; acm->putbuffer = cur; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 8367d4f985c1220773a7c6efc38df94d837c7f28..ec965ac5f1f5dfb4ae87f1f8ee830542e436c455 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -216,7 +216,7 @@ struct dwc2_hsotg_ep { unsigned char dir_in; unsigned char index; unsigned char mc; - unsigned char interval; + u16 interval; unsigned int halted:1; unsigned int periodic:1; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0d8e09ccb59c09e92b807ffdaf2fbf14f80fe018..6ef001a83fe28bf0320d322dcc4867304c070055 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3414,12 +3414,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); - dwc2_hsotg_enqueue_setup(hsotg); - - dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", - dwc2_readl(hsotg->regs + DIEPCTL0), - dwc2_readl(hsotg->regs + DOEPCTL0)); - /* clear global NAKs */ 
val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; if (!is_usb_reset) @@ -3430,6 +3424,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, mdelay(3); hsotg->lx_state = DWC2_L0; + + dwc2_hsotg_enqueue_setup(hsotg); + + dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", + dwc2_readl(hsotg->regs + DIEPCTL0), + dwc2_readl(hsotg->regs + DOEPCTL0)); } static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 9bd60ec83ac6d39dafd27061600423c56cc33b68..87484f71b2abbb29f446ee4aec57c4545a0d8f66 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -979,6 +979,24 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, if (dbg_hc(chan)) dev_vdbg(hsotg->dev, "%s()\n", __func__); + + /* + * In buffer DMA or external DMA mode channel can't be halted + * for non-split periodic channels. At the end of the next + * uframe/frame (in the worst case), the core generates a channel + * halted and disables the channel automatically. + */ + if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) || + hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) { + if (!chan->do_split && + (chan->ep_type == USB_ENDPOINT_XFER_ISOC || + chan->ep_type == USB_ENDPOINT_XFER_INT)) { + dev_err(hsotg->dev, "%s() Channel can't be halted\n", + __func__); + return; + } + } + if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status); @@ -2311,10 +2329,22 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup) */ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg) { - u32 hcfg, hfir, otgctl; + u32 hcfg, hfir, otgctl, usbcfg; dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg); + /* Set HS/FS Timeout Calibration to 7 (max available value). 
+ * The number of PHY clocks that the application programs in + * this field is added to the high/full speed interpacket timeout + * duration in the core to account for any additional delays + * introduced by the PHY. This can be required, because the delay + * introduced by the PHY in generating the linestate condition + * can vary from one PHY to another. + */ + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg |= GUSBCFG_TOUTCAL(7); + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); + /* Restart the Phy Clock */ dwc2_writel(0, hsotg->regs + PCGCTL); diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index db4485349b8ad2066f4a547da1b3574e20080bda..29d22d14bdf8aabe48676e4d3d99601f9e0b9ce6 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_USB_DWC3) += dwc3.o dwc3-y := core.o debug_ipc.o -ifneq ($(CONFIG_FTRACE),) +ifneq ($(CONFIG_TRACING),) dwc3-y += trace.o endif diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 10a3ff124290b071d6609b98eac594e24c5e8049..a6e2a21e5089190f72cd11e0699feb670e7580fa 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -219,12 +219,26 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) do { reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (!(reg & DWC3_DCTL_CSFTRST)) - return 0; + goto done; udelay(1); } while (--retries); + phy_exit(dwc->usb3_generic_phy); + phy_exit(dwc->usb2_generic_phy); + return -ETIMEDOUT; + +done: + /* + * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared, + * we must wait at least 50ms before accessing the PHY domain + * (synchronization delay). DWC_usb31 programming guide section 1.3.2. 
+ */ + if (dwc3_is_usb31(dwc)) + msleep(50); + + return 0; } /* diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index b6a75f6315b78fde38e431361a6789aaba9caca5..2e00951fd785585c1b7c9fcf53517ac838ee3328 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -43,6 +43,10 @@ #define dbg_setup(ep_num, req) \ dwc3_dbg_setup(dwc, ep_num, req) + +#define dbg_log_string(fmt, ...) \ + ipc_log_string(dwc->dwc_ipc_log_ctxt,\ + "%s: " fmt, __func__, ##__VA_ARGS__) /** * dwc3_gadget_ep_cmd_string - returns endpoint command string * @cmd: command code diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 3530795bbb8f0d7d934cc848d595433f7bd923ad..fdd0d5aa1f5e4d5c7a1a6dd50ea74aab73a6b734 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -590,9 +590,25 @@ static int dwc3_omap_resume(struct device *dev) return 0; } +static void dwc3_omap_complete(struct device *dev) +{ + struct dwc3_omap *omap = dev_get_drvdata(dev); + + if (extcon_get_state(omap->edev, EXTCON_USB)) + dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID); + else + dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF); + + if (extcon_get_state(omap->edev, EXTCON_USB_HOST)) + dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND); + else + dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT); +} + static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) + .complete = dwc3_omap_complete, }; #define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9560c585993c1b161114d99809dffc604c81cd7c..94a84547699c326aae1dd1fd66851e1ac29a4ba1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3141,6 +3141,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) { u32 reg; + usb_phy_start_link_training(dwc->usb3_phy); + dwc->connected = true; /* @@ -3226,6 +3228,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 
*dwc) u8 speed; dbg_event(0xFF, "CONNECT DONE", 0); + usb_phy_stop_link_training(dwc->usb3_phy); reg = dwc3_readl(dwc->regs, DWC3_DSTS); speed = reg & DWC3_DSTS_CONNECTSPD; dwc->speed = speed; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 26ca3c91bc62363faf7fc84bdf7de8ae1ac763c5..87ecf278d42f7889a6ec2702b473f557b7bf108a 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1539,7 +1539,7 @@ static int count_ext_compat(struct usb_configuration *c) return res; } -static void fill_ext_compat(struct usb_configuration *c, u8 *buf) +static int fill_ext_compat(struct usb_configuration *c, u8 *buf) { int i, count; @@ -1566,10 +1566,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf) buf += 23; } count += 24; - if (count >= 4096) - return; + if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ) + return count; } } + + return count; } static int count_ext_prop(struct usb_configuration *c, int interface) @@ -1614,25 +1616,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf) struct usb_os_desc *d; struct usb_os_desc_ext_prop *ext_prop; int j, count, n, ret; - u8 *start = buf; f = c->interface[interface]; + count = 10; /* header length */ for (j = 0; j < f->os_desc_n; ++j) { if (interface != f->os_desc_table[j].if_id) continue; d = f->os_desc_table[j].os_desc; if (d) list_for_each_entry(ext_prop, &d->ext_prop, entry) { - /* 4kB minus header length */ - n = buf - start; - if (n >= 4086) - return 0; - - count = ext_prop->data_len + + n = ext_prop->data_len + ext_prop->name_len + 14; - if (count > 4086 - n) - return -EINVAL; - usb_ext_prop_put_size(buf, count); + if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ) + return count; + usb_ext_prop_put_size(buf, n); usb_ext_prop_put_type(buf, ext_prop->type); ret = usb_ext_prop_put_name(buf, ext_prop->name, ext_prop->name_len); @@ -1658,11 +1655,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf) 
default: return -EINVAL; } - buf += count; + buf += n; + count += n; } } - return 0; + return count; } /* @@ -1949,6 +1947,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) req->complete = composite_setup_complete; buf = req->buf; os_desc_cfg = cdev->os_desc_config; + w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ); memset(buf, 0, w_length); buf[5] = 0x01; switch (ctrl->bRequestType & USB_RECIP_MASK) { @@ -1972,8 +1971,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) count += 16; /* header */ put_unaligned_le32(count, buf); buf += 16; - fill_ext_compat(os_desc_cfg, buf); - value = w_length; + value = fill_ext_compat(os_desc_cfg, buf); + value = min_t(u16, w_length, value); } break; case USB_RECIP_INTERFACE: @@ -2002,8 +2001,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) interface, buf); if (value < 0) return value; - - value = w_length; + value = min_t(u16, w_length, value); } break; } @@ -2286,8 +2284,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, goto end; } - /* OS feature descriptor length <= 4kB */ - cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); + cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ, + GFP_KERNEL); if (!cdev->os_desc_req->buf) { ret = -ENOMEM; usb_ep_free_request(ep0, cdev->os_desc_req); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 705d196657951596c26717e35bff440999e14702..5bbf99dc3208d01bbe9010db3e633d6bb7c366cd 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3537,7 +3537,7 @@ static int ffs_func_setup(struct usb_function *f, ffs_log("exit"); - return 0; + return USB_GADGET_DELAYED_STATUS; } static bool ffs_func_req_match(struct usb_function *f, diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index 
96af5de4ee7821103425e054232268c94387e00c..d050dd56d67f00f33844fc9cd9e12623b91405d8 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -556,7 +556,17 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, goto done; } spin_lock_irq(&dev->lock); + if (dev->state == STATE_OFFLINE) { + spin_unlock_irq(&dev->lock); + return -ENODEV; + } + if (dev->ep_out->desc) { + if (!cdev) { + spin_unlock_irq(&dev->lock); + return -ENODEV; + } + len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count); if (len > MTP_BULK_BUFFER_SIZE) { spin_unlock_irq(&dev->lock); @@ -1267,7 +1277,10 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) mtp_request_free(dev->rx_req[i], dev->ep_out); while ((req = mtp_req_get(dev, &dev->intr_idle))) mtp_request_free(req, dev->ep_intr); + spin_lock_irq(&dev->lock); dev->state = STATE_OFFLINE; + dev->cdev = NULL; + spin_unlock_irq(&dev->lock); kfree(f->os_desc_table); f->os_desc_n = 0; fi_mtp->func_inst.f = NULL; @@ -1323,7 +1336,9 @@ static void mtp_function_disable(struct usb_function *f) struct usb_composite_dev *cdev = dev->cdev; DBG(cdev, "mtp_function_disable\n"); + spin_lock_irq(&dev->lock); dev->state = STATE_OFFLINE; + spin_unlock_irq(&dev->lock); usb_ep_disable(dev->ep_in); usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_intr); diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index 994ccd9e2f54d9d6336d2ff53b88b03538b58150..fbc8cc49ae2042efe91b430db00a42f4a94b9238 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -1151,7 +1151,7 @@ static struct usb_function *qdss_alloc(struct usb_function_instance *fi) return &usb_qdss->port.function; } -DECLARE_USB_FUNCTION_INIT(qdss, qdss_alloc_inst, qdss_alloc); +DECLARE_USB_FUNCTION(qdss, qdss_alloc_inst, qdss_alloc); static int __init usb_qdss_init(void) { int ret; diff --git a/drivers/usb/gadget/function/f_uac2.c 
b/drivers/usb/gadget/function/f_uac2.c index f05c3f3e6103c61ca322da00fce6d5fc02aa1881..97cb2dfd6369751a5d03f5cbcce78b6e41f54154 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -528,6 +528,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); return ret; } + iad_desc.bFirstInterface = ret; + std_ac_if_desc.bInterfaceNumber = ret; uac2->ac_intf = ret; uac2->ac_alt = 0; diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 28cc75b425eb4e303f33bf6b8a0ede9f273ae756..4a0b075af8b09369800540376bf4050f12f3cb49 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -191,8 +191,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request); void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req) { - ep->ops->free_request(ep, req); trace_usb_ep_free_request(ep, req, 0); + ep->ops->free_request(ep, req); } EXPORT_SYMBOL_GPL(usb_ep_free_request); diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index 6f2f71c054be27f66cf56d56012aaf7332d2dad4..7874c112f3fd8d83395c587d787ef433a5e7c9dd 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -1309,7 +1309,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe) { struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); - if (ep->name) + if (ep->ep.name) nuke(ep, -ESHUTDOWN); } @@ -1697,7 +1697,7 @@ static void dtd_complete_irq(struct fsl_udc *udc) curr_ep = get_ep_by_pipe(udc, i); /* If the ep is configured */ - if (curr_ep->name == NULL) { + if (!curr_ep->ep.name) { WARNING("Invalid EP?"); continue; } diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h index 86d2adafe149a1ccfb811584f1284ce331e34ab5..64eb0f2b5ea00c5b68246f37699cfa41abe3cf06 100644 --- a/drivers/usb/gadget/udc/goku_udc.h +++ b/drivers/usb/gadget/udc/goku_udc.h @@ -28,7 +28,7 @@ struct 
goku_udc_regs { # define INT_EP1DATASET 0x00040 # define INT_EP2DATASET 0x00080 # define INT_EP3DATASET 0x00100 -#define INT_EPnNAK(n) (0x00100 < (n)) /* 0 < n < 4 */ +#define INT_EPnNAK(n) (0x00100 << (n)) /* 0 < n < 4 */ # define INT_EP1NAK 0x00200 # define INT_EP2NAK 0x00400 # define INT_EP3NAK 0x00800 diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 1099465b27f08052298b24ab269ae0d97730e0e8..b4599aa428f36c4364e998b81e1d313bef987f86 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -446,7 +446,8 @@ static int ohci_init (struct ohci_hcd *ohci) struct usb_hcd *hcd = ohci_to_hcd(ohci); /* Accept arbitrarily long scatter-gather lists */ - hcd->self.sg_tablesize = ~0; + if (!(hcd->driver->flags & HCD_LOCAL_MEM)) + hcd->self.sg_tablesize = ~0; if (distrust_firmware) ohci->flags |= OHCI_QUIRK_HUB_POWER; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 6dda3623a276d34f0926ee225a6becee16226a47..e1faee1f860299829bf1beb6ae9ec3105860b63e 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -65,6 +65,23 @@ #define AX_INDXC 0x30 #define AX_DATAC 0x34 +#define PT_ADDR_INDX 0xE8 +#define PT_READ_INDX 0xE4 +#define PT_SIG_1_ADDR 0xA520 +#define PT_SIG_2_ADDR 0xA521 +#define PT_SIG_3_ADDR 0xA522 +#define PT_SIG_4_ADDR 0xA523 +#define PT_SIG_1_DATA 0x78 +#define PT_SIG_2_DATA 0x56 +#define PT_SIG_3_DATA 0x34 +#define PT_SIG_4_DATA 0x12 +#define PT4_P1_REG 0xB521 +#define PT4_P2_REG 0xB522 +#define PT2_P1_REG 0xD520 +#define PT2_P2_REG 0xD521 +#define PT1_P1_REG 0xD522 +#define PT1_P2_REG 0xD523 + #define NB_PCIE_INDX_ADDR 0xe0 #define NB_PCIE_INDX_DATA 0xe4 #define PCIE_P_CNTL 0x10040 @@ -511,6 +528,98 @@ void usb_amd_dev_put(void) } EXPORT_SYMBOL_GPL(usb_amd_dev_put); +/* + * Check if port is disabled in BIOS on AMD Promontory host. + * BIOS Disabled ports may wake on connect/disconnect and need + * driver workaround to keep them disabled. 
+ * Returns true if port is marked disabled. + */ +bool usb_amd_pt_check_port(struct device *device, int port) +{ + unsigned char value, port_shift; + struct pci_dev *pdev; + u16 reg; + + pdev = to_pci_dev(device); + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_1_DATA) + return false; + + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_2_DATA) + return false; + + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_3_DATA) + return false; + + pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR); + + pci_read_config_byte(pdev, PT_READ_INDX, &value); + if (value != PT_SIG_4_DATA) + return false; + + /* Check disabled port setting, if bit is set port is enabled */ + switch (pdev->device) { + case 0x43b9: + case 0x43ba: + /* + * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba) + * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0 + * PT4_P2_REG bits[6..0] represents ports 13 to 7 + */ + if (port > 6) { + reg = PT4_P2_REG; + port_shift = port - 7; + } else { + reg = PT4_P1_REG; + port_shift = port + 1; + } + break; + case 0x43bb: + /* + * device is AMD_PROMONTORYA_2(0x43bb) + * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0 + * PT2_P2_REG bits[5..0] represents ports 9 to 3 + */ + if (port > 2) { + reg = PT2_P2_REG; + port_shift = port - 3; + } else { + reg = PT2_P1_REG; + port_shift = port + 5; + } + break; + case 0x43bc: + /* + * device is AMD_PROMONTORYA_1(0x43bc) + * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0 + * PT1_P2_REG[5..0] represents ports 9 to 4 + */ + if (port > 3) { + reg = PT1_P2_REG; + port_shift = port - 4; + } else { + reg = PT1_P1_REG; + port_shift = port + 4; + } + break; + default: + return false; + } + pci_write_config_word(pdev, PT_ADDR_INDX, reg); + 
pci_read_config_byte(pdev, PT_READ_INDX, &value); + + return !(value & BIT(port_shift)); +} +EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); + /* * Make sure the controller is completely inactive, unable to * generate interrupts or do DMA. diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b68dcb5dd0fdb0d69b2e2e48cc9f984e58bff56d..4ca0d9b7e463c54766e7881d239f8e6319d89c13 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); void sb800_prefetch(struct device *dev, int on); bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); +bool usb_amd_pt_check_port(struct device *device, int port); #else struct pci_dev; static inline void usb_amd_quirk_pll_disable(void) {} @@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} static inline void usb_amd_dev_put(void) {} static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} static inline void sb800_prefetch(struct device *dev, int on) {} +static inline bool usb_amd_pt_check_port(struct device *device, int port) +{ + return false; +} #endif /* CONFIG_USB_PCI */ #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 403446935025b7148cd7cfb87138526e6387b35e..b540f00c85df020226fe8fb90e20632ed5f12f87 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1531,6 +1531,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) t2 |= PORT_WKOC_E | PORT_WKCONN_E; t2 &= ~PORT_WKDISC_E; } + + if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && + (hcd->speed < HCD_USB3)) { + if (usb_amd_pt_check_port(hcd->self.controller, + port_index)) + t2 &= ~PORT_WAKE_BITS; + } } else t2 &= ~PORT_WAKE_BITS; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 
51600d7004865840196a17cd0bcfd0e71b6c7ca9..1d8b5b9f74c89a7cd07823587271e3fa8459b874 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -926,6 +926,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->out_ctx) xhci_free_container_ctx(xhci, dev->out_ctx); + if (dev->udev && dev->udev->slot_id) + dev->udev->slot_id = 0; kfree(xhci->devs[slot_id]); xhci->devs[slot_id] = NULL; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index d79ab0d85924d9c3dd5e13f1342a510193381eed..838d37e79fa22f1e7f22d26847282a04ce907244 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,6 +54,10 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba +#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb +#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 static const char hcd_name[] = "xhci_hcd"; @@ -143,6 +147,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if ((pdev->vendor == PCI_VENDOR_ID_AMD) && + ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) + xhci->quirks |= XHCI_U2_DISABLE_WAKE; + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 5847a891d0c19052ec1541d364392faed5055818..4fa503a37c7bc34a5496ed779efa9dd8dfa29962 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -441,7 +441,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) { struct usb_hcd *hcd = 
dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int ret; /* * xhci_suspend() needs `do_wakeup` to know whether host is allowed @@ -451,12 +450,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) * reconsider this when xhci_plat_suspend enlarges its scope, e.g., * also applies to runtime suspend. */ - ret = xhci_suspend(xhci, device_may_wakeup(dev)); - - if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) - clk_disable_unprepare(xhci->clk); - - return ret; + return xhci_suspend(xhci, device_may_wakeup(dev)); } static int __maybe_unused xhci_plat_resume(struct device *dev) @@ -465,9 +459,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; - if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) - clk_prepare_enable(xhci->clk); - ret = xhci_priv_resume_quirk(hcd); if (ret) return ret; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index af1f539e7a0d86231c703e3382822c8ff5f0aa80..7dbc66fb514f05299c61b27148249c46c7ae84f5 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -4772,6 +4772,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) * quirks */ struct device *dev = hcd->self.sysdev; + unsigned int minor_rev; int retval; /* Accept arbitrarily long scatter-gather lists */ @@ -4799,12 +4800,19 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) */ hcd->has_tt = 1; } else { - /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */ - if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) { - xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); + /* + * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol + * minor revision instead of sbrn + */ + minor_rev = xhci->usb3_rhub.min_rev; + if (minor_rev) { hcd->speed = HCD_USB31; hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; } + xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n", + minor_rev, + minor_rev ? 
"Enhanced" : ""); + /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. */ diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 04d618f74ff9dafae82fda7b3fcb41efba2c16fe..f4e046001a268b48dfe24e376e8095b97141effe 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1838,7 +1838,7 @@ struct xhci_hcd { /* For controller with a broken Port Disable implementation */ #define XHCI_BROKEN_PORT_PED (1 << 25) #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) -/* Reserved. It was XHCI_U2_DISABLE_WAKE */ +#define XHCI_U2_DISABLE_WAKE (1 << 27) #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) #define XHCI_SUSPEND_DELAY (1 << 30) diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 297ad1cfcd666195263bb056b738c3a8fb7900da..9321891e4533825e3531ea05668c8c5e6052d3da 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -24,6 +24,7 @@ #include #include #include +#include enum core_ldo_levels { CORE_LEVEL_NONE = 0, @@ -79,6 +80,9 @@ enum core_ldo_levels { #define USB3_MODE BIT(0) /* enables USB3 mode */ #define DP_MODE BIT(1) /* enables DP mode */ +/* USB3 Gen2 link training indicator */ +#define RX_EQUALIZATION_IN_PROGRESS BIT(3) + enum qmp_phy_rev_reg { USB3_PHY_PCS_STATUS, USB3_PHY_AUTONOMOUS_MODE_CTRL, @@ -96,6 +100,9 @@ enum qmp_phy_rev_reg { USB3_DP_COM_TYPEC_CTRL, USB3_DP_COM_SWI_CTRL, USB3_PCS_MISC_CLAMP_ENABLE, + USB3_DP_PCS_PCS_STATUS2, + USB3_DP_PCS_INSIG_SW_CTRL3, + USB3_DP_PCS_INSIG_MX_CTRL3, /* TypeC port select configuration (optional) */ USB3_PHY_PCS_MISC_TYPEC_CTRL, USB3_PHY_REG_MAX, @@ -136,6 +143,7 @@ struct msm_ssphy_qmp { int reg_offset_cnt; u32 *qmp_phy_init_seq; int init_seq_len; + struct hrtimer timer; }; static const struct of_device_id msm_usb_id_table[] = { @@ -634,6 +642,7 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) /* Make sure above write completed with PHY */ wmb(); + 
hrtimer_cancel(&phy->timer); msm_ssphy_qmp_enable_clks(phy, false); phy->in_suspend = true; msm_ssphy_power_enable(phy, 0); @@ -658,6 +667,74 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) return 0; } +static enum hrtimer_restart timer_fn(struct hrtimer *timer) +{ + struct msm_ssphy_qmp *phy = + container_of(timer, struct msm_ssphy_qmp, timer); + u8 status2, status2_1, sw1, mx1, sw2, mx2; + int timeout = 15000; + + status2_1 = sw1 = sw2 = mx1 = mx2 = 0; + + status2 = readl_relaxed(phy->base + + phy->phy_reg[USB3_DP_PCS_PCS_STATUS2]); + if (status2 & RX_EQUALIZATION_IN_PROGRESS) { + while (timeout > 0) { + status2_1 = readl_relaxed(phy->base + + phy->phy_reg[USB3_DP_PCS_PCS_STATUS2]); + if (status2_1 & RX_EQUALIZATION_IN_PROGRESS) { + timeout -= 500; + udelay(500); + continue; + } + + writel_relaxed(0x08, phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]); + writel_relaxed(0x08, phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]); + sw1 = readl_relaxed(phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]); + mx1 = readl_relaxed(phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]); + udelay(1); + writel_relaxed(0x0, phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]); + writel_relaxed(0x0, phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]); + sw2 = readl_relaxed(phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_SW_CTRL3]); + mx2 = readl_relaxed(phy->base + + phy->phy_reg[USB3_DP_PCS_INSIG_MX_CTRL3]); + + break; + } + } + + dev_dbg(phy->phy.dev, + "st=%x st2=%x sw1=%x sw2=%x mx1=%x mx2=%x timeout=%d\n", + status2, status2_1, sw1, sw2, mx1, mx2, timeout); + + hrtimer_forward_now(timer, ms_to_ktime(1)); + + return HRTIMER_RESTART; +} + +static int msm_ssphy_qmp_link_training(struct usb_phy *uphy, bool start) +{ + struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp, + phy); + + if (start) { + hrtimer_start(&phy->timer, 0, HRTIMER_MODE_REL); + dev_dbg(uphy->dev, "link training start\n"); + } else { + 
hrtimer_cancel(&phy->timer); + dev_dbg(uphy->dev, "link training stop\n"); + } + + return 0; +} + static int msm_ssphy_qmp_notify_connect(struct usb_phy *uphy, enum usb_device_speed speed) { @@ -680,6 +757,7 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy, phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + hrtimer_cancel(&phy->timer); dev_dbg(uphy->dev, "QMP phy disconnect notification\n"); dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected); phy->cable_connected = false; @@ -979,12 +1057,19 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override")) phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE; + hrtimer_init(&phy->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phy->timer.function = timer_fn; + phy->phy.dev = dev; phy->phy.init = msm_ssphy_qmp_init; phy->phy.set_suspend = msm_ssphy_qmp_set_suspend; phy->phy.notify_connect = msm_ssphy_qmp_notify_connect; phy->phy.notify_disconnect = msm_ssphy_qmp_notify_disconnect; + if (of_property_read_bool(dev->of_node, "qcom,link-training-reset")) + phy->phy.link_training = msm_ssphy_qmp_link_training; + + if (phy->phy.type == USB_PHY_TYPE_USB3_AND_DP) phy->phy.reset = msm_ssphy_qmp_dp_combo_reset; else diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig index eeefa29f8aa2bba09ad77892fb75306668d07fe9..a20b65cb6678f99821d17990cf873b9dbc6b58f5 100644 --- a/drivers/usb/usbip/Kconfig +++ b/drivers/usb/usbip/Kconfig @@ -27,7 +27,7 @@ config USBIP_VHCI_HCD config USBIP_VHCI_HC_PORTS int "Number of ports per USB/IP virtual host controller" - range 1 31 + range 1 15 default 8 depends on USBIP_VHCI_HCD ---help--- diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h index 910f027773aa0ebe696521e14113b04772b7591d..84c0599b45b7bc58e1c60971b15660b17ee1b75c 100644 --- a/drivers/usb/usbip/stub.h +++ b/drivers/usb/usbip/stub.h @@ -87,6 +87,7 
@@ struct bus_id_priv { struct stub_device *sdev; struct usb_device *udev; char shutdown_busid; + spinlock_t busid_lock; }; /* stub_priv is allocated from stub_priv_cache */ @@ -97,6 +98,7 @@ extern struct usb_device_driver stub_driver; /* stub_main.c */ struct bus_id_priv *get_busid_priv(const char *busid); +void put_busid_priv(struct bus_id_priv *bid); int del_match_busid(char *busid); void stub_device_cleanup_urbs(struct stub_device *sdev); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index b8915513fc8459e23c3e540f8cfac6b597fa0119..cc847f2edf383d6f650d6e7c0d0ba7bdd6e3d119 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -314,9 +314,9 @@ static int stub_probe(struct usb_device *udev) struct stub_device *sdev = NULL; const char *udev_busid = dev_name(&udev->dev); struct bus_id_priv *busid_priv; - int rc; + int rc = 0; - dev_dbg(&udev->dev, "Enter\n"); + dev_dbg(&udev->dev, "Enter probe\n"); /* check we should claim or not by busid_table */ busid_priv = get_busid_priv(udev_busid); @@ -331,13 +331,15 @@ static int stub_probe(struct usb_device *udev) * other matched drivers by the driver core. * See driver_probe_device() in driver/base/dd.c */ - return -ENODEV; + rc = -ENODEV; + goto call_put_busid_priv; } if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) { dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n", udev_busid); - return -ENODEV; + rc = -ENODEV; + goto call_put_busid_priv; } if (!strcmp(udev->bus->bus_name, "vhci_hcd")) { @@ -345,13 +347,16 @@ static int stub_probe(struct usb_device *udev) "%s is attached on vhci_hcd... 
skip!\n", udev_busid); - return -ENODEV; + rc = -ENODEV; + goto call_put_busid_priv; } /* ok, this is my device */ sdev = stub_device_alloc(udev); - if (!sdev) - return -ENOMEM; + if (!sdev) { + rc = -ENOMEM; + goto call_put_busid_priv; + } dev_info(&udev->dev, "usbip-host: register new device (bus %u dev %u)\n", @@ -383,7 +388,9 @@ static int stub_probe(struct usb_device *udev) } busid_priv->status = STUB_BUSID_ALLOC; - return 0; + rc = 0; + goto call_put_busid_priv; + err_files: usb_hub_release_port(udev->parent, udev->portnum, (struct usb_dev_state *) udev); @@ -393,6 +400,9 @@ static int stub_probe(struct usb_device *udev) busid_priv->sdev = NULL; stub_device_free(sdev); + +call_put_busid_priv: + put_busid_priv(busid_priv); return rc; } @@ -418,7 +428,7 @@ static void stub_disconnect(struct usb_device *udev) struct bus_id_priv *busid_priv; int rc; - dev_dbg(&udev->dev, "Enter\n"); + dev_dbg(&udev->dev, "Enter disconnect\n"); busid_priv = get_busid_priv(udev_busid); if (!busid_priv) { @@ -431,7 +441,7 @@ static void stub_disconnect(struct usb_device *udev) /* get stub_device */ if (!sdev) { dev_err(&udev->dev, "could not get device"); - return; + goto call_put_busid_priv; } dev_set_drvdata(&udev->dev, NULL); @@ -446,12 +456,12 @@ static void stub_disconnect(struct usb_device *udev) (struct usb_dev_state *) udev); if (rc) { dev_dbg(&udev->dev, "unable to release port\n"); - return; + goto call_put_busid_priv; } /* If usb reset is called from event handler */ if (usbip_in_eh(current)) - return; + goto call_put_busid_priv; /* shutdown the current connection */ shutdown_busid(busid_priv); @@ -462,12 +472,11 @@ static void stub_disconnect(struct usb_device *udev) busid_priv->sdev = NULL; stub_device_free(sdev); - if (busid_priv->status == STUB_BUSID_ALLOC) { + if (busid_priv->status == STUB_BUSID_ALLOC) busid_priv->status = STUB_BUSID_ADDED; - } else { - busid_priv->status = STUB_BUSID_OTHER; - del_match_busid((char *)udev_busid); - } + +call_put_busid_priv: + 
put_busid_priv(busid_priv); } #ifdef CONFIG_PM diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index b59a253a8479dbbb97a5604e7402b87f57a3cd71..108dd65fbfbc580e5c6c9365cfc83a117ffbec3d 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -28,6 +28,7 @@ #define DRIVER_DESC "USB/IP Host Driver" struct kmem_cache *stub_priv_cache; + /* * busid_tables defines matching busids that usbip can grab. A user can change * dynamically what device is locally used and what device is exported to a @@ -39,6 +40,8 @@ static spinlock_t busid_table_lock; static void init_busid_table(void) { + int i; + /* * This also sets the bus_table[i].status to * STUB_BUSID_OTHER, which is 0. @@ -46,6 +49,9 @@ static void init_busid_table(void) memset(busid_table, 0, sizeof(busid_table)); spin_lock_init(&busid_table_lock); + + for (i = 0; i < MAX_BUSID; i++) + spin_lock_init(&busid_table[i].busid_lock); } /* @@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid) int i; int idx = -1; - for (i = 0; i < MAX_BUSID; i++) + for (i = 0; i < MAX_BUSID; i++) { + spin_lock(&busid_table[i].busid_lock); if (busid_table[i].name[0]) if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) { idx = i; + spin_unlock(&busid_table[i].busid_lock); break; } + spin_unlock(&busid_table[i].busid_lock); + } return idx; } +/* Returns holding busid_lock. 
Should call put_busid_priv() to unlock */ struct bus_id_priv *get_busid_priv(const char *busid) { int idx; @@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid) spin_lock(&busid_table_lock); idx = get_busid_idx(busid); - if (idx >= 0) + if (idx >= 0) { bid = &(busid_table[idx]); + /* get busid_lock before returning */ + spin_lock(&bid->busid_lock); + } spin_unlock(&busid_table_lock); return bid; } +void put_busid_priv(struct bus_id_priv *bid) +{ + if (bid) + spin_unlock(&bid->busid_lock); +} + static int add_match_busid(char *busid) { int i; @@ -92,15 +112,19 @@ static int add_match_busid(char *busid) goto out; } - for (i = 0; i < MAX_BUSID; i++) + for (i = 0; i < MAX_BUSID; i++) { + spin_lock(&busid_table[i].busid_lock); if (!busid_table[i].name[0]) { strlcpy(busid_table[i].name, busid, BUSID_SIZE); if ((busid_table[i].status != STUB_BUSID_ALLOC) && (busid_table[i].status != STUB_BUSID_REMOV)) busid_table[i].status = STUB_BUSID_ADDED; ret = 0; + spin_unlock(&busid_table[i].busid_lock); break; } + spin_unlock(&busid_table[i].busid_lock); + } out: spin_unlock(&busid_table_lock); @@ -121,6 +145,8 @@ int del_match_busid(char *busid) /* found */ ret = 0; + spin_lock(&busid_table[idx].busid_lock); + if (busid_table[idx].status == STUB_BUSID_OTHER) memset(busid_table[idx].name, 0, BUSID_SIZE); @@ -128,6 +154,7 @@ int del_match_busid(char *busid) (busid_table[idx].status != STUB_BUSID_ADDED)) busid_table[idx].status = STUB_BUSID_REMOV; + spin_unlock(&busid_table[idx].busid_lock); out: spin_unlock(&busid_table_lock); @@ -140,9 +167,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf) char *out = buf; spin_lock(&busid_table_lock); - for (i = 0; i < MAX_BUSID; i++) + for (i = 0; i < MAX_BUSID; i++) { + spin_lock(&busid_table[i].busid_lock); if (busid_table[i].name[0]) out += sprintf(out, "%s ", busid_table[i].name); + spin_unlock(&busid_table[i].busid_lock); + } spin_unlock(&busid_table_lock); out += sprintf(out, "\n"); @@ -183,6 
+213,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf, } static DRIVER_ATTR_RW(match_busid); +static int do_rebind(char *busid, struct bus_id_priv *busid_priv) +{ + int ret; + + /* device_attach() callers should hold parent lock for USB */ + if (busid_priv->udev->dev.parent) + device_lock(busid_priv->udev->dev.parent); + ret = device_attach(&busid_priv->udev->dev); + if (busid_priv->udev->dev.parent) + device_unlock(busid_priv->udev->dev.parent); + if (ret < 0) { + dev_err(&busid_priv->udev->dev, "rebind failed\n"); + return ret; + } + return 0; +} + +static void stub_device_rebind(void) +{ +#if IS_MODULE(CONFIG_USBIP_HOST) + struct bus_id_priv *busid_priv; + int i; + + /* update status to STUB_BUSID_OTHER so probe ignores the device */ + spin_lock(&busid_table_lock); + for (i = 0; i < MAX_BUSID; i++) { + if (busid_table[i].name[0] && + busid_table[i].shutdown_busid) { + busid_priv = &(busid_table[i]); + busid_priv->status = STUB_BUSID_OTHER; + } + } + spin_unlock(&busid_table_lock); + + /* now run rebind - no need to hold locks. 
driver files are removed */ + for (i = 0; i < MAX_BUSID; i++) { + if (busid_table[i].name[0] && + busid_table[i].shutdown_busid) { + busid_priv = &(busid_table[i]); + do_rebind(busid_table[i].name, busid_priv); + } + } +#endif +} + static ssize_t rebind_store(struct device_driver *dev, const char *buf, size_t count) { @@ -200,16 +275,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf, if (!bid) return -ENODEV; - /* device_attach() callers should hold parent lock for USB */ - if (bid->udev->dev.parent) - device_lock(bid->udev->dev.parent); - ret = device_attach(&bid->udev->dev); - if (bid->udev->dev.parent) - device_unlock(bid->udev->dev.parent); - if (ret < 0) { - dev_err(&bid->udev->dev, "rebind failed\n"); + /* mark the device for deletion so probe ignores it during rescan */ + bid->status = STUB_BUSID_OTHER; + /* release the busid lock */ + put_busid_priv(bid); + + ret = do_rebind((char *) buf, bid); + if (ret < 0) return ret; - } + + /* delete device from busid_table */ + del_match_busid((char *) buf); return count; } @@ -331,6 +407,9 @@ static void __exit usbip_host_exit(void) */ usb_deregister_device_driver(&stub_driver); + /* initiate scan to attach devices */ + stub_device_rebind(); + kmem_cache_destroy(stub_priv_cache); } diff --git a/drivers/video/backlight/qcom-spmi-wled.c b/drivers/video/backlight/qcom-spmi-wled.c index 14400fa3162db84e855e0c7c77266936cd8784a8..46c77a83b6f170eee319eab3432f56a2aa46e2a4 100644 --- a/drivers/video/backlight/qcom-spmi-wled.c +++ b/drivers/video/backlight/qcom-spmi-wled.c @@ -19,12 +19,16 @@ #include #include #include +#include #include #include #include #include #include +#include #include +#include +#include "../../leds/leds.h" /* General definitions */ #define WLED_DEFAULT_BRIGHTNESS 2048 @@ -96,24 +100,30 @@ #define WLED_SINK_BRIGHT_LSB_REG(n) (0x57 + (n * 0x10)) #define WLED_SINK_BRIGHT_MSB_REG(n) (0x58 + (n * 0x10)) -enum wled_version { - WLED_PMI8998 = 4, - WLED_PM660L, - WLED_PM8150L, -}; 
+/* WLED5 specific control registers */ +#define WLED5_CTRL_STATUS 0x07 -static const int version_table[] = { - [0] = WLED_PMI8998, - [1] = WLED_PM660L, - [2] = WLED_PM8150L, -}; +#define WLED5_CTRL_SH_FOR_SOFTSTART_REG 0x58 +#define WLED5_SOFTSTART_EN_SH_SS BIT(0) -/* WLED5 specific control registers */ #define WLED5_CTRL_OVP_INT_CTL_REG 0x5f #define WLED5_OVP_INT_N_MASK GENMASK(6, 4) #define WLED5_OVP_INT_N_SHIFT 4 #define WLED5_OVP_INT_TIMER_MASK GENMASK(2, 0) +#define WLED5_CTRL_PRE_FLASH_BRT_REG 0x61 +#define WLED5_CTRL_PRE_FLASH_SYNC_REG 0x62 +#define WLED5_CTRL_FLASH_BRT_REG 0x63 +#define WLED5_CTRL_FLASH_SYNC_REG 0x64 + +#define WLED5_CTRL_FLASH_STEP_CTL_REG 0x65 +#define WLED5_CTRL_FLASH_STEP_MASK GENMASK(2, 0) + +#define WLED5_CTRL_FLASH_HDRM_REG 0x69 + +#define WLED5_CTRL_TEST4_REG 0xe5 +#define WLED5_TEST4_EN_SH_SS BIT(5) + /* WLED5 specific sink registers */ #define WLED5_SINK_MOD_A_EN_REG 0x50 #define WLED5_SINK_MOD_B_EN_REG 0x60 @@ -149,6 +159,45 @@ static const int version_table[] = { #define WLED5_SINK_SRC_SEL_MODB 1 #define WLED5_SINK_SRC_SEL_MASK GENMASK(1, 0) +#define WLED5_SINK_FLASH_CTL_REG 0xb0 +#define WLED5_SINK_FLASH_EN BIT(7) +#define WLED5_SINK_PRE_FLASH_EN BIT(6) + +#define WLED5_SINK_FLASH_SINK_EN_REG 0xb1 + +#define WLED5_SINK_FLASH_FSC_REG 0xb2 +#define WLED5_SINK_FLASH_FSC_MASK GENMASK(3, 0) + +#define WLED5_SINK_FLASH_SYNC_BIT_REG 0xb3 +#define WLED5_SINK_FLASH_FSC_SYNC_EN BIT(0) + +#define WLED5_SINK_FLASH_TIMER_CTL_REG 0xb5 +#define WLED5_DIS_PRE_FLASH_TIMER BIT(7) +#define WLED5_PRE_FLASH_SAFETY_TIME GENMASK(6, 4) +#define WLED5_PRE_FLASH_SAFETY_SHIFT 4 +#define WLED5_DIS_FLASH_TIMER BIT(3) +#define WLED5_FLASH_SAFETY_TIME GENMASK(2, 0) + +#define WLED5_SINK_FLASH_SHDN_CLR_REG 0xb6 + +enum wled_version { + WLED_PMI8998 = 4, + WLED_PM660L, + WLED_PM8150L, +}; + +enum wled_flash_mode { + WLED_FLASH_OFF, + WLED_PRE_FLASH, + WLED_FLASH, +}; + +static const int version_table[] = { + [0] = WLED_PMI8998, + [1] = WLED_PM660L, + [2] = 
WLED_PM8150L, +}; + struct wled_config { int boost_i_limit; int ovp; @@ -162,6 +211,12 @@ struct wled_config { bool auto_calib_enabled; }; +struct wled_flash_config { + int fs_current; + int step_delay; + int safety_timer; +}; + struct wled { const char *name; struct platform_device *pdev; @@ -180,12 +235,23 @@ struct wled { const int *version; int sc_irq; int ovp_irq; + int flash_irq; + int pre_flash_irq; bool prev_state; bool ovp_irq_disabled; bool auto_calib_done; bool force_mod_disable; bool cabc_disabled; int (*cabc_config)(struct wled *wled, bool enable); + + struct led_classdev flash_cdev; + struct led_classdev torch_cdev; + struct led_classdev switch_cdev; + struct wled_flash_config fparams; + struct wled_flash_config tparams; + spinlock_t flash_lock; + enum wled_flash_mode flash_mode; + u8 num_strings; }; enum wled5_mod_sel { @@ -284,6 +350,54 @@ static int wled_sync_toggle(struct wled *wled) return rc; } +static int wled5_sample_hold_control(struct wled *wled, u16 brightness, + bool enable) +{ + int rc; + u16 offset, threshold; + u8 val, mask; + + /* + * Control S_H only when module was disabled and a lower brightness + * of < 1% is set. + */ + if (wled->prev_state) + return 0; + + /* If CABC is enabled, then don't do anything for now */ + if (!wled->cabc_disabled) + return 0; + + /* 1 % threshold to enable the workaround */ + threshold = DIV_ROUND_UP(wled->max_brightness, 100); + + /* If brightness is > 1%, don't do anything */ + if (brightness > threshold) + return 0; + + /* Wait for ~5ms before enabling S_H */ + if (enable) + usleep_range(5000, 5010); + + /* Disable S_H if brightness is < 1% */ + if (wled->pmic_rev_id->rev4 == PM8150L_V3P0_REV4) { + offset = WLED5_CTRL_SH_FOR_SOFTSTART_REG; + val = enable ? WLED5_SOFTSTART_EN_SH_SS : 0; + mask = WLED5_SOFTSTART_EN_SH_SS; + } else { + offset = WLED5_CTRL_TEST4_REG; + val = enable ? 
WLED5_TEST4_EN_SH_SS : 0; + mask = WLED5_TEST4_EN_SH_SS; + } + + rc = regmap_update_bits(wled->regmap, + wled->ctrl_addr + offset, mask, val); + if (rc < 0) + pr_err("Error in writing offset 0x%02X rc=%d\n", offset, rc); + + return rc; +} + static int wled5_set_brightness(struct wled *wled, u16 brightness) { int rc, offset; @@ -376,6 +490,15 @@ static int wled_update_status(struct backlight_device *bl) goto unlock_mutex; } + if (is_wled5(wled)) { + rc = wled5_sample_hold_control(wled, brightness, false); + if (rc < 0) { + pr_err("wled disabling sample and hold failed rc:%d\n", + rc); + goto unlock_mutex; + } + } + if (!!brightness != wled->prev_state) { rc = wled_module_enable(wled, !!brightness); if (rc < 0) { @@ -383,6 +506,15 @@ static int wled_update_status(struct backlight_device *bl) goto unlock_mutex; } } + + if (is_wled5(wled)) { + rc = wled5_sample_hold_control(wled, brightness, true); + if (rc < 0) { + pr_err("wled enabling sample and hold failed rc:%d\n", + rc); + goto unlock_mutex; + } + } } else { rc = wled_module_enable(wled, brightness); if (rc < 0) { @@ -886,6 +1018,42 @@ static irqreturn_t wled_ovp_irq_handler(int irq, void *_wled) return IRQ_HANDLED; } +static irqreturn_t wled_flash_irq_handler(int irq, void *_wled) +{ + struct wled *wled = _wled; + int rc; + u32 val; + + if (irq == wled->flash_irq) + pr_debug("flash irq fired\n"); + else if (irq == wled->pre_flash_irq) + pr_debug("pre_flash irq fired\n"); + + rc = regmap_read(wled->regmap, + wled->ctrl_addr + WLED_CTRL_FAULT_STATUS, &val); + if (!rc) + pr_debug("WLED_FAULT_STATUS: 0x%x\n", val); + + rc = regmap_read(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_STATUS, &val); + if (!rc) + pr_debug("WLED_STATUS: 0x%x\n", val); + + return IRQ_HANDLED; +} + +static inline u8 get_wled_safety_time(int time_ms) +{ + int i, table[8] = {50, 100, 200, 400, 600, 800, 1000, 1200}; + + for (i = 0; i < ARRAY_SIZE(table); i++) { + if (time_ms == table[i]) + return i; + } + + return 0; +} + static int 
wled5_setup(struct wled *wled) { int rc, temp, i; @@ -1001,6 +1169,24 @@ static int wled5_setup(struct wled *wled) } } + if (wled->flash_irq >= 0) { + rc = devm_request_threaded_irq(&wled->pdev->dev, + wled->flash_irq, NULL, wled_flash_irq_handler, + IRQF_ONESHOT, "wled_flash_irq", wled); + if (rc < 0) + pr_err("Unable to request flash(%d) IRQ(err:%d)\n", + wled->flash_irq, rc); + } + + if (wled->pre_flash_irq >= 0) { + rc = devm_request_threaded_irq(&wled->pdev->dev, + wled->pre_flash_irq, NULL, + wled_flash_irq_handler, IRQF_ONESHOT, + "wled_pre_flash_irq", wled); + if (rc < 0) + pr_err("Unable to request pre_flash(%d) IRQ(err:%d)\n", + wled->pre_flash_irq, rc); + } return 0; } @@ -1249,6 +1435,527 @@ static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx) return idx; } +static int wled_get_max_current(struct led_classdev *led_cdev, + int *max_current) +{ + struct wled *wled; + bool flash; + + if (!strcmp(led_cdev->name, "wled_flash")) { + wled = container_of(led_cdev, struct wled, flash_cdev); + flash = true; + } else if (!strcmp(led_cdev->name, "wled_torch")) { + wled = container_of(led_cdev, struct wled, torch_cdev); + flash = false; + } else { + return -ENODEV; + } + + if (flash) + *max_current = wled->flash_cdev.max_brightness; + else + *max_current = wled->torch_cdev.max_brightness; + + return 0; +} + +static int wled_get_max_avail_current(struct led_classdev *led_cdev, + int *max_current) +{ + struct wled *wled; + + if (!strcmp(led_cdev->name, "wled_switch")) + wled = container_of(led_cdev, struct wled, switch_cdev); + else + return -ENODEV; + + /* + * For now, return the max brightness. Later this will be replaced with + * the available current predicted based on battery parameters. 
+ */ + + *max_current = max(wled->flash_cdev.max_brightness, + wled->torch_cdev.max_brightness); + return 0; +} + +static ssize_t wled_flash_max_avail_current_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + int rc, max_current = 0; + + rc = wled_get_max_avail_current(led_cdev, &max_current); + if (rc < 0) + pr_err("query max current failed, rc=%d\n", rc); + + return snprintf(buf, PAGE_SIZE, "%d\n", max_current); +} + +static struct device_attribute wled_flash_attrs[] = { + __ATTR(max_avail_current, 0664, wled_flash_max_avail_current_show, + NULL), +}; + +int wled_flash_led_prepare(struct led_trigger *trig, int options, + int *max_current) +{ + struct led_classdev *led_cdev; + int rc; + + if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) { + pr_err("Invalid options %d\n", options); + return -EINVAL; + } + + if (!trig) { + pr_err("Invalid led_trigger provided\n"); + return -EINVAL; + } + + led_cdev = trigger_to_lcdev(trig); + if (!led_cdev) { + pr_err("Invalid led_cdev in trigger %s\n", trig->name); + return -EINVAL; + } + + switch (options) { + case QUERY_MAX_CURRENT: + rc = wled_get_max_current(led_cdev, max_current); + if (rc < 0) { + pr_err("Error in getting max_current for %s\n", + led_cdev->name); + return rc; + } + case QUERY_MAX_AVAIL_CURRENT: + rc = wled_get_max_avail_current(led_cdev, max_current); + if (rc < 0) { + pr_err("Error in getting max_avail_current for %s\n", + led_cdev->name); + return rc; + } + break; + case ENABLE_REGULATOR: + case DISABLE_REGULATOR: + /* Not supported */ + return 0; + default: + return -EINVAL; + }; + + return 0; +} +EXPORT_SYMBOL(wled_flash_led_prepare); + +static int wled_flash_set_step_delay(struct wled *wled, int step_delay) +{ + int rc, table[8] = {50, 100, 150, 200, 250, 300, 350, 400}; + u8 val; + + if (step_delay < table[0]) + val = 0; + else if (step_delay > table[7]) + val = 7; + else + val = DIV_ROUND_CLOSEST(step_delay, 50) - 1; + + rc 
= regmap_update_bits(wled->regmap, wled->ctrl_addr + + WLED5_CTRL_FLASH_STEP_CTL_REG, + WLED5_CTRL_FLASH_STEP_MASK, val); + if (rc < 0) + pr_err("Error in configuring step delay, rc:%d\n", rc); + + return rc; +} + +static int wled_flash_set_fsc(struct wled *wled, enum led_brightness brightness, + int fs_current_max) +{ + int rc, fs_current; + u8 val; + + if (!wled->num_strings) { + pr_err("Incorrect number of strings\n"); + return -EINVAL; + } + + fs_current = (int)brightness / wled->num_strings; + if (fs_current > fs_current_max) + fs_current = fs_current_max; + + /* Each LSB is 5 mA */ + val = DIV_ROUND_CLOSEST(fs_current, 5); + rc = regmap_update_bits(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_FSC_REG, + WLED5_SINK_FLASH_FSC_MASK, val); + if (rc < 0) { + pr_err("Error in configuring flash_fsc, rc:%d\n", rc); + return rc; + } + + /* Write 0 followed by 1 to sync FSC */ + rc = regmap_write(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_SYNC_BIT_REG, 0); + if (rc < 0) { + pr_err("Error in configuring flash_sync, rc:%d\n", rc); + return rc; + } + + rc = regmap_write(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_SYNC_BIT_REG, + WLED5_SINK_FLASH_FSC_SYNC_EN); + if (rc < 0) + pr_err("Error in configuring flash_sync, rc:%d\n", rc); + + return rc; +} + +static void wled_flash_brightness_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct wled *wled = container_of(cdev, struct wled, flash_cdev); + int rc; + + spin_lock(&wled->flash_lock); + if (brightness) { + rc = wled_flash_set_step_delay(wled, wled->fparams.step_delay); + if (rc < 0) + goto out; + } + + rc = wled_flash_set_fsc(wled, brightness, wled->fparams.fs_current); + if (rc < 0) + goto out; + + wled->flash_mode = brightness ? 
WLED_FLASH : WLED_FLASH_OFF; +out: + spin_unlock(&wled->flash_lock); +} + +static void wled_torch_brightness_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct wled *wled = container_of(cdev, struct wled, torch_cdev); + int rc; + + spin_lock(&wled->flash_lock); + if (brightness) { + rc = wled_flash_set_step_delay(wled, wled->tparams.step_delay); + if (rc < 0) + goto out; + } + + rc = wled_flash_set_fsc(wled, brightness, wled->tparams.fs_current); + if (rc < 0) + goto out; + + wled->flash_mode = brightness ? WLED_PRE_FLASH : WLED_FLASH_OFF; +out: + spin_unlock(&wled->flash_lock); +} + +static void wled_switch_brightness_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct wled *wled = container_of(cdev, struct wled, switch_cdev); + int rc; + u8 val; + + if (brightness && wled->flash_mode == WLED_FLASH_OFF) + return; + + spin_lock(&wled->flash_lock); + if (wled->flash_mode == WLED_PRE_FLASH) + val = brightness ? WLED5_SINK_PRE_FLASH_EN : 0; + else + val = brightness ? 
WLED5_SINK_FLASH_EN : 0; + + rc = regmap_write(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_CTL_REG, val); + if (rc < 0) + pr_err("Error in configuring flash_ctl, rc:%d\n", rc); + + if (!brightness) { + rc = regmap_write(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_SHDN_CLR_REG, + 1); + if (rc < 0) + pr_err("Error in configuring flash_shdn_clr, rc:%d\n", + rc); + } + + spin_unlock(&wled->flash_lock); +} + +static int wled_flash_device_register(struct wled *wled) +{ + int rc, i, max_brightness = 0; + + /* Not supported */ + if (is_wled4(wled)) + return 0; + + spin_lock_init(&wled->flash_lock); + + /* flash */ + for (i = 0; (wled->cfg.string_cfg >> i) != 0; i++) + max_brightness += wled->fparams.fs_current; + + wled->flash_cdev.name = "wled_flash"; + wled->flash_cdev.max_brightness = max_brightness; + wled->flash_cdev.brightness_set = wled_flash_brightness_set; + rc = devm_led_classdev_register(&wled->pdev->dev, &wled->flash_cdev); + if (rc < 0) + return rc; + + /* torch */ + for (max_brightness = 0, i = 0; (wled->cfg.string_cfg >> i) != 0; i++) + max_brightness += wled->tparams.fs_current; + + wled->torch_cdev.name = "wled_torch"; + wled->torch_cdev.max_brightness = max_brightness; + wled->torch_cdev.brightness_set = wled_torch_brightness_set; + rc = devm_led_classdev_register(&wled->pdev->dev, &wled->torch_cdev); + if (rc < 0) + return rc; + + /* switch */ + wled->switch_cdev.name = "wled_switch"; + wled->switch_cdev.brightness_set = wled_switch_brightness_set; + wled->switch_cdev.flags |= LED_KEEP_TRIGGER; + rc = devm_led_classdev_register(&wled->pdev->dev, &wled->switch_cdev); + if (rc < 0) + return rc; + + for (i = 0; i < ARRAY_SIZE(wled_flash_attrs); i++) { + rc = sysfs_create_file(&wled->switch_cdev.dev->kobj, + &wled_flash_attrs[i].attr); + if (rc < 0) { + pr_err("sysfs creation failed, rc=%d\n", rc); + goto sysfs_fail; + } + } + + return 0; + +sysfs_fail: + for (--i; i >= 0; i--) + sysfs_remove_file(&wled->switch_cdev.dev->kobj, + 
&wled_flash_attrs[i].attr); + + return rc; +} + +static int wled_flash_configure(struct wled *wled) +{ + int rc; + struct device_node *temp; + struct device *dev = &wled->pdev->dev; + const char *cdev_name; + + /* Not supported */ + if (is_wled4(wled)) + return 0; + + for_each_available_child_of_node(wled->pdev->dev.of_node, temp) { + rc = of_property_read_string(temp, "label", &cdev_name); + if (rc < 0) + continue; + + if (!strcmp(cdev_name, "flash")) { + /* Value read in mA */ + wled->fparams.fs_current = 40; + rc = of_property_read_u32(temp, "qcom,wled-flash-fsc", + &wled->fparams.fs_current); + if (!rc) { + if (wled->fparams.fs_current <= 0 || + wled->fparams.fs_current > 60) { + dev_err(dev, "Incorrect WLED flash FSC rc:%d\n", + rc); + return rc; + } + } + + /* + * As per the hardware recommendation, FS current should + * be limited to 20 mA for PM8150L v1.0. + */ + if (wled->pmic_rev_id->rev4 == PM8150L_V1P0_REV4) + wled->fparams.fs_current = 20; + + /* Value read in us */ + wled->fparams.step_delay = 200; + rc = of_property_read_u32(temp, "qcom,wled-flash-step", + &wled->fparams.step_delay); + if (!rc) { + if (wled->fparams.step_delay < 50 || + wled->fparams.step_delay > 400) { + dev_err(dev, "Incorrect WLED flash step delay rc:%d\n", + rc); + return rc; + } + } + + /* Value read in ms */ + wled->fparams.safety_timer = 100; + rc = of_property_read_u32(temp, "qcom,wled-flash-timer", + &wled->fparams.safety_timer); + if (!rc) { + if (wled->fparams.safety_timer < 50 || + wled->fparams.safety_timer > 1200) { + dev_err(dev, "Incorrect WLED flash safety time rc:%d\n", + rc); + return rc; + } + } + + rc = of_property_read_string(temp, + "qcom,default-led-trigger", + &wled->flash_cdev.default_trigger); + if (rc < 0) + wled->flash_cdev.default_trigger = "wled_flash"; + } else if (!strcmp(cdev_name, "torch")) { + /* Value read in mA */ + wled->tparams.fs_current = 30; + rc = of_property_read_u32(temp, "qcom,wled-torch-fsc", + &wled->tparams.fs_current); + if (!rc) { + 
if (wled->tparams.fs_current <= 0 || + wled->tparams.fs_current > 60) { + dev_err(dev, "Incorrect WLED torch FSC rc:%d\n", + rc); + return rc; + } + } + + /* + * As per the hardware recommendation, FS current should + * be limited to 20 mA for PM8150L v1.0. + */ + if (wled->pmic_rev_id->rev4 == PM8150L_V1P0_REV4) + wled->tparams.fs_current = 20; + + /* Value read in us */ + wled->tparams.step_delay = 200; + rc = of_property_read_u32(temp, "qcom,wled-torch-step", + &wled->tparams.step_delay); + if (!rc) { + if (wled->tparams.step_delay < 50 || + wled->tparams.step_delay > 400) { + dev_err(dev, "Incorrect WLED torch step delay rc:%d\n", + rc); + return rc; + } + } + + /* Value read in ms */ + wled->tparams.safety_timer = 600; + rc = of_property_read_u32(temp, "qcom,wled-torch-timer", + &wled->tparams.safety_timer); + if (!rc) { + if (wled->tparams.safety_timer < 50 || + wled->tparams.safety_timer > 1200) { + dev_err(dev, "Incorrect WLED torch safety time rc:%d\n", + rc); + return rc; + } + } + + rc = of_property_read_string(temp, + "qcom,default-led-trigger", + &wled->torch_cdev.default_trigger); + if (rc < 0) + wled->torch_cdev.default_trigger = "wled_torch"; + } else if (!strcmp(cdev_name, "switch")) { + rc = of_property_read_string(temp, + "qcom,default-led-trigger", + &wled->switch_cdev.default_trigger); + if (rc < 0) + wled->switch_cdev.default_trigger = + "wled_switch"; + } else { + return -EINVAL; + } + } + + return 0; +} + +static int wled_flash_setup(struct wled *wled) +{ + int rc, i; + u8 val; + + /* Not supported */ + if (is_wled4(wled)) + return 0; + + /* Set FLASH_VREF_ADIM_HDIM to maximum */ + rc = regmap_write(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_FLASH_HDRM_REG, 0xF); + if (rc < 0) + return rc; + + /* Write a full brightness value */ + rc = regmap_write(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_PRE_FLASH_BRT_REG, 0xFF); + if (rc < 0) + return rc; + + /* Sync the brightness by writing a 0 followed by 1 */ + rc = regmap_write(wled->regmap, + 
wled->ctrl_addr + WLED5_CTRL_PRE_FLASH_SYNC_REG, 0); + if (rc < 0) + return rc; + + rc = regmap_write(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_PRE_FLASH_SYNC_REG, 1); + if (rc < 0) + return rc; + + /* Write a full brightness value */ + rc = regmap_write(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_FLASH_BRT_REG, 0xFF); + if (rc < 0) + return rc; + + /* Sync the brightness by writing a 0 followed by 1 */ + rc = regmap_write(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_FLASH_SYNC_REG, 0); + if (rc < 0) + return rc; + + rc = regmap_write(wled->regmap, + wled->ctrl_addr + WLED5_CTRL_FLASH_SYNC_REG, 1); + if (rc < 0) + return rc; + + for (val = 0, i = 0; (wled->cfg.string_cfg >> i) != 0; i++) { + if (wled->cfg.string_cfg & BIT(i)) { + val |= 1 << (i + WLED_SINK_CURR_SINK_SHFT); + wled->num_strings++; + } + } + + /* Enable current sinks for flash */ + rc = regmap_update_bits(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_SINK_EN_REG, + WLED_SINK_CURR_SINK_MASK, val); + if (rc < 0) + return rc; + + /* Enable flash and pre_flash safety timers */ + val = get_wled_safety_time(wled->tparams.safety_timer) << + WLED5_PRE_FLASH_SAFETY_SHIFT; + val |= get_wled_safety_time(wled->fparams.safety_timer); + rc = regmap_write(wled->regmap, + wled->sink_addr + WLED5_SINK_FLASH_TIMER_CTL_REG, val); + if (rc < 0) + return rc; + + return 0; +} + static int wled_configure(struct wled *wled, struct device *dev) { struct wled_config *cfg = &wled->cfg; @@ -1427,6 +2134,15 @@ static int wled_configure(struct wled *wled, struct device *dev) if (wled->ovp_irq < 0) dev_dbg(&wled->pdev->dev, "ovp irq is not used\n"); + wled->flash_irq = platform_get_irq_byname(wled->pdev, "flash-irq"); + if (wled->flash_irq < 0) + dev_dbg(&wled->pdev->dev, "flash irq is not used\n"); + + wled->pre_flash_irq = platform_get_irq_byname(wled->pdev, + "pre-flash-irq"); + if (wled->pre_flash_irq < 0) + dev_dbg(&wled->pdev->dev, "pre_flash irq is not used\n"); + return 0; } @@ -1465,7 +2181,13 @@ static int 
wled_probe(struct platform_device *pdev) rc = wled_configure(wled, &pdev->dev); if (rc < 0) { - pr_err("wled configure failed rc:%d\n", rc); + dev_err(&pdev->dev, "wled configure failed rc:%d\n", rc); + return rc; + } + + rc = wled_flash_configure(wled); + if (rc < 0) { + dev_err(&pdev->dev, "wled configure failed rc:%d\n", rc); return rc; } @@ -1494,7 +2216,7 @@ static int wled_probe(struct platform_device *pdev) else rc = wled5_setup(wled); if (rc < 0) { - pr_err("wled setup failed rc:%d\n", rc); + dev_err(&pdev->dev, "wled setup failed rc:%d\n", rc); return rc; } @@ -1508,7 +2230,27 @@ static int wled_probe(struct platform_device *pdev) bl = devm_backlight_device_register(&pdev->dev, wled->name, &pdev->dev, wled, &wled_ops, &props); - return PTR_ERR_OR_ZERO(bl); + if (IS_ERR_OR_NULL(bl)) { + rc = PTR_ERR_OR_ZERO(bl); + if (!rc) + rc = -ENODEV; + dev_err(&pdev->dev, "failed to register backlight rc:%d\n", rc); + return rc; + } + + rc = wled_flash_device_register(wled); + if (rc < 0) { + dev_err(&pdev->dev, "failed to register WLED flash/torch rc:%d\n", + rc); + return rc; + } + + rc = wled_flash_setup(wled); + if (rc < 0) + dev_err(&pdev->dev, "failed to setup WLED flash/torch rc:%d\n", + rc); + + return rc; } static const struct of_device_id wled_match_table[] = { diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index d7fbe57a963f74b1e231647977768bb7e59d64d7..a122e8ccae38be4767ec83c83a7e565e031def61 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -123,18 +123,6 @@ static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd, static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd, int type); -int of_led_classdev_register(struct device *parent, struct device_node *np, - struct led_classdev *led_cdev) -{ - /* Stub out till LEDS CLASS defconfig is enabled */ - return 0; -} - -void led_classdev_unregister(struct led_classdev *led_cdev) -{ - /* Stub out till LEDS CLASS 
defconfig is enabled */ -} - void mdss_fb_no_update_notify_timer_cb(unsigned long data) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data; @@ -1341,10 +1329,7 @@ static int mdss_fb_probe(struct platform_device *pdev) if (!lcd_backlight_registered) { backlight_led.brightness = mfd->panel_info->brightness_max; backlight_led.max_brightness = mfd->panel_info->brightness_max; - if (led_classdev_register(&pdev->dev, &backlight_led)) - pr_err("led_classdev_register failed\n"); - else - lcd_backlight_registered = 1; + lcd_backlight_registered = 1; } mdss_fb_init_panel_modes(mfd, pdata); @@ -1441,7 +1426,6 @@ static int mdss_fb_remove(struct platform_device *pdev) if (lcd_backlight_registered) { lcd_backlight_registered = 0; - led_classdev_unregister(&backlight_led); } return 0; @@ -2171,14 +2155,6 @@ int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size) } if (mfd->mdp.fb_mem_get_iommu_domain) { - fd = dma_buf_fd(mfd->fbmem_buf, 0); - if (IS_ERR(mfd->fbmem_buf)) { - rc = PTR_ERR(mfd->fbmem_buf); - goto fb_mmap_failed; - } - - mfd->fbmem_buf = dma_buf_get(fd); - domain = mfd->mdp.fb_mem_get_iommu_domain(); mfd->fb_attachment = mdss_smmu_dma_buf_attach(mfd->fbmem_buf, diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index fd99a0e2ad2d651297e7c0be926169c4802c9ed4..ad87a5eb425b524b6c2fd6ffa4b1416f3cad130a 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -276,12 +276,6 @@ static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev); static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev); static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev); -static int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret) -{ - /* This function doesn't seem to be applicbale for 4.14 */ - return 0; -} - static inline u32 is_mdp_irq_enabled(void) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); diff --git 
a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c index 35843afbe05dd0d23180123b564046492be169f1..f2731355e524d6941a0443bc85fb1f369d159883 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c +++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c @@ -39,8 +39,6 @@ static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd, struct msm_fb_splash_info *sinfo; unsigned long buf_size = size; struct mdss_data_type *mdata; - struct dma_buf *dma_buf = NULL; - int fd = 0; if (!mfd || !size) return -EINVAL; @@ -51,19 +49,10 @@ static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd, if (!mdata || sinfo->splash_buffer) return -EINVAL; - dma_buf = ion_alloc(size, ION_HEAP(ION_SYSTEM_HEAP_ID), 0); - if (IS_ERR_OR_NULL(dma_buf)) { + sinfo->dma_buf = ion_alloc(size, ION_HEAP(ION_SYSTEM_HEAP_ID), 0); + if (IS_ERR_OR_NULL(sinfo->dma_buf)) { pr_err("ion memory allocation failed\n"); - rc = PTR_RET(dma_buf); - goto end; - } - - fd = dma_buf_fd(dma_buf, 0); - - sinfo->size = size; - sinfo->dma_buf = dma_buf_get(fd); - if (IS_ERR(sinfo->dma_buf)) { - rc = PTR_ERR(sinfo->dma_buf); + rc = PTR_RET(sinfo->dma_buf); goto end; } diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index af6fc97f4ba4a5fac8cf2f100616f3cdf33a8aae..a436d44f1b7fbf4e2fe2447de21a6aa5a4903cde 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c @@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, unsigned char __user *ured; unsigned char __user *ugreen; unsigned char __user *ublue; - int index, count, i; + unsigned int index, count, i; if (get_user(index, &c->index) || __get_user(count, &c->count) || @@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, unsigned char __user *ugreen; unsigned char __user *ublue; struct fb_cmap *cmap = &info->cmap; - int index, count, i; + unsigned int index, count, i; u8 red, green, blue; if 
(get_user(index, &c->index) || diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c index 7dd0da644a7f68fc7b1163d1eb12d07fd8c04f3b..2cf56b459d84c37411a9647d5fbf92927f5091a1 100644 --- a/drivers/watchdog/asm9260_wdt.c +++ b/drivers/watchdog/asm9260_wdt.c @@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev) if (IS_ERR(priv->iobase)) return PTR_ERR(priv->iobase); - ret = asm9260_wdt_get_dt_clks(priv); - if (ret) - return ret; - priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst"); if (IS_ERR(priv->rst)) return PTR_ERR(priv->rst); + ret = asm9260_wdt_get_dt_clks(priv); + if (ret) + return ret; + wdd = &priv->wdd; wdd->info = &asm9260_wdt_ident; wdd->ops = &asm9260_wdt_ops; diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index 79cc766cd30fdc411e112c1a318d32589627c7b6..fd91007b4e41b38a342cad024510edce46c06612 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); #define WDT_RELOAD_VALUE 0x04 #define WDT_RESTART 0x08 #define WDT_CTRL 0x0C +#define WDT_CTRL_BOOT_SECONDARY BIT(7) #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5) #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5) #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5) @@ -158,6 +159,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd, { struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY; aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000); mdelay(1000); @@ -232,16 +234,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev) wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM; } else { if (!strcmp(reset_type, "cpu")) - wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU; + wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU | + WDT_CTRL_RESET_SYSTEM; else if (!strcmp(reset_type, "soc")) - wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC; + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | + WDT_CTRL_RESET_SYSTEM; else if 
(!strcmp(reset_type, "system")) - wdt->ctrl |= WDT_CTRL_RESET_SYSTEM; + wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP | + WDT_CTRL_RESET_SYSTEM; else if (strcmp(reset_type, "none")) return -EINVAL; } if (of_property_read_bool(np, "aspeed,external-signal")) wdt->ctrl |= WDT_CTRL_WDT_EXT; + if (of_property_read_bool(np, "aspeed,alt-boot")) + wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY; writel(wdt->ctrl, wdt->base + WDT_CTRL); diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 2f46487af86d05a72216030e188feb74777b546e..6d9a5d8c3c8dbafc0cffaedf85a48553d7fb6a99 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -198,15 +198,22 @@ static int davinci_wdt_probe(struct platform_device *pdev) wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem); - if (IS_ERR(davinci_wdt->base)) - return PTR_ERR(davinci_wdt->base); + if (IS_ERR(davinci_wdt->base)) { + ret = PTR_ERR(davinci_wdt->base); + goto err_clk_disable; + } ret = watchdog_register_device(wdd); - if (ret < 0) { - clk_disable_unprepare(davinci_wdt->clk); + if (ret) { dev_err(dev, "cannot register watchdog device\n"); + goto err_clk_disable; } + return 0; + +err_clk_disable: + clk_disable_unprepare(davinci_wdt->clk); + return ret; } diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index c2f4ff51623015ca32aca20d3ad74ff05fe38e29..918357bccf5e937c75db7712b2f73ce639475013 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -34,6 +34,7 @@ #define WDOG_CONTROL_REG_OFFSET 0x00 #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01 +#define WDOG_CONTROL_REG_RESP_MODE_MASK 0x02 #define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04 #define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4 #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08 @@ -121,14 +122,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s) return 0; } +static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt) +{ + u32 val = 
readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); + + /* Disable interrupt mode; always perform system reset. */ + val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK; + /* Enable watchdog. */ + val |= WDOG_CONTROL_REG_WDT_EN_MASK; + writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); +} + static int dw_wdt_start(struct watchdog_device *wdd) { struct dw_wdt *dw_wdt = to_dw_wdt(wdd); dw_wdt_set_timeout(wdd, wdd->timeout); - - writel(WDOG_CONTROL_REG_WDT_EN_MASK, - dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); + dw_wdt_arm_system_reset(dw_wdt); return 0; } @@ -152,16 +162,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd, unsigned long action, void *data) { struct dw_wdt *dw_wdt = to_dw_wdt(wdd); - u32 val; writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); - val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); - if (val & WDOG_CONTROL_REG_WDT_EN_MASK) + if (dw_wdt_is_enabled(dw_wdt)) writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET); else - writel(WDOG_CONTROL_REG_WDT_EN_MASK, - dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); + dw_wdt_arm_system_reset(dw_wdt); /* wait for reset to assert... 
*/ mdelay(500); diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index e682bf046e5040b2620ebeda51b3d74e11dac60c..88cd2a52d8d3244318c738e354129fcd3f65c8fe 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf, char c; if (get_user(c, buf + i)) return -EFAULT; - expect_close = (c == 'V'); + if (c == 'V') + expect_close = true; } /* Properly order writes across fork()ed processes */ diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 316c2eb122d23d335d738947a63fc2f9db2e4f1b..e8bd9887c56638aaf81659c45d7505c424266b55 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -50,6 +50,7 @@ */ #include +#include #include #include #include @@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd) !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); - timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) - + timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) - arch_counter_get_cntvct(); do_div(timeleft, gwdt->clk); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 1ab4bd11f5f3f01f34b1a0055f836c9a0ce755ba..762378f1811cc9069dc6171edb55aaa3610b82fa 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -755,8 +755,8 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, mutex_unlock(&irq_mapping_update_lock); return irq; error_irq: - for (; i >= 0; i--) - __unbind_from_irq(irq + i); + while (nvec--) + __unbind_from_irq(irq + nvec); mutex_unlock(&irq_mapping_update_lock); return ret; } diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index b209cd44bb8dce76fe44e10e0349af51fee7880c..169293c25a91544d342367926d29b238505557f3 100644 --- a/drivers/xen/pvcalls-back.c +++ 
b/drivers/xen/pvcalls-back.c @@ -424,7 +424,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev, sock); if (!map) { ret = -EFAULT; - sock_release(map->sock); + sock_release(sock); } out: diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 82fc54f8eb77234253dae40572a817231bdd57b0..f98b8c135db91f858088331e149eb588d4922bcb 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -365,7 +365,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, * physical address */ phys = xen_bus_to_phys(dev_addr); - if (((dev_addr + size - 1 > dma_mask)) || + if (((dev_addr + size - 1 <= dma_mask)) || range_straddles_page_boundary(phys, size)) xen_destroy_contiguous_region(phys, order); diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 23e391d3ec015d0c5b38b21619898c282826f59c..22863f5f247419ebf7524d7a4d1b8d50ffa62370 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv) } /* There are more ACPI Processor objects than in x2APIC or MADT. * This can happen with incorrect ACPI SSDT declerations. */ - if (acpi_id > nr_acpi_bits) { - pr_debug("We only have %u, trying to set %u\n", - nr_acpi_bits, acpi_id); + if (acpi_id >= nr_acpi_bits) { + pr_debug("max acpi id %u, trying to set %u\n", + nr_acpi_bits - 1, acpi_id); return AE_OK; } /* OK, There is a ACPI Processor object */ diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 74888cacd0b0bdcd250135e6436e0c1e39fc330f..ec9eb4fba59c7e88f746f01aa9fe66c2ed590482 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus, /* Register with generic device framework. 
*/ err = device_register(&xendev->dev); - if (err) + if (err) { + put_device(&xendev->dev); + xendev = NULL; goto fail; + } return 0; fail: diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index cc1b1ac57d61e8b75ffde0ae2ea9c8f3a66f04be..47728477297e8c663c0d07f551d004f823b17360 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) z->dev.parent = &bus->dev; z->dev.bus = &zorro_bus_type; z->dev.id = i; + switch (z->rom.er_Type & ERT_TYPEMASK) { + case ERT_ZORROIII: + z->dev.coherent_dma_mask = DMA_BIT_MASK(32); + break; + + case ERT_ZORROII: + default: + z->dev.coherent_dma_mask = DMA_BIT_MASK(24); + break; + } + z->dev.dma_mask = &z->dev.coherent_dma_mask; } /* ... then register them */ diff --git a/fs/affs/namei.c b/fs/affs/namei.c index d8aa0ae3d037c8b91ab13f7d3f6d209bd2326797..1ed0fa4c4d481842c22ac4a3a95f5b867c84f53d 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -206,9 +206,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) affs_lock_dir(dir); bh = affs_find_entry(dir, dentry); - affs_unlock_dir(dir); - if (IS_ERR(bh)) + if (IS_ERR(bh)) { + affs_unlock_dir(dir); return ERR_CAST(bh); + } if (bh) { u32 ino = bh->b_blocknr; @@ -222,10 +223,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) } affs_brelse(bh); inode = affs_iget(sb, ino); - if (IS_ERR(inode)) + if (IS_ERR(inode)) { + affs_unlock_dir(dir); return ERR_CAST(inode); + } } d_add(dentry, inode); + affs_unlock_dir(dir); return NULL; } diff --git a/fs/aio.c b/fs/aio.c index c3ace7833a035a68410af4ca76164bc2894bb47a..3a749c3a92e3221920a229468fc3dbe2a1356a86 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -643,9 +643,8 @@ static void free_ioctx_users(struct percpu_ref *ref) while (!list_empty(&ctx->active_reqs)) { req = list_first_entry(&ctx->active_reqs, struct aio_kiocb, 
ki_list); - - list_del_init(&req->ki_list); kiocb_cancel(req); + list_del_init(&req->ki_list); } spin_unlock_irq(&ctx->ctx_lock); @@ -1087,8 +1086,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) ctx = rcu_dereference(table->table[id]); if (ctx && ctx->user_id == ctx_id) { - percpu_ref_get(&ctx->users); - ret = ctx; + if (percpu_ref_tryget_live(&ctx->users)) + ret = ctx; } out: rcu_read_unlock(); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 21cc27509993628e6c12e27a951893c7a6644d26..d1b9900ebc9b9ea41d454ce5ebf53403e267d1a6 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2497,10 +2497,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, if (p->reada != READA_NONE) reada_for_search(fs_info, p, level, slot, key->objectid); - btrfs_release_path(p); - ret = -EAGAIN; - tmp = read_tree_block(fs_info, blocknr, 0); + tmp = read_tree_block(fs_info, blocknr, gen); if (!IS_ERR(tmp)) { /* * If the read above didn't mark this buffer up to date, @@ -2514,6 +2512,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p, } else { ret = PTR_ERR(tmp); } + + btrfs_release_path(p); return ret; } @@ -5454,12 +5454,24 @@ int btrfs_compare_trees(struct btrfs_root *left_root, down_read(&fs_info->commit_root_sem); left_level = btrfs_header_level(left_root->commit_root); left_root_level = left_level; - left_path->nodes[left_level] = left_root->commit_root; + left_path->nodes[left_level] = + btrfs_clone_extent_buffer(left_root->commit_root); + if (!left_path->nodes[left_level]) { + up_read(&fs_info->commit_root_sem); + ret = -ENOMEM; + goto out; + } extent_buffer_get(left_path->nodes[left_level]); right_level = btrfs_header_level(right_root->commit_root); right_root_level = right_level; - right_path->nodes[right_level] = right_root->commit_root; + right_path->nodes[right_level] = + btrfs_clone_extent_buffer(right_root->commit_root); + if (!right_path->nodes[right_level]) { + up_read(&fs_info->commit_root_sem); + ret = 
-ENOMEM; + goto out; + } extent_buffer_get(right_path->nodes[right_level]); up_read(&fs_info->commit_root_sem); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8fc690384c585779a9a6838b08228b41468a49b6..588760c49fe28b8137d7a4136f035dd6d947e804 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2969,7 +2969,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info) kfree(fs_info->super_copy); kfree(fs_info->super_for_commit); security_free_mnt_opts(&fs_info->security_opts); - kfree(fs_info); + kvfree(fs_info); } /* tree mod log functions from ctree.c */ @@ -3150,6 +3150,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, u64 *ram_bytes); +void __btrfs_del_delalloc_inode(struct btrfs_root *root, + struct btrfs_inode *inode); struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); int btrfs_unlink_inode(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 79f0f282a0ef0a22b83e22159b76ea3d3e6fc10f..27d59cf36341c69e1dc680e3ddf9f9004f8eb267 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1276,7 +1276,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void) if (!writers) return ERR_PTR(-ENOMEM); - ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL); + ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS); if (ret < 0) { kfree(writers); return ERR_PTR(ret); @@ -3896,7 +3896,8 @@ void close_ctree(struct btrfs_fs_info *fs_info) btrfs_err(fs_info, "commit super ret %d", ret); } - if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) + if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) || + test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) btrfs_error_commit_super(fs_info); kthread_stop(fs_info->transaction_kthread); @@ -3905,6 +3906,7 @@ void close_ctree(struct btrfs_fs_info *fs_info) 
set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); btrfs_free_qgroup_config(fs_info); + ASSERT(list_empty(&fs_info->delalloc_roots)); if (percpu_counter_sum(&fs_info->delalloc_bytes)) { btrfs_info(fs_info, "at unmount delalloc count %lld", @@ -4203,15 +4205,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info) static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) { + /* cleanup FS via transaction */ + btrfs_cleanup_transaction(fs_info); + mutex_lock(&fs_info->cleaner_mutex); btrfs_run_delayed_iputs(fs_info); mutex_unlock(&fs_info->cleaner_mutex); down_write(&fs_info->cleanup_work_sem); up_write(&fs_info->cleanup_work_sem); - - /* cleanup FS via transaction */ - btrfs_cleanup_transaction(fs_info); } static void btrfs_destroy_ordered_extents(struct btrfs_root *root) @@ -4334,19 +4336,23 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) list_splice_init(&root->delalloc_inodes, &splice); while (!list_empty(&splice)) { + struct inode *inode = NULL; btrfs_inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes); - - list_del_init(&btrfs_inode->delalloc_inodes); - clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &btrfs_inode->runtime_flags); + __btrfs_del_delalloc_inode(root, btrfs_inode); spin_unlock(&root->delalloc_lock); - btrfs_invalidate_inodes(btrfs_inode->root); - + /* + * Make sure we get a live inode and that it'll not disappear + * meanwhile. 
+ */ + inode = igrab(&btrfs_inode->vfs_inode); + if (inode) { + invalidate_inode_pages2(inode->i_mapping); + iput(inode); + } spin_lock(&root->delalloc_lock); } - spin_unlock(&root->delalloc_lock); } @@ -4362,7 +4368,6 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) while (!list_empty(&splice)) { root = list_first_entry(&splice, struct btrfs_root, delalloc_root); - list_del_init(&root->delalloc_root); root = btrfs_grab_fs_root(root); BUG_ON(!root); spin_unlock(&fs_info->delalloc_root_lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d227d8514b256749e0ad90ea0f4c715f632b587f..53487102081d6c8c2adfd46c2e9f83926166cb30 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3171,7 +3171,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root, struct btrfs_transaction *cur_trans; int ret = 0; + spin_lock(&root->fs_info->trans_lock); cur_trans = root->fs_info->running_transaction; + if (cur_trans) + refcount_inc(&cur_trans->use_count); + spin_unlock(&root->fs_info->trans_lock); if (!cur_trans) return 0; @@ -3180,6 +3184,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root, head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (!head) { spin_unlock(&delayed_refs->lock); + btrfs_put_transaction(cur_trans); return 0; } @@ -3196,6 +3201,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root, mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); + btrfs_put_transaction(cur_trans); return -EAGAIN; } spin_unlock(&delayed_refs->lock); @@ -3223,6 +3229,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root, } spin_unlock(&head->lock); mutex_unlock(&head->mutex); + btrfs_put_transaction(cur_trans); return ret; } @@ -4668,6 +4675,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, if (wait_for_alloc) { mutex_unlock(&fs_info->chunk_mutex); wait_for_alloc = 0; + cond_resched(); goto again; } diff --git 
a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9f21c29d02599a0176d6ba6779f4a6abcdb86268..8ecbac3b862ed6b4d1f94b969202601a7cba2a5a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1754,12 +1754,12 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root, spin_unlock(&root->delalloc_lock); } -static void btrfs_del_delalloc_inode(struct btrfs_root *root, - struct btrfs_inode *inode) + +void __btrfs_del_delalloc_inode(struct btrfs_root *root, + struct btrfs_inode *inode) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); - spin_lock(&root->delalloc_lock); if (!list_empty(&inode->delalloc_inodes)) { list_del_init(&inode->delalloc_inodes); clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, @@ -1772,6 +1772,13 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root, spin_unlock(&fs_info->delalloc_root_lock); } } +} + +static void btrfs_del_delalloc_inode(struct btrfs_root *root, + struct btrfs_inode *inode) +{ + spin_lock(&root->delalloc_lock); + __btrfs_del_delalloc_inode(root, inode); spin_unlock(&root->delalloc_lock); } @@ -6657,8 +6664,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock_inode; } else { btrfs_update_inode(trans, root, inode); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); } out_unlock: @@ -6735,8 +6741,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock_inode; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); out_unlock: btrfs_end_transaction(trans); @@ -6883,12 +6888,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (err) goto out_fail_inode; - d_instantiate(dentry, inode); - /* - * mkdir is special. We're unlocking after we call d_instantiate - * to avoid a race with nfsd calling d_instantiate. 
- */ - unlock_new_inode(inode); + d_instantiate_new(dentry, inode); drop_on_err = 0; out_fail: @@ -10566,8 +10566,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock_inode; } - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); out_unlock: btrfs_end_transaction(trans); diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c index f6a05f8366297ef791f74de39f2a884972dfb7a7..cbabc6f2b3221b44a9b05988106348b6cc14c1f9 100644 --- a/fs/btrfs/props.c +++ b/fs/btrfs/props.c @@ -400,6 +400,7 @@ static int prop_compression_apply(struct inode *inode, const char *value, size_t len) { + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); int type; if (len == 0) { @@ -410,14 +411,17 @@ static int prop_compression_apply(struct inode *inode, return 0; } - if (!strncmp("lzo", value, 3)) + if (!strncmp("lzo", value, 3)) { type = BTRFS_COMPRESS_LZO; - else if (!strncmp("zlib", value, 4)) + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); + } else if (!strncmp("zlib", value, 4)) { type = BTRFS_COMPRESS_ZLIB; - else if (!strncmp("zstd", value, len)) + } else if (!strncmp("zstd", value, len)) { type = BTRFS_COMPRESS_ZSTD; - else + btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); + } else { return -EINVAL; + } BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 2c35717a3470a66deb9b93d898e3e3b7971b22b5..baf5a4cd7ffcfbe4c93ef0f293fc22c495a9f6dd 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5008,6 +5008,9 @@ static int send_hole(struct send_ctx *sctx, u64 end) u64 len; int ret = 0; + if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) + return send_update_extent(sctx, offset, end - offset); + p = fs_path_alloc(); if (!p) return -ENOMEM; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index e8f5e24325f338d5ab9770c9fa1c9aa50ef37ad2..8e3ce81d3f44c5da51b5efe31d61ea17f7fa6f1b 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c 
@@ -1581,7 +1581,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, * it for searching for existing supers, so this lets us do that and * then open_ctree will properly initialize everything later. */ - fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); + fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); if (!fs_info) { error = -ENOMEM; goto error_sec_opts; diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 0f4ce970d195177220b57f1ea33b060ceb3854c8..578fd045e85974caf9f80087a5277c89e188b600 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -63,7 +63,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr, btrfs_set_extent_generation(leaf, item, 1); btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK); block_info = (struct btrfs_tree_block_info *)(item + 1); - btrfs_set_tree_block_level(leaf, block_info, 1); + btrfs_set_tree_block_level(leaf, block_info, 0); iref = (struct btrfs_extent_inline_ref *)(block_info + 1); if (parent > 0) { btrfs_set_extent_inline_ref_type(leaf, iref, diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f615d59b0489e2af52a711b896f758e13d05bd46..27638b96079dabfce4cd6f40ac7ecc29e7e95b81 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -319,7 +319,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) && root->last_trans < trans->transid) || force) { WARN_ON(root == fs_info->extent_root); - WARN_ON(root->commit_root != root->node); + WARN_ON(!force && root->commit_root != root->node); /* * see below for IN_TRANS_SETUP usage rules @@ -1365,6 +1365,14 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) return 0; + /* + * Ensure dirty @src will be commited. 
Or, after comming + * commit_fs_roots() and switch_commit_roots(), any dirty but not + * recorded root will never be updated again, causing an outdated root + * item. + */ + record_root_in_trans(trans, src, 1); + /* * We are going to commit transaction, see btrfs_commit_transaction() * comment for reason locking tree_log_mutex diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index b6dfe7af7a1fa217796a6602e157f9f0857fdb3b..fc4c14a72366a90d54d52cd0b8f4122be8afb96c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2272,8 +2272,10 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, nritems = btrfs_header_nritems(path->nodes[0]); if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(root, path); - if (ret) + if (ret == 1) break; + else if (ret < 0) + goto out; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); @@ -2377,13 +2379,41 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, if (ret) break; - /* for regular files, make sure corresponding - * orphan item exist. extents past the new EOF - * will be truncated later by orphan cleanup. + /* + * Before replaying extents, truncate the inode to its + * size. We need to do it now and not after log replay + * because before an fsync we can have prealloc extents + * added beyond the inode's i_size. If we did it after, + * through orphan cleanup for example, we would drop + * those prealloc extents just after replaying them. */ if (S_ISREG(mode)) { - ret = insert_orphan_item(wc->trans, root, - key.objectid); + struct inode *inode; + u64 from; + + inode = read_one_inode(root, key.objectid); + if (!inode) { + ret = -EIO; + break; + } + from = ALIGN(i_size_read(inode), + root->fs_info->sectorsize); + ret = btrfs_drop_extents(wc->trans, root, inode, + from, (u64)-1, 1); + /* + * If the nlink count is zero here, the iput + * will free the inode. We bump it to make + * sure it doesn't get freed until the link + * count fixup is done. 
+ */ + if (!ret) { + if (inode->i_nlink == 0) + inc_nlink(inode); + /* Update link count and nbytes. */ + ret = btrfs_update_inode(wc->trans, + root, inode); + } + iput(inode); if (ret) break; } @@ -3432,8 +3462,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, * from this directory and from this transaction */ ret = btrfs_next_leaf(root, path); - if (ret == 1) { - last_offset = (u64)-1; + if (ret) { + if (ret == 1) + last_offset = (u64)-1; + else + err = ret; goto done; } btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); @@ -3885,6 +3918,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ASSERT(ret == 0); src = src_path->nodes[0]; i = 0; + need_find_last_extent = true; } btrfs_item_key_to_cpu(src, &key, i); @@ -4234,6 +4268,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, num++; } + /* + * Add all prealloc extents beyond the inode's i_size to make sure we + * don't lose them after doing a fast fsync and replaying the log. + */ + if (inode->flags & BTRFS_INODE_PREALLOC) { + struct rb_node *node; + + for (node = rb_last(&tree->map); node; node = rb_prev(node)) { + em = rb_entry(node, struct extent_map, rb_node); + if (em->start < i_size_read(&inode->vfs_inode)) + break; + if (!list_empty(&em->list)) + continue; + /* Same as above loop. 
*/ + if (++num > 32768) { + list_del_init(&tree->modified_extents); + ret = -EFBIG; + goto process; + } + refcount_inc(&em->refs); + set_bit(EXTENT_FLAG_LOGGING, &em->flags); + list_add_tail(&em->list, &extents); + } + } + list_sort(NULL, &extents, extent_cmp); btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end); /* @@ -4669,6 +4728,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct extent_map_tree *em_tree = &inode->extent_tree; u64 logged_isize = 0; bool need_log_inode_item = true; + bool xattrs_logged = false; path = btrfs_alloc_path(); if (!path) @@ -4971,6 +5031,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); if (err) goto out_unlock; + xattrs_logged = true; if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { btrfs_release_path(path); btrfs_release_path(dst_path); @@ -4983,6 +5044,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, btrfs_release_path(dst_path); if (need_log_inode_item) { err = log_inode_item(trans, log, dst_path, inode); + if (!err && !xattrs_logged) { + err = btrfs_log_all_xattrs(trans, root, inode, path, + dst_path); + btrfs_release_path(path); + } if (err) goto out_unlock; } @@ -5881,7 +5947,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, * this will force the logging code to walk the dentry chain * up for the file */ - if (S_ISREG(inode->vfs_inode.i_mode)) + if (!S_ISDIR(inode->vfs_inode.i_mode)) inode->last_unlink_trans = trans->transid; /* diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b983e7fb200bb35708dd2812f700d740c7a14652..08afafb6ecf763eb37bc579eeec11d9801fd63e2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3964,6 +3964,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) return 0; } + /* + * A ro->rw remount sequence should continue with the paused balance + * regardless of who pauses it, system or the user as of now, so set + * the 
resume flag. + */ + spin_lock(&fs_info->balance_lock); + fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; + spin_unlock(&fs_info->balance_lock); + tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); return PTR_ERR_OR_ZERO(tsk); } diff --git a/fs/buffer.c b/fs/buffer.c index b96f3b98a6ef9f8fc9a3ac22d1e60996eabf4d50..ff13810b2931a610f7beb9e083838b6c3514d67f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1451,12 +1451,48 @@ static bool has_bh_in_lru(int cpu, void *dummy) return 0; } +static void __evict_bh_lru(void *arg) +{ + struct bh_lru *b = &get_cpu_var(bh_lrus); + struct buffer_head *bh = arg; + int i; + + for (i = 0; i < BH_LRU_SIZE; i++) { + if (b->bhs[i] == bh) { + brelse(b->bhs[i]); + b->bhs[i] = NULL; + goto out; + } + } +out: + put_cpu_var(bh_lrus); +} + +static bool bh_exists_in_lru(int cpu, void *arg) +{ + struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); + struct buffer_head *bh = arg; + int i; + + for (i = 0; i < BH_LRU_SIZE; i++) { + if (b->bhs[i] == bh) + return 1; + } + + return 0; + +} void invalidate_bh_lrus(void) { on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL); } EXPORT_SYMBOL_GPL(invalidate_bh_lrus); +static void evict_bh_lrus(struct buffer_head *bh) +{ + on_each_cpu_cond(bh_exists_in_lru, __evict_bh_lru, bh, 1, GFP_ATOMIC); +} + void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset) { @@ -3284,8 +3320,15 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free) bh = head; do { - if (buffer_busy(bh)) - goto failed; + if (buffer_busy(bh)) { + /* + * Check if the busy failure was due to an + * outstanding LRU reference + */ + evict_bh_lrus(bh); + if (buffer_busy(bh)) + goto failed; + } bh = bh->b_this_page; } while (bh != head); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index e4082afedcb15a447ee7fd344aa2a3def391d43c..48ffe720bf09c1fb0d8d713cf5477fd415bcbde4 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -224,6 +224,7 @@ static int parse_fsopt_token(char *c, 
void *private) return -ENOMEM; break; case Opt_mds_namespace: + kfree(fsopt->mds_namespace); fsopt->mds_namespace = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, GFP_KERNEL); @@ -231,6 +232,7 @@ static int parse_fsopt_token(char *c, void *private) return -ENOMEM; break; case Opt_fscache_uniq: + kfree(fsopt->fscache_uniq); fsopt->fscache_uniq = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, GFP_KERNEL); @@ -710,14 +712,17 @@ static int __init init_caches(void) goto bad_dentry; ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD); - if (!ceph_file_cachep) goto bad_file; - if ((error = ceph_fscache_register())) - goto bad_file; + error = ceph_fscache_register(); + if (error) + goto bad_fscache; return 0; + +bad_fscache: + kmem_cache_destroy(ceph_file_cachep); bad_file: kmem_cache_destroy(ceph_dentry_cachep); bad_dentry: @@ -835,7 +840,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) int err; unsigned long started = jiffies; /* note the start time */ struct dentry *root; - int first = 0; /* first vfsmount for this super_block */ dout("mount start %p\n", fsc); mutex_lock(&fsc->client->mount_mutex); @@ -860,17 +864,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) path = fsc->mount_options->server_path + 1; dout("mount opening path %s\n", path); } + + err = ceph_fs_debugfs_init(fsc); + if (err < 0) + goto out; + root = open_root_dentry(fsc, path, started); if (IS_ERR(root)) { err = PTR_ERR(root); goto out; } fsc->sb->s_root = dget(root); - first = 1; - - err = ceph_fs_debugfs_init(fsc); - if (err < 0) - goto fail; } else { root = dget(fsc->sb->s_root); } @@ -880,11 +884,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc) mutex_unlock(&fsc->client->mount_mutex); return root; -fail: - if (first) { - dput(fsc->sb->s_root); - fsc->sb->s_root = NULL; - } out: mutex_unlock(&fsc->client->mount_mutex); return ERR_PTR(err); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h 
index 4012558f611530a9ed644e3282efff31d6fde534..073165db5641c96918361c1bcbf0124eaace8f83 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -91,6 +91,10 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode, filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) return true; + if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS && + filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS) + return true; + return false; } diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 05f5ee1f07057ab6484c84c4dc4b66d0f3a1e934..6e1b0281dabaea11b0d07de062feab4eef028e09 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -135,6 +135,8 @@ static const struct { FS_AES_128_CBC_KEY_SIZE }, [FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))", FS_AES_128_CTS_KEY_SIZE }, + [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = { "xts(speck128)", 64 }, + [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = { "cts(cbc(speck128))", 32 }, }; static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode, diff --git a/fs/dcache.c b/fs/dcache.c index 801179170794863348cc0e9f0d582f73089d03e0..b88eee4dbbc5b877b3cf5042a7918d1ccbc32428 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1867,6 +1867,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode) } EXPORT_SYMBOL(d_instantiate); +/* + * This should be equivalent to d_instantiate() + unlock_new_inode(), + * with lockdep-related part of unlock_new_inode() done before + * anything else. Use that instead of open-coding d_instantiate()/ + * unlock_new_inode() combinations. 
+ */ +void d_instantiate_new(struct dentry *entry, struct inode *inode) +{ + BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); + BUG_ON(!inode); + lockdep_annotate_inode_mutex_key(inode); + security_d_instantiate(entry, inode); + spin_lock(&inode->i_lock); + __d_instantiate(entry, inode); + WARN_ON(!(inode->i_state & I_NEW)); + inode->i_state &= ~I_NEW; + smp_mb(); + wake_up_bit(&inode->i_state, __I_NEW); + spin_unlock(&inode->i_lock); +} +EXPORT_SYMBOL(d_instantiate_new); + /** * d_instantiate_no_diralias - instantiate a non-aliased dentry * @entry: dentry to complete @@ -2460,7 +2482,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent, retry: rcu_read_lock(); - seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1; + seq = smp_load_acquire(&parent->d_inode->i_dir_seq); r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { @@ -2481,8 +2503,14 @@ struct dentry *d_alloc_parallel(struct dentry *parent, rcu_read_unlock(); goto retry; } + + if (unlikely(seq & 1)) { + rcu_read_unlock(); + goto retry; + } + hlist_bl_lock(b); - if (unlikely(parent->d_inode->i_dir_seq != seq)) { + if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { hlist_bl_unlock(b); rcu_read_unlock(); goto retry; diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index efc2db42d17513d089f16cfaaab38c81a4ed216c..bda65a73079059293c5b83076c0e85c702459716 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -284,8 +284,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, iget_failed(ecryptfs_inode); goto out; } - unlock_new_inode(ecryptfs_inode); - d_instantiate(ecryptfs_dentry, ecryptfs_inode); + d_instantiate_new(ecryptfs_dentry, ecryptfs_inode); out: return rc; } diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 1442a4c734c8f7b20ffbaf5b37312a94113d95c8..a7c87d593083759becae63b528455200faf155a5 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1261,21 +1261,11 @@ static void 
__ext2_truncate_blocks(struct inode *inode, loff_t offset) static void ext2_truncate_blocks(struct inode *inode, loff_t offset) { - /* - * XXX: it seems like a bug here that we don't allow - * IS_APPEND inode to have blocks-past-i_size trimmed off. - * review and fix this. - * - * Also would be nice to be able to handle IO errors and such, - * but that's probably too much to ask. - */ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; if (ext2_inode_is_fast_symlink(inode)) return; - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) - return; dax_sem_down_write(EXT2_I(inode)); __ext2_truncate_blocks(inode, offset); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index e078075dc66faaa80ce377118c3750246fc64da8..aa6ec191cac08d42c8d83039800f19c46d0844d6 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ext2_add_link(dentry, inode); if (!err) { - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; } inode_dec_link_count(inode); @@ -269,8 +268,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) if (err) goto out_fail; - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); out: return err; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 5c20f9b6e85658b80f5a852533479482834ae36b..6506ddeeaf8a04e0b50b7d7adf337557b3b323e4 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2420,8 +2420,7 @@ static int ext4_add_nondir(handle_t *handle, int err = ext4_add_entry(handle, dentry, inode); if (!err) { ext4_mark_inode_dirty(handle, inode); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; } drop_nlink(inode); @@ -2660,8 +2659,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) err = ext4_mark_inode_dirty(handle, dir); if (err) 
goto out_clear_inode; - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index bbb5cb5212fd8084674e8f081e087f0f48554ffb..db56316e6aa8e3c56ec8403b34bd3a2fa6723720 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3660,6 +3660,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_msg(sb, KERN_INFO, "mounting ext2 file system " "using the ext4 subsystem"); else { + /* + * If we're probing be silent, if this looks like + * it's actually an ext[34] filesystem. + */ + if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) + goto failed_mount; ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " "to feature incompatibilities"); goto failed_mount; @@ -3671,6 +3677,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_msg(sb, KERN_INFO, "mounting ext3 file system " "using the ext4 subsystem"); else { + /* + * If we're probing be silent, if this looks like + * it's actually an ext4 filesystem. + */ + if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb))) + goto failed_mount; ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " "to feature incompatibilities"); goto failed_mount; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index f32a0c79702f998d251230871018594f1c1faa07..534872d064bc9d11242a76a81a6f1495042f2d45 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -176,15 +176,12 @@ enum { #define CP_DISCARD 0x00000010 #define CP_TRIMMED 0x00000020 -#define DEF_BATCHED_TRIM_SECTIONS 2048 -#define BATCHED_TRIM_SEGMENTS(sbi) \ - (GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections)) -#define BATCHED_TRIM_BLOCKS(sbi) \ - (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg) #define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi) #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ +#define DEF_MAX_DISCARD_LEN 512 /* Max. 
2MB per discard */ #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ +#define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */ #define DEF_CP_INTERVAL 60 /* 60 secs */ #define DEF_IDLE_INTERVAL 5 /* 5 secs */ @@ -694,7 +691,8 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs, static inline bool __is_discard_mergeable(struct discard_info *back, struct discard_info *front) { - return back->lstart + back->len == front->lstart; + return (back->lstart + back->len == front->lstart) && + (back->len + front->len < DEF_MAX_DISCARD_LEN); } static inline bool __is_discard_back_mergeable(struct discard_info *cur, @@ -1080,6 +1078,7 @@ enum { enum fsync_mode { FSYNC_MODE_POSIX, /* fsync follows posix semantics */ FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */ + FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */ }; #ifdef CONFIG_F2FS_FS_ENCRYPTION @@ -2774,8 +2773,6 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); -void init_discard_policy(struct discard_policy *dpolicy, int discard_type, - unsigned int granularity); void drop_discard_cmd(struct f2fs_sb_info *sbi); void stop_discard_thread(struct f2fs_sb_info *sbi); bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index cb231b004e6104b2bf50e7c438ffc24676be0217..ed72fc2cc68de5fda4430da920114a0dd27b35a6 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -306,7 +306,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, remove_ino_entry(sbi, ino, APPEND_INO); clear_inode_flag(inode, FI_APPEND_WRITE); flush_out: - if (!atomic) + if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ret = 
f2fs_issue_flush(sbi, inode->i_ino); if (!ret) { remove_ino_entry(sbi, ino, UPDATE_INO); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 3a7ed962d2f74d75a56bcea943b66aa4a4cd5c37..75e37fd720b2ba999c05dbb0219f7f4850d6f25e 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -294,8 +294,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, alloc_nid_done(sbi, ino); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); @@ -597,8 +596,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, err = page_symlink(inode, disk_link.name, disk_link.len); err_out: - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); /* * Let's flush symlink data in order to avoid broken symlink as much as @@ -661,8 +659,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) alloc_nid_done(sbi, inode->i_ino); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); @@ -713,8 +710,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, alloc_nid_done(sbi, inode->i_ino); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 1e365e9138586a1c945a36689b53609c57e27c1e..bef74d628f66990853632041b5b54b460bb71219 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -915,6 +915,39 @@ static void __check_sit_bitmap(struct f2fs_sb_info *sbi, #endif } +static void __init_discard_policy(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy, + int discard_type, unsigned int granularity) +{ + /* common policy */ + dpolicy->type = discard_type; + dpolicy->sync = true; + dpolicy->granularity = granularity; + + dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; + 
dpolicy->io_aware_gran = MAX_PLIST_NUM; + + if (discard_type == DPOLICY_BG) { + dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; + dpolicy->io_aware = true; + dpolicy->sync = false; + if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) { + dpolicy->granularity = 1; + dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME; + } + } else if (discard_type == DPOLICY_FORCE) { + dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; + dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; + dpolicy->io_aware = false; + } else if (discard_type == DPOLICY_FSTRIM) { + dpolicy->io_aware = false; + } else if (discard_type == DPOLICY_UMOUNT) { + dpolicy->io_aware = false; + } +} + + /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ static void __submit_discard_cmd(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy, @@ -1130,68 +1163,6 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi, return 0; } -static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi, - struct discard_policy *dpolicy, - unsigned int start, unsigned int end) -{ - struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct discard_cmd *prev_dc = NULL, *next_dc = NULL; - struct rb_node **insert_p = NULL, *insert_parent = NULL; - struct discard_cmd *dc; - struct blk_plug plug; - int issued; - -next: - issued = 0; - - mutex_lock(&dcc->cmd_lock); - f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); - - dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, - NULL, start, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true); - if (!dc) - dc = next_dc; - - blk_start_plug(&plug); - - while (dc && dc->lstart <= end) { - struct rb_node *node; - - if (dc->len < dpolicy->granularity) - goto skip; - - if (dc->state != D_PREP) { - list_move_tail(&dc->list, &dcc->fstrim_list); - goto skip; - } - - __submit_discard_cmd(sbi, dpolicy, dc); - - if (++issued >= 
dpolicy->max_requests) { - start = dc->lstart + dc->len; - - blk_finish_plug(&plug); - mutex_unlock(&dcc->cmd_lock); - - schedule(); - - goto next; - } -skip: - node = rb_next(&dc->rb_node); - dc = rb_entry_safe(node, struct discard_cmd, rb_node); - - if (fatal_signal_pending(current)) - break; - } - - blk_finish_plug(&plug); - mutex_unlock(&dcc->cmd_lock); -} - static int __issue_discard_cmd(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy) { @@ -1332,7 +1303,18 @@ static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi, static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi, struct discard_policy *dpolicy) { - __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); + struct discard_policy dp; + + if (dpolicy) { + __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX); + return; + } + + /* wait all */ + __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1); + __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); + __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1); + __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX); } /* This should be covered by global mutex, &sit_i->sentry_lock */ @@ -1377,11 +1359,13 @@ bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) struct discard_policy dpolicy; bool dropped; - init_discard_policy(&dpolicy, DPOLICY_UMOUNT, dcc->discard_granularity); + __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT, + dcc->discard_granularity); __issue_discard_cmd(sbi, &dpolicy); dropped = __drop_discard_cmd(sbi); - __wait_all_discard_cmd(sbi, &dpolicy); + /* just to make sure there is no pending discard commands */ + __wait_all_discard_cmd(sbi, NULL); return dropped; } @@ -1397,7 +1381,7 @@ static int issue_discard_thread(void *data) set_freezable(); do { - init_discard_policy(&dpolicy, DPOLICY_BG, + __init_discard_policy(sbi, &dpolicy, DPOLICY_BG, dcc->discard_granularity); wait_event_interruptible_timeout(*q, @@ -1415,7 +1399,7 @@ static int issue_discard_thread(void *data) dcc->discard_wake = 0; if (sbi->gc_thread && 
sbi->gc_thread->gc_urgent) - init_discard_policy(&dpolicy, DPOLICY_FORCE, 1); + __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); sb_start_intwrite(sbi->sb); @@ -1708,32 +1692,6 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) wake_up_discard_thread(sbi, false); } -void init_discard_policy(struct discard_policy *dpolicy, - int discard_type, unsigned int granularity) -{ - /* common policy */ - dpolicy->type = discard_type; - dpolicy->sync = true; - dpolicy->granularity = granularity; - - dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST; - dpolicy->io_aware_gran = MAX_PLIST_NUM; - - if (discard_type == DPOLICY_BG) { - dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; - dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; - dpolicy->io_aware = true; - } else if (discard_type == DPOLICY_FORCE) { - dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; - dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; - dpolicy->io_aware = false; - } else if (discard_type == DPOLICY_FSTRIM) { - dpolicy->io_aware = false; - } else if (discard_type == DPOLICY_UMOUNT) { - dpolicy->io_aware = false; - } -} - static int create_discard_cmd_control(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; @@ -2373,11 +2331,72 @@ bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) return has_candidate; } +static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi, + struct discard_policy *dpolicy, + unsigned int start, unsigned int end) +{ + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct discard_cmd *prev_dc = NULL, *next_dc = NULL; + struct rb_node **insert_p = NULL, *insert_parent = NULL; + struct discard_cmd *dc; + struct blk_plug plug; + int issued; + +next: + issued = 0; + + mutex_lock(&dcc->cmd_lock); + f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); + + dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, + NULL, start, + (struct rb_entry **)&prev_dc, + (struct rb_entry 
**)&next_dc, + &insert_p, &insert_parent, true); + if (!dc) + dc = next_dc; + + blk_start_plug(&plug); + + while (dc && dc->lstart <= end) { + struct rb_node *node; + + if (dc->len < dpolicy->granularity) + goto skip; + + if (dc->state != D_PREP) { + list_move_tail(&dc->list, &dcc->fstrim_list); + goto skip; + } + + __submit_discard_cmd(sbi, dpolicy, dc); + + if (++issued >= dpolicy->max_requests) { + start = dc->lstart + dc->len; + + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); + __wait_all_discard_cmd(sbi, NULL); + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto next; + } +skip: + node = rb_next(&dc->rb_node); + dc = rb_entry_safe(node, struct discard_cmd, rb_node); + + if (fatal_signal_pending(current)) + break; + } + + blk_finish_plug(&plug); + mutex_unlock(&dcc->cmd_lock); +} + int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { __u64 start = F2FS_BYTES_TO_BLK(range->start); __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; - unsigned int start_segno, end_segno, cur_segno; + unsigned int start_segno, end_segno; block_t start_block, end_block; struct cp_control cpc; struct discard_policy dpolicy; @@ -2403,40 +2422,27 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) cpc.reason = CP_DISCARD; cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); + cpc.trim_start = start_segno; + cpc.trim_end = end_segno; - /* do checkpoint to issue discard commands safely */ - for (cur_segno = start_segno; cur_segno <= end_segno; - cur_segno = cpc.trim_end + 1) { - cpc.trim_start = cur_segno; - - if (sbi->discard_blks == 0) - break; - else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi)) - cpc.trim_end = end_segno; - else - cpc.trim_end = min_t(unsigned int, - rounddown(cur_segno + - BATCHED_TRIM_SEGMENTS(sbi), - sbi->segs_per_sec) - 1, end_segno); - - mutex_lock(&sbi->gc_mutex); - err = write_checkpoint(sbi, &cpc); - mutex_unlock(&sbi->gc_mutex); - if (err) - break; + if (sbi->discard_blks == 0) + goto 
out; - schedule(); - } + mutex_lock(&sbi->gc_mutex); + err = write_checkpoint(sbi, &cpc); + mutex_unlock(&sbi->gc_mutex); + if (err) + goto out; start_block = START_BLOCK(sbi, start_segno); - end_block = START_BLOCK(sbi, min(cur_segno, end_segno) + 1); + end_block = START_BLOCK(sbi, end_segno + 1); - init_discard_policy(&dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); + __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block); trimmed = __wait_discard_cmd_range(sbi, &dpolicy, start_block, end_block); -out: range->len = F2FS_BLK_TO_BYTES(trimmed); +out: return err; } @@ -3823,8 +3829,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi) sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; sm_info->min_ssr_sections = reserved_sections(sbi); - sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; - INIT_LIST_HEAD(&sm_info->sit_entry_set); init_rwsem(&sm_info->curseg_lock); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 7cbddecdf41f7669d9fff9311a9875eda50af2d5..2b79c1a7a2f2a020e52c1274734712501dfdf2a0 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -740,6 +740,10 @@ static int parse_options(struct super_block *sb, char *options) } else if (strlen(name) == 6 && !strncmp(name, "strict", 6)) { F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; + } else if (strlen(name) == 9 && + !strncmp(name, "nobarrier", 9)) { + F2FS_OPTION(sbi).fsync_mode = + FSYNC_MODE_NOBARRIER; } else { kfree(name); return -EINVAL; diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index f33a56d6e6dd7916feee0206bfadce16f873dc78..2c53de9251becae26f05e13d06acc23239beb3ca 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -245,6 +245,9 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, return count; } + if (!strcmp(a->attr.name, "trim_sections")) + return -EINVAL; + *ui = t; if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0) diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 
0ad3fd3ad0b477c3e0ee2b74d4af78977e405f7b..ae9470f3643c243fab43650e1bd9cbaa6294811a 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op) _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); +again: spin_lock(&object->lock); cookie = object->cookie; @@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op) goto superseded; page = results[0]; _debug("gang %d [%lx]", n, page->index); - if (page->index >= op->store_limit) { - fscache_stat(&fscache_n_store_pages_over_limit); - goto superseded; - } radix_tree_tag_set(&cookie->stores, page->index, FSCACHE_COOKIE_STORING_TAG); @@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op) spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); + if (page->index >= op->store_limit) + goto discard_page; + fscache_stat(&fscache_n_store_pages); fscache_stat(&fscache_n_cop_write_page); ret = object->cache->ops->write_page(op, page); @@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op) _leave(""); return; +discard_page: + fscache_stat(&fscache_n_store_pages_over_limit); + fscache_end_page_write(object, page); + goto again; + superseded: /* this writer is going away and there aren't any more things to * write */ diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 2a29cf3371f69697e47579b4b58930447dc879f6..10f0fac031f43f7e974964cb83e2b1922cc6793f 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -803,7 +803,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_alloc_parms ap = { .aflags = 0, }; unsigned int data_blocks = 0, ind_blocks = 0, rblocks; - loff_t bytes, max_bytes, max_blks = UINT_MAX; + loff_t bytes, max_bytes, max_blks; int error; const loff_t pos = offset; const loff_t count = len; @@ -855,7 +855,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t 
offset, loff_t return error; /* ap.allowed tells us how many blocks quota will allow * us to write. Check if this reduces max_blks */ - if (ap.allowed && ap.allowed < max_blks) + max_blks = UINT_MAX; + if (ap.allowed) max_blks = ap.allowed; error = gfs2_inplace_reserve(ip, &ap); diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 5e47c935a51518d51e771cb0a2fd379e37523526..836f29480be6719c61dc2380a42b8556c6c88a99 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -45,6 +45,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); int ret; + + ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) return 0; ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index e5bb2de2262ae68c64a061673f93220ae8872383..3cba08c931eebe7a70403fe892a76aa8e355f0fd 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) return 0; out_put_hidden_dir: + cancel_delayed_work_sync(&sbi->sync_work); iput(sbi->hidden_dir); out_put_root: dput(sb->s_root); diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 0a754f38462e9623e5608e65f95606abfef453be..e5a6deb38e1e1be47803250b3de5d115ce5c7e88 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, __func__, inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->pino_nlink, inode->i_mapping->nrpages); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; fail: @@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; fail: @@ -575,8 
+573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; fail: @@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; fail: diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index b41596d71858b4a1fcaf4b3e133ebc7b72d48afd..56c3fcbfe80ed0b69156bcab2f981b4d322b0aba 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode, unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); - d_instantiate(dentry, ip); + d_instantiate_new(dentry, ip); } out2: @@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); - d_instantiate(dentry, ip); + d_instantiate_new(dentry, ip); } out2: @@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); - d_instantiate(dentry, ip); + d_instantiate_new(dentry, ip); } out2: @@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, unlock_new_inode(ip); iput(ip); } else { - unlock_new_inode(ip); - d_instantiate(dentry, ip); + d_instantiate_new(dentry, ip); } out1: diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index e9bea90dc0179770b9fbfa39fe6651c5949f0d62..fb85d04fdc4c21fadeb0fd21b622caf65fb4ac22 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -858,8 +858,10 @@ static int nfs4_set_client(struct nfs_server *server, if (IS_ERR(clp)) return PTR_ERR(clp); - if (server->nfs_client == clp) + if (server->nfs_client == 
clp) { + nfs_put_client(clp); return -ELOOP; + } /* * Query for the lease time on clientid setup or renewal @@ -1217,11 +1219,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname, clp->cl_proto, clnt->cl_timeout, clp->cl_minorversion, net); clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); - nfs_put_client(clp); if (error != 0) { nfs_server_insert_lists(server); return error; } + nfs_put_client(clp); if (server->nfs_client->cl_hostname == NULL) server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 515d13c196daf81f69dc0b5501b4b9780c8d2743..1ba4719de70dad2d860a05eb0e5889d74f8d3115 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) int err = nilfs_add_link(dentry, inode); if (!err) { - d_instantiate(dentry, inode); - unlock_new_inode(inode); + d_instantiate_new(dentry, inode); return 0; } inode_dec_link_count(inode); @@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) goto out_fail; nilfs_mark_inode_dirty(inode); - d_instantiate(dentry, inode); - unlock_new_inode(inode); + d_instantiate_new(dentry, inode); out: if (!err) err = nilfs_transaction_commit(dir->i_sb); diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index a2b19fbdcf469597041e9cb5a70c7c648b0cede8..6099a8034b1747cd8cc237d512a905c251384b2f 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -676,20 +676,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm) spin_unlock(&dlm->spinlock); } -int dlm_shutting_down(struct dlm_ctxt *dlm) -{ - int ret = 0; - - spin_lock(&dlm_domain_lock); - - if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) - ret = 1; - - spin_unlock(&dlm_domain_lock); - - return ret; -} - void dlm_unregister_domain(struct dlm_ctxt *dlm) { int leave = 0; diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h index 
fd6122a38dbdf04f11266af5fa094b2e00c4173c..8a9281411c18ff688cead233989606805523986f 100644 --- a/fs/ocfs2/dlm/dlmdomain.h +++ b/fs/ocfs2/dlm/dlmdomain.h @@ -28,7 +28,30 @@ extern spinlock_t dlm_domain_lock; extern struct list_head dlm_domains; -int dlm_shutting_down(struct dlm_ctxt *dlm); +static inline int dlm_joined(struct dlm_ctxt *dlm) +{ + int ret = 0; + + spin_lock(&dlm_domain_lock); + if (dlm->dlm_state == DLM_CTXT_JOINED) + ret = 1; + spin_unlock(&dlm_domain_lock); + + return ret; +} + +static inline int dlm_shutting_down(struct dlm_ctxt *dlm) +{ + int ret = 0; + + spin_lock(&dlm_domain_lock); + if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) + ret = 1; + spin_unlock(&dlm_domain_lock); + + return ret; +} + void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, int node_num); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index ec8f75813beb48c3faf2f3182a117d00b0dc4b77..505ab4281f36c0eda9f54353da7024d3b77d736b 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1378,6 +1378,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, if (!dlm_grab(dlm)) return -EINVAL; + if (!dlm_joined(dlm)) { + mlog(ML_ERROR, "Domain %s not joined! 
" + "lockres %.*s, master %u\n", + dlm->name, mres->lockname_len, + mres->lockname, mres->master); + dlm_put(dlm); + return -EINVAL; + } + BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); real_master = mres->master; diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c index 7e9e5d0ea3bc24a9b8f270497c3319a1f5bd0848..f8f3c73d266476eab2298a15a807d2829398b0ff 100644 --- a/fs/orangefs/namei.c +++ b/fs/orangefs/namei.c @@ -71,8 +71,7 @@ static int orangefs_create(struct inode *dir, get_khandle_from_ino(inode), dentry); - d_instantiate(dentry, inode); - unlock_new_inode(inode); + d_instantiate_new(dentry, inode); orangefs_set_timeout(dentry); ORANGEFS_I(inode)->getattr_time = jiffies - 1; ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; @@ -320,8 +319,7 @@ static int orangefs_symlink(struct inode *dir, "Assigned symlink inode new number of %pU\n", get_khandle_from_ino(inode)); - d_instantiate(dentry, inode); - unlock_new_inode(inode); + d_instantiate_new(dentry, inode); orangefs_set_timeout(dentry); ORANGEFS_I(inode)->getattr_time = jiffies - 1; ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; @@ -385,8 +383,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode "Assigned dir inode new number of %pU\n", get_khandle_from_ino(inode)); - d_instantiate(dentry, inode); - unlock_new_inode(inode); + d_instantiate_new(dentry, inode); orangefs_set_timeout(dentry); ORANGEFS_I(inode)->getattr_time = jiffies - 1; ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; diff --git a/fs/proc/array.c b/fs/proc/array.c index e6094a15ef3078490002e41605384040a05077a8..4ac811e1a26c18742a957908a85daba596d0c8a3 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -85,6 +85,7 @@ #include #include #include +#include #include #include #include @@ -347,6 +348,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) #ifdef CONFIG_SECCOMP seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); #endif + seq_printf(m, 
"\nSpeculation_Store_Bypass:\t"); + switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { + case -EINVAL: + seq_printf(m, "unknown"); + break; + case PR_SPEC_NOT_AFFECTED: + seq_printf(m, "not vulnerable"); + break; + case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE: + seq_printf(m, "thread force mitigated"); + break; + case PR_SPEC_PRCTL | PR_SPEC_DISABLE: + seq_printf(m, "thread mitigated"); + break; + case PR_SPEC_PRCTL | PR_SPEC_ENABLE: + seq_printf(m, "thread vulnerable"); + break; + case PR_SPEC_DISABLE: + seq_printf(m, "globally mitigated"); + break; + default: + seq_printf(m, "vulnerable"); + break; + } seq_putc(m, '\n'); } diff --git a/fs/proc/base.c b/fs/proc/base.c index ca189083ab2091e4d63f0e13b85a3b53b23bab0e..292cf94949cf260a56bbf9f222795a445e3d03d6 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -264,7 +264,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, * Inherently racy -- command line shares address space * with code and data. */ - rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0); + rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON); if (rv <= 0) goto out_free_page; @@ -282,7 +282,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, int nr_read; _count = min3(count, len, PAGE_SIZE); - nr_read = access_remote_vm(mm, p, page, _count, 0); + nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON); if (nr_read < 0) rv = nr_read; if (nr_read <= 0) @@ -328,7 +328,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf, bool final; _count = min3(count, len, PAGE_SIZE); - nr_read = access_remote_vm(mm, p, page, _count, 0); + nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON); if (nr_read < 0) rv = nr_read; if (nr_read <= 0) @@ -947,7 +947,7 @@ static ssize_t environ_read(struct file *file, char __user *buf, max_len = min_t(size_t, PAGE_SIZE, count); this_len = min(max_len, this_len); - retval = access_remote_vm(mm, (env_start + src), page, this_len, 0); + 
retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON); if (retval <= 0) { ret = retval; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index c5cbbdff3c3d683df9b651c457e54cc6ab14f6c4..82ac5f682b73a88352f44170dcd338f9fd8cfbdb 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -707,7 +707,10 @@ static bool proc_sys_link_fill_cache(struct file *file, struct ctl_table *table) { bool ret = true; + head = sysctl_head_grab(head); + if (IS_ERR(head)) + return false; if (S_ISLNK(table->mode)) { /* It is not an error if we can not follow the link ignore it */ diff --git a/fs/proc/uid.c b/fs/proc/uid.c index 9e15be510d7102d2a312c9f437cc0314d7489442..6a096d25109d1cf14943c3ab73a1ac767af95e3e 100644 --- a/fs/proc/uid.c +++ b/fs/proc/uid.c @@ -174,7 +174,7 @@ static int proc_uid_base_readdir(struct file *file, struct dir_context *ctx) return 0; for (u = uid_base_stuff + (ctx->pos - 2); - u <= uid_base_stuff + nents - 1; u++) { + u < uid_base_stuff + nents; u++) { if (!proc_fill_cache(file, ctx, u->name, u->len, proc_uident_instantiate, NULL, u)) break; diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index bd39a998843da62db4b634800813c3de29d969c7..5089dac0266020d705e54dcb8f06ca1a998ccec2 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(dir); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); retval = journal_end(&th); out_failed: @@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode goto out_failed; } - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); retval = journal_end(&th); out_failed: @@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode /* the above add_entry 
did not update dir's stat data */ reiserfs_update_sd(&th, dir); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); retval = journal_end(&th); out_failed: reiserfs_write_unlock(dir->i_sb); @@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir, goto out_failed; } - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); retval = journal_end(&th); out_failed: reiserfs_write_unlock(parent_dir->i_sb); diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c index 166f14b2400bff12d22df44918186b93eb91766d..776d549b397b3a3a5d838ea5e3d246119cdd0189 100644 --- a/fs/sdcardfs/dentry.c +++ b/fs/sdcardfs/dentry.c @@ -51,7 +51,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags) * whether the base obbpath has been changed or not */ if (is_obbpath_invalid(dentry)) { - d_drop(dentry); return 0; } @@ -65,7 +64,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags) if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) { err = lower_dentry->d_op->d_revalidate(lower_dentry, flags); if (err == 0) { - d_drop(dentry); goto out; } } @@ -73,14 +71,12 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags) spin_lock(&lower_dentry->d_lock); if (d_unhashed(lower_dentry)) { spin_unlock(&lower_dentry->d_lock); - d_drop(dentry); err = 0; goto out; } spin_unlock(&lower_dentry->d_lock); if (parent_lower_dentry != lower_cur_parent_dentry) { - d_drop(dentry); err = 0; goto out; } @@ -94,7 +90,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags) } if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) { - __d_drop(dentry); err = 0; } @@ -113,7 +108,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags) if (inode) { data = top_data_get(SDCARDFS_I(inode)); if (!data || data->abandoned) { - d_drop(dentry); err = 0; } if (data) diff --git a/fs/super.c b/fs/super.c index 
4cac2efc81cb59c9e21d09f93fb123166b804cf3..589f919ec20b56e369c13cb136607600a48abe61 100644 --- a/fs/super.c +++ b/fs/super.c @@ -120,13 +120,23 @@ static unsigned long super_cache_count(struct shrinker *shrink, sb = container_of(shrink, struct super_block, s_shrink); /* - * Don't call trylock_super as it is a potential - * scalability bottleneck. The counts could get updated - * between super_cache_count and super_cache_scan anyway. - * Call to super_cache_count with shrinker_rwsem held - * ensures the safety of call to list_lru_shrink_count() and - * s_op->nr_cached_objects(). + * We don't call trylock_super() here as it is a scalability bottleneck, + * so we're exposed to partial setup state. The shrinker rwsem does not + * protect filesystem operations backing list_lru_shrink_count() or + * s_op->nr_cached_objects(). Counts can change between + * super_cache_count and super_cache_scan, so we really don't need locks + * here. + * + * However, if we are currently mounting the superblock, the underlying + * filesystem might be in a state of partial construction and hence it + * is dangerous to access it. trylock_super() uses a SB_BORN check to + * avoid this situation, so do the same here. The memory barrier is + * matched with the one in mount_fs() as we don't hold locks here. */ + if (!(sb->s_flags & SB_BORN)) + return 0; + smp_rmb(); + if (sb->s_op && sb->s_op->nr_cached_objects) total_objects = sb->s_op->nr_cached_objects(sb, sc); @@ -1250,6 +1260,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, struct vfsm sb = root->d_sb; BUG_ON(!sb); WARN_ON(!sb->s_bdi); + + /* + * Write barrier is for super_cache_count(). We place it before setting + * SB_BORN as the data dependency between the two functions is the + * superblock structure contents that we just set up, not the SB_BORN + * flag. 
+ */ + smp_wmb(); sb->s_flags |= SB_BORN; error = security_sb_kern_mount(sb, flags, secdata); diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index aab87340d3de8883c12bd888056d51efe454b9c8..16f03d9929e5ed7d90366992726793db16d1177d 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -175,7 +175,6 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, int lnum, int offs) { - lnum = lnum; dbg_scan("stop scanning LEB %d at offset %d", lnum, offs); ubifs_assert(offs % c->min_io_size == 0); diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 885198dfd9f8ea8fa923edfcfccb3254802a3086..041bf34f781f8a5e3d0ccbbff99a4af7d59a7645 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -621,8 +621,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode) if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; } @@ -732,8 +731,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) inc_nlink(dir); dir->i_ctime = dir->i_mtime = current_time(dir); mark_inode_dirty(dir); - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); diff --git a/fs/udf/super.c b/fs/udf/super.c index 08bf097507f6dc5df6427a871050f888956b9243..9b0d6562d0a1077937bbba46b489895a2727c00e 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -2091,8 +2091,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) bool lvid_open = false; uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); - uopt.uid = INVALID_UID; - uopt.gid = INVALID_GID; + /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */ + uopt.uid = make_kuid(current_user_ns(), overflowuid); + uopt.gid = make_kgid(current_user_ns(), overflowgid); uopt.umask 
= 0; uopt.fmode = UDF_INVALID_MODE; uopt.dmode = UDF_INVALID_MODE; diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 32545cd00cebf0fb50a403a7275efc194ef6bb4f..d5f43ba76c598dea592339f8926327401b181483 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ufs_add_link(dentry, inode); if (!err) { - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; } inode_dec_link_count(inode); @@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) if (err) goto out_fail; - unlock_new_inode(inode); - d_instantiate(dentry, inode); + d_instantiate_new(dentry, inode); return 0; out_fail: diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index f965ce832bc0561acb50a67ce28373fc7fb0b091..516e0c57cf9c4257e8e3389bc98ec199840dd814 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -52,6 +52,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *); STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *); +/* + * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in + * the beginning of the block for a proper header with the location information + * and CRC. 
+ */ +unsigned int +xfs_agfl_size( + struct xfs_mount *mp) +{ + unsigned int size = mp->m_sb.sb_sectsize; + + if (xfs_sb_version_hascrc(&mp->m_sb)) + size -= sizeof(struct xfs_agfl); + + return size / sizeof(xfs_agblock_t); +} + unsigned int xfs_refc_block( struct xfs_mount *mp) @@ -540,7 +557,7 @@ xfs_agfl_verify( if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) return false; - for (i = 0; i < XFS_AGFL_SIZE(mp); i++) { + for (i = 0; i < xfs_agfl_size(mp); i++) { if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK && be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks) return false; @@ -2039,6 +2056,93 @@ xfs_alloc_space_available( return true; } +/* + * Check the agfl fields of the agf for inconsistency or corruption. The purpose + * is to detect an agfl header padding mismatch between current and early v5 + * kernels. This problem manifests as a 1-slot size difference between the + * on-disk flcount and the active [first, last] range of a wrapped agfl. This + * may also catch variants of agfl count corruption unrelated to padding. Either + * way, we'll reset the agfl and warn the user. + * + * Return true if a reset is required before the agfl can be used, false + * otherwise. + */ +static bool +xfs_agfl_needs_reset( + struct xfs_mount *mp, + struct xfs_agf *agf) +{ + uint32_t f = be32_to_cpu(agf->agf_flfirst); + uint32_t l = be32_to_cpu(agf->agf_fllast); + uint32_t c = be32_to_cpu(agf->agf_flcount); + int agfl_size = xfs_agfl_size(mp); + int active; + + /* no agfl header on v4 supers */ + if (!xfs_sb_version_hascrc(&mp->m_sb)) + return false; + + /* + * The agf read verifier catches severe corruption of these fields. + * Repeat some sanity checks to cover a packed -> unpacked mismatch if + * the verifier allows it. + */ + if (f >= agfl_size || l >= agfl_size) + return true; + if (c > agfl_size) + return true; + + /* + * Check consistency between the on-disk count and the active range. 
An + * agfl padding mismatch manifests as an inconsistent flcount. + */ + if (c && l >= f) + active = l - f + 1; + else if (c) + active = agfl_size - f + l + 1; + else + active = 0; + + return active != c; +} + +/* + * Reset the agfl to an empty state. Ignore/drop any existing blocks since the + * agfl content cannot be trusted. Warn the user that a repair is required to + * recover leaked blocks. + * + * The purpose of this mechanism is to handle filesystems affected by the agfl + * header padding mismatch problem. A reset keeps the filesystem online with a + * relatively minor free space accounting inconsistency rather than suffer the + * inevitable crash from use of an invalid agfl block. + */ +static void +xfs_agfl_reset( + struct xfs_trans *tp, + struct xfs_buf *agbp, + struct xfs_perag *pag) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); + + ASSERT(pag->pagf_agflreset); + trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_); + + xfs_warn(mp, + "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. " + "Please unmount and run xfs_repair.", + pag->pag_agno, pag->pagf_flcount); + + agf->agf_flfirst = 0; + agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1); + agf->agf_flcount = 0; + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST | + XFS_AGF_FLCOUNT); + + pag->pagf_flcount = 0; + pag->pagf_agflreset = false; +} + /* * Decide whether to use this allocation group for this allocation. * If so, fix up the btree freelist's size. @@ -2100,6 +2204,10 @@ xfs_alloc_fix_freelist( } } + /* reset a padding mismatched agfl before final free space check */ + if (pag->pagf_agflreset) + xfs_agfl_reset(tp, agbp, pag); + /* If there isn't enough total space or single-extent, reject it. 
*/ need = xfs_alloc_min_freelist(mp, pag); if (!xfs_alloc_space_available(args, need, flags)) @@ -2252,10 +2360,11 @@ xfs_alloc_get_freelist( bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]); be32_add_cpu(&agf->agf_flfirst, 1); xfs_trans_brelse(tp, agflbp); - if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) + if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp)) agf->agf_flfirst = 0; pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); + ASSERT(!pag->pagf_agflreset); be32_add_cpu(&agf->agf_flcount, -1); xfs_trans_agflist_delta(tp, -1); pag->pagf_flcount--; @@ -2363,10 +2472,11 @@ xfs_alloc_put_freelist( be32_to_cpu(agf->agf_seqno), &agflbp))) return error; be32_add_cpu(&agf->agf_fllast, 1); - if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) + if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp)) agf->agf_fllast = 0; pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); + ASSERT(!pag->pagf_agflreset); be32_add_cpu(&agf->agf_flcount, 1); xfs_trans_agflist_delta(tp, 1); pag->pagf_flcount++; @@ -2381,7 +2491,7 @@ xfs_alloc_put_freelist( xfs_alloc_log_agf(tp, agbp, logflags); - ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)); + ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)); agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp); blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)]; @@ -2414,9 +2524,9 @@ xfs_agf_verify( if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && - be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && - be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && - be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp))) + be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) && + be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) && + be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp))) return false; if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || @@ -2572,6 +2682,7 @@ xfs_alloc_read_agf( 
pag->pagb_count = 0; pag->pagb_tree = RB_ROOT; pag->pagf_init = 1; + pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf); } #ifdef DEBUG else if (!XFS_FORCED_SHUTDOWN(mp)) { diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h index ef26edc2e938349b918157d20557fac6ab9d10b0..346ba8ab68b57e25ba4282e62309636667e45815 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h @@ -26,6 +26,8 @@ struct xfs_trans; extern struct workqueue_struct *xfs_alloc_wq; +unsigned int xfs_agfl_size(struct xfs_mount *mp); + /* * Freespace allocation types. Argument to xfs_alloc_[v]extent. */ diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 23229f0c5b15d95f1a8be937902eec725ea5e4d0..ed4481b2f1131f5c2259420367ca5a85757e6bbd 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -798,24 +798,13 @@ typedef struct xfs_agi { &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \ (__be32 *)(bp)->b_addr) -/* - * Size of the AGFL. For CRC-enabled filesystes we steal a couple of - * slots in the beginning of the block for a proper header with the - * location information and CRC. - */ -#define XFS_AGFL_SIZE(mp) \ - (((mp)->m_sb.sb_sectsize - \ - (xfs_sb_version_hascrc(&((mp)->m_sb)) ? 
\ - sizeof(struct xfs_agfl) : 0)) / \ - sizeof(xfs_agblock_t)) - typedef struct xfs_agfl { __be32 agfl_magicnum; __be32 agfl_seqno; uuid_t agfl_uuid; __be64 agfl_lsn; __be32 agfl_crc; - __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */ + __be32 agfl_bno[]; /* actually xfs_agfl_size(mp) */ } __attribute__((packed)) xfs_agfl_t; #define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc) diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index b2cde54261822a514e5bbabb6d10ab3d4a1f9806..7b68e6c9a474ba367cbebe3ebe550f8e3c22450b 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -50,19 +50,19 @@ xfs_trim_extents( pag = xfs_perag_get(mp, agno); - error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); - if (error || !agbp) - goto out_put_perag; - - cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); - /* * Force out the log. This means any transactions that might have freed - * space before we took the AGF buffer lock are now on disk, and the + * space before we take the AGF buffer lock are now on disk, and the * volatile disk cache is flushed. */ xfs_log_force(mp, XFS_LOG_SYNC); + error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); + if (error || !agbp) + goto out_put_perag; + + cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); + /* * Look up the longest btree in the AGF and start with it. 
*/ diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 8f22fc579dbba4abf9b609040802d24e9cf2f732..40783a313df957b3cd4ce45f83864ceb87dc1162 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -294,7 +294,7 @@ xfs_growfs_data_private( } agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp); - for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) + for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++) agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); error = xfs_bwrite(bp); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index e0792d036be202d18bd479b5c6cfff904f996778..d359a88ea2490079fa9158f37ee766b5ab475f68 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -353,6 +353,7 @@ typedef struct xfs_perag { char pagi_inodeok; /* The agi is ok for inodes */ uint8_t pagf_levels[XFS_BTNUM_AGF]; /* # of levels in bno & cnt btree */ + bool pagf_agflreset; /* agfl requires reset before use */ uint32_t pagf_flcount; /* count of blocks in freelist */ xfs_extlen_t pagf_freeblks; /* total free blocks */ xfs_extlen_t pagf_longest; /* longest free space */ diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index bb5514688d470b046e0f125b2832ba2d30b3d34d..06bc87369632f33f8081a44166d16ed0d49d5f0e 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -1513,7 +1513,7 @@ TRACE_EVENT(xfs_extent_busy_trim, __entry->tlen) ); -TRACE_EVENT(xfs_agf, +DECLARE_EVENT_CLASS(xfs_agf_class, TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, unsigned long caller_ip), TP_ARGS(mp, agf, flags, caller_ip), @@ -1569,6 +1569,13 @@ TRACE_EVENT(xfs_agf, __entry->longest, (void *)__entry->caller_ip) ); +#define DEFINE_AGF_EVENT(name) \ +DEFINE_EVENT(xfs_agf_class, name, \ + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \ + unsigned long caller_ip), \ + TP_ARGS(mp, agf, flags, caller_ip)) +DEFINE_AGF_EVENT(xfs_agf); +DEFINE_AGF_EVENT(xfs_agfl_reset); TRACE_EVENT(xfs_free_extent, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, diff --git 
a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 49be4bba1e9641de9713ebc06532e581f934e40b..34a028a7bcc53c095a2028f2d0cee89cd3835292 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -244,4 +244,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) #define atomic_long_inc_not_zero(l) \ ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) +#define atomic_long_cond_read_acquire(v, c) \ + ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c)) + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index af2cc94a61bf9e1e7f4e29d6bd1fd1ad997bbf5e..ae1a33aa8955af39bb6bb2255bd31e5ecbfa188c 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -50,6 +50,7 @@ struct bug_entry { #ifndef HAVE_ARCH_BUG #define BUG() do { \ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + barrier_before_unreachable(); \ panic("BUG!"); \ } while (0) #endif diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 7d026bf277131f7bc4c79529cc26488e3a8281ee..c39a93a6d91d14ce00a636385736b9dc895bcd10 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -26,30 +26,17 @@ /* * Writer states & reader shift and bias. 
- * - * | +0 | +1 | +2 | +3 | - * ----+----+----+----+----+ - * LE | 78 | 56 | 34 | 12 | 0x12345678 - * ----+----+----+----+----+ - * | wr | rd | - * +----+----+----+----+ - * - * ----+----+----+----+----+ - * BE | 12 | 34 | 56 | 78 | 0x12345678 - * ----+----+----+----+----+ - * | rd | wr | - * +----+----+----+----+ */ -#define _QW_WAITING 1 /* A writer is waiting */ -#define _QW_LOCKED 0xff /* A writer holds the lock */ -#define _QW_WMASK 0xff /* Writer mask */ -#define _QR_SHIFT 8 /* Reader count shift */ +#define _QW_WAITING 0x100 /* A writer is waiting */ +#define _QW_LOCKED 0x0ff /* A writer holds the lock */ +#define _QW_WMASK 0x1ff /* Writer mask */ +#define _QR_SHIFT 9 /* Reader count shift */ #define _QR_BIAS (1U << _QR_SHIFT) /* * External function declarations */ -extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts); +extern void queued_read_lock_slowpath(struct qrwlock *lock); extern void queued_write_lock_slowpath(struct qrwlock *lock); /** @@ -118,7 +105,7 @@ static inline void queued_read_lock(struct qrwlock *lock) return; /* The slowpath will decrement the reader count, if necessary. 
*/ - queued_read_lock_slowpath(lock, cnts); + queued_read_lock_slowpath(lock); } /** @@ -146,23 +133,13 @@ static inline void queued_read_unlock(struct qrwlock *lock) (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts); } -/** - * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock - * @lock : Pointer to queue rwlock structure - * Return: the write byte address of a queue rwlock - */ -static inline u8 *__qrwlock_write_byte(struct qrwlock *lock) -{ - return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); -} - /** * queued_write_unlock - release write lock of a queue rwlock * @lock : Pointer to queue rwlock structure */ static inline void queued_write_unlock(struct qrwlock *lock) { - smp_store_release(__qrwlock_write_byte(lock), 0); + smp_store_release(&lock->wlocked, 0); } /* diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h index d93573eff16294aa1a9efa0a79a1f428ddd08c23..137ecdd16daa01c9ced213a9ee38de7b9f16181a 100644 --- a/include/asm-generic/qrwlock_types.h +++ b/include/asm-generic/qrwlock_types.h @@ -10,12 +10,23 @@ */ typedef struct qrwlock { - atomic_t cnts; + union { + atomic_t cnts; + struct { +#ifdef __LITTLE_ENDIAN + u8 wlocked; /* Locked for write? */ + u8 __lstate[3]; +#else + u8 __lstate[3]; + u8 wlocked; /* Locked for write? 
*/ +#endif + }; + }; arch_spinlock_t wait_lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { \ - .cnts = ATOMIC_INIT(0), \ + { .cnts = ATOMIC_INIT(0), }, \ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ } diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index bb7250f644d7ba36145582aff3b800af8ac444dd..1318968793422e84fa8b825215cb47a663788b21 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -708,6 +708,11 @@ struct drm_cmdline_mode { * @hdr_avg_luminance: desired avg luminance obtained from HDR block * @hdr_min_luminance: desired min luminance obtained from HDR block * @hdr_supported: does the sink support HDR content + * @max_tmds_char: indicates the maximum TMDS Character Rate supported + * @scdc_present: when set the sink supports SCDC functionality + * @rr_capable: when set the sink is capable of initiating an SCDC read request + * @supports_scramble: when set the sink supports less than 340Mcsc scrambling + * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h (DRM_EDID_3D_*) * @edid_corrupt: indicates whether the last read EDID was corrupt * @debugfs_entry: debugfs directory for this connector * @has_tile: is this connector connected to a tiled monitor @@ -892,6 +897,13 @@ struct drm_connector { u32 hdr_min_luminance; bool hdr_supported; + /* EDID bits HDMI 2.0 */ + int max_tmds_char; /* in Mcsc */ + bool scdc_present; + bool rr_capable; + bool supports_scramble; + int flags_3d; + /* Flag for raw EDID header corruption - used in Displayport * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6 */ diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index a992434ded999d1a6b77cf34dd26a2c0f5a8fbd9..44d22ad182dd8c4eebb9519ed46041f9b2d61ef6 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -279,6 +279,11 @@ struct detailed_timing { #define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) +/* HDMI 2.0 */ +#define DRM_EDID_3D_INDEPENDENT_VIEW (1 << 2) +#define 
DRM_EDID_3D_DUAL_VIEW (1 << 1) +#define DRM_EDID_3D_OSD_DISPARITY (1 << 0) + struct edid { u8 header[8]; /* Vendor & product info */ diff --git a/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h b/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h new file mode 100644 index 0000000000000000000000000000000000000000..cc9d2cfcfec0cd66f7f4457a51be5519ae3c5d0b --- /dev/null +++ b/include/dt-bindings/clock/qcom,camcc-sdmmagpie.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SDMMAGPIE_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SDMMAGPIE_H + +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_AREG_CLK 1 +#define CAM_CC_BPS_AXI_CLK 2 +#define CAM_CC_BPS_CLK 3 +#define CAM_CC_BPS_CLK_SRC 4 +#define CAM_CC_CAMNOC_AXI_CLK 5 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 6 +#define CAM_CC_CAMNOC_DCD_XO_CLK 7 +#define CAM_CC_CCI_0_CLK 8 +#define CAM_CC_CCI_0_CLK_SRC 9 +#define CAM_CC_CCI_1_CLK 10 +#define CAM_CC_CCI_1_CLK_SRC 11 +#define CAM_CC_CORE_AHB_CLK 12 +#define CAM_CC_CPAS_AHB_CLK 13 +#define CAM_CC_CPHY_RX_CLK_SRC 14 +#define CAM_CC_CSI0PHYTIMER_CLK 15 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16 +#define CAM_CC_CSI1PHYTIMER_CLK 17 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18 +#define CAM_CC_CSI2PHYTIMER_CLK 19 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20 +#define CAM_CC_CSI3PHYTIMER_CLK 21 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22 +#define CAM_CC_CSIPHY0_CLK 23 +#define CAM_CC_CSIPHY1_CLK 24 +#define CAM_CC_CSIPHY2_CLK 25 +#define CAM_CC_CSIPHY3_CLK 26 +#define 
CAM_CC_FAST_AHB_CLK_SRC 27 +#define CAM_CC_FD_CORE_CLK 28 +#define CAM_CC_FD_CORE_CLK_SRC 29 +#define CAM_CC_FD_CORE_UAR_CLK 30 +#define CAM_CC_GDSC_CLK 31 +#define CAM_CC_ICP_AHB_CLK 32 +#define CAM_CC_ICP_CLK 33 +#define CAM_CC_ICP_CLK_SRC 34 +#define CAM_CC_IFE_0_AXI_CLK 35 +#define CAM_CC_IFE_0_CLK 36 +#define CAM_CC_IFE_0_CLK_SRC 37 +#define CAM_CC_IFE_0_CPHY_RX_CLK 38 +#define CAM_CC_IFE_0_CSID_CLK 39 +#define CAM_CC_IFE_0_CSID_CLK_SRC 40 +#define CAM_CC_IFE_0_DSP_CLK 41 +#define CAM_CC_IFE_1_AXI_CLK 42 +#define CAM_CC_IFE_1_CLK 43 +#define CAM_CC_IFE_1_CLK_SRC 44 +#define CAM_CC_IFE_1_CPHY_RX_CLK 45 +#define CAM_CC_IFE_1_CSID_CLK 46 +#define CAM_CC_IFE_1_CSID_CLK_SRC 47 +#define CAM_CC_IFE_1_DSP_CLK 48 +#define CAM_CC_IFE_LITE_CLK 49 +#define CAM_CC_IFE_LITE_CLK_SRC 50 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 51 +#define CAM_CC_IFE_LITE_CSID_CLK 52 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 53 +#define CAM_CC_IPE_0_AHB_CLK 54 +#define CAM_CC_IPE_0_AREG_CLK 55 +#define CAM_CC_IPE_0_AXI_CLK 56 +#define CAM_CC_IPE_0_CLK 57 +#define CAM_CC_IPE_0_CLK_SRC 58 +#define CAM_CC_IPE_1_AHB_CLK 59 +#define CAM_CC_IPE_1_AREG_CLK 60 +#define CAM_CC_IPE_1_AXI_CLK 61 +#define CAM_CC_IPE_1_CLK 62 +#define CAM_CC_JPEG_CLK 63 +#define CAM_CC_JPEG_CLK_SRC 64 +#define CAM_CC_LRME_CLK 65 +#define CAM_CC_LRME_CLK_SRC 66 +#define CAM_CC_MCLK0_CLK 67 +#define CAM_CC_MCLK0_CLK_SRC 68 +#define CAM_CC_MCLK1_CLK 69 +#define CAM_CC_MCLK1_CLK_SRC 70 +#define CAM_CC_MCLK2_CLK 71 +#define CAM_CC_MCLK2_CLK_SRC 72 +#define CAM_CC_MCLK3_CLK 73 +#define CAM_CC_MCLK3_CLK_SRC 74 +#define CAM_CC_PLL0 75 +#define CAM_CC_PLL0_OUT_EVEN 76 +#define CAM_CC_PLL0_OUT_ODD 77 +#define CAM_CC_PLL1 78 +#define CAM_CC_PLL1_OUT_EVEN 79 +#define CAM_CC_PLL2 80 +#define CAM_CC_PLL2_OUT_AUX 81 +#define CAM_CC_PLL2_OUT_MAIN 82 +#define CAM_CC_PLL3 83 +#define CAM_CC_PLL3_OUT_EVEN 84 +#define CAM_CC_PLL4 85 +#define CAM_CC_PLL4_OUT_EVEN 86 +#define CAM_CC_PLL_TEST_CLK 87 +#define CAM_CC_QDSS_DEBUG_CLK 88 +#define 
CAM_CC_QDSS_DEBUG_CLK_SRC 89 +#define CAM_CC_QDSS_DEBUG_XO_CLK 90 +#define CAM_CC_SLEEP_CLK 91 +#define CAM_CC_SLEEP_CLK_SRC 92 +#define CAM_CC_SLOW_AHB_CLK_SRC 93 +#define CAM_CC_SPDM_BPS_CLK 94 +#define CAM_CC_SPDM_IFE_0_CLK 95 +#define CAM_CC_SPDM_IFE_0_CSID_CLK 96 +#define CAM_CC_SPDM_IPE_0_CLK 97 +#define CAM_CC_SPDM_IPE_1_CLK 98 +#define CAM_CC_SPDM_JPEG_CLK 99 +#define CAM_CC_XO_CLK_SRC 100 + +#define BPS_GDSC 0 +#define IFE_0_GDSC 1 +#define IFE_1_GDSC 2 +#define IPE_0_GDSC 3 +#define IPE_1_GDSC 4 +#define TITAN_TOP_GDSC 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,cpu-qcs405.h b/include/dt-bindings/clock/qcom,cpu-qcs405.h index 16ab3845695c15a219d17ccea0a38badbb874fde..b207e3ec76b9312dcc79387a1af2c7bba87e5dc0 100644 --- a/include/dt-bindings/clock/qcom,cpu-qcs405.h +++ b/include/dt-bindings/clock/qcom,cpu-qcs405.h @@ -11,9 +11,10 @@ * GNU General Public License for more details. */ -#ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_QCS405_H -#define _DT_BINDINGS_CLK_MSM_CPU_CC_QCS405_H +#ifndef _DT_BINDINGS_CLK_QCOM_CPU_QCS405_H +#define _DT_BINDINGS_CLK_QCOM_CPU_QCS405_H -#define A53_PERF_CL_CLK 0 +#define APCS_CPU_PLL 0 +#define APCS_MUX_CLK 1 #endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sdmmagpie.h b/include/dt-bindings/clock/qcom,dispcc-sdmmagpie.h new file mode 100644 index 0000000000000000000000000000000000000000..0a119e75cf85d624d060796d178dc89c47ef8dd1 --- /dev/null +++ b/include/dt-bindings/clock/qcom,dispcc-sdmmagpie.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SDMMAGPIE_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SDMMAGPIE_H + +#define DISP_CC_DEBUG_CLK 0 +#define DISP_CC_MDSS_AHB_CLK 1 +#define DISP_CC_MDSS_AHB_CLK_SRC 2 +#define DISP_CC_MDSS_BYTE0_CLK 3 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 4 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 5 +#define DISP_CC_MDSS_BYTE1_CLK 6 +#define DISP_CC_MDSS_BYTE1_CLK_SRC 7 +#define DISP_CC_MDSS_BYTE1_INTF_CLK 8 +#define DISP_CC_MDSS_DP_AUX_CLK 9 +#define DISP_CC_MDSS_DP_AUX_CLK_SRC 10 +#define DISP_CC_MDSS_DP_CRYPTO_CLK 11 +#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 12 +#define DISP_CC_MDSS_DP_LINK_CLK 13 +#define DISP_CC_MDSS_DP_LINK_CLK_SRC 14 +#define DISP_CC_MDSS_DP_LINK_INTF_CLK 15 +#define DISP_CC_MDSS_DP_PIXEL1_CLK 16 +#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 17 +#define DISP_CC_MDSS_DP_PIXEL_CLK 18 +#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 19 +#define DISP_CC_MDSS_ESC0_CLK 20 +#define DISP_CC_MDSS_ESC0_CLK_SRC 21 +#define DISP_CC_MDSS_ESC1_CLK 22 +#define DISP_CC_MDSS_ESC1_CLK_SRC 23 +#define DISP_CC_MDSS_MDP_CLK 24 +#define DISP_CC_MDSS_MDP_CLK_SRC 25 +#define DISP_CC_MDSS_MDP_LUT_CLK 26 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 27 +#define DISP_CC_MDSS_PCLK0_CLK 28 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 29 +#define DISP_CC_MDSS_PCLK1_CLK 30 +#define DISP_CC_MDSS_PCLK1_CLK_SRC 31 +#define DISP_CC_MDSS_ROT_CLK 32 +#define DISP_CC_MDSS_ROT_CLK_SRC 33 +#define DISP_CC_MDSS_RSCC_AHB_CLK 34 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 35 +#define DISP_CC_MDSS_VSYNC_CLK 36 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 37 +#define DISP_CC_PLL0 38 +#define DISP_CC_PLL_TEST_CLK 39 +#define DISP_CC_XO_CLK 40 +#define DISP_CC_XO_CLK_SRC 41 + +#define MDSS_CORE_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-qcs405.h b/include/dt-bindings/clock/qcom,gcc-qcs405.h index 335c91724e02f1769b860f50a32f71b14470c594..6c2bed052f0430f878ddbedae73bd8c0b63f6431 100644 --- a/include/dt-bindings/clock/qcom,gcc-qcs405.h +++ 
b/include/dt-bindings/clock/qcom,gcc-qcs405.h @@ -143,6 +143,17 @@ #define GPLL6_OUT_AUX 126 #define MDSS_MDP_VOTE_CLK 127 #define MDSS_ROTATOR_VOTE_CLK 128 +#define GCC_BIMC_GPU_CLK 129 +#define GCC_GTCU_AHB_CLK 130 +#define GCC_GFX_TCU_CLK 131 +#define GCC_GFX_TBU_CLK 132 +#define GCC_SMMU_CFG_CLK 133 +#define GCC_APSS_TCU_CLK 134 +#define GCC_CRYPTO_AHB_CLK 135 +#define GCC_CRYPTO_AXI_CLK 136 +#define GCC_CRYPTO_CLK 137 +#define GCC_MDP_TBU_CLK 138 +#define GCC_QDSS_DAP_CLK 139 #define GCC_GENI_IR_BCR 0 #define GCC_USB_HS_BCR 1 diff --git a/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h b/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h new file mode 100644 index 0000000000000000000000000000000000000000..823506bea7b7fdcff1324aec1c68688c53e5047a --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sdmmagpie.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDMMAGPIE_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SDMMAGPIE_H + +#define GCC_AGGRE_NOC_PCIE_TBU_CLK 0 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 1 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 2 +#define GCC_APC_VS_CLK 3 +#define GCC_BOOT_ROM_AHB_CLK 4 +#define GCC_CAMERA_AHB_CLK 5 +#define GCC_CAMERA_HF_AXI_CLK 6 +#define GCC_CAMERA_SF_AXI_CLK 7 +#define GCC_CAMERA_XO_CLK 8 +#define GCC_CE1_AHB_CLK 9 +#define GCC_CE1_AXI_CLK 10 +#define GCC_CE1_CLK 11 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 12 +#define GCC_CPUSS_AHB_CLK 13 +#define GCC_CPUSS_AHB_CLK_SRC 14 +#define GCC_CPUSS_GNOC_CLK 15 +#define GCC_CPUSS_RBCPR_CLK 16 +#define GCC_CPUSS_RBCPR_CLK_SRC 17 +#define GCC_DDRSS_GPU_AXI_CLK 18 +#define GCC_DISP_AHB_CLK 19 +#define GCC_DISP_GPLL0_CLK_SRC 20 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 21 +#define GCC_DISP_HF_AXI_CLK 22 +#define GCC_DISP_SF_AXI_CLK 23 +#define GCC_DISP_XO_CLK 24 +#define GCC_GP1_CLK 25 +#define GCC_GP1_CLK_SRC 26 +#define GCC_GP2_CLK 27 +#define GCC_GP2_CLK_SRC 28 +#define GCC_GP3_CLK 29 +#define GCC_GP3_CLK_SRC 30 +#define GCC_GPU_CFG_AHB_CLK 31 +#define GCC_GPU_GPLL0_CLK_SRC 32 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 33 +#define GCC_GPU_MEMNOC_GFX_CLK 34 +#define GCC_GPU_SNOC_DVM_GFX_CLK 35 +#define GCC_GPU_VS_CLK 36 +#define GCC_MSS_AXIS2_CLK 37 +#define GCC_MSS_CFG_AHB_CLK 38 +#define GCC_MSS_GPLL0_DIV_CLK_SRC 39 +#define GCC_MSS_MFAB_AXIS_CLK 40 +#define GCC_MSS_Q6_MEMNOC_AXI_CLK 41 +#define GCC_MSS_SNOC_AXI_CLK 42 +#define GCC_MSS_VS_CLK 43 +#define GCC_NPU_AXI_CLK 44 +#define GCC_NPU_CFG_AHB_CLK 45 +#define GCC_NPU_GPLL0_CLK_SRC 46 +#define GCC_NPU_GPLL0_DIV_CLK_SRC 47 +#define GCC_PCIE_0_AUX_CLK 48 +#define GCC_PCIE_0_AUX_CLK_SRC 49 +#define GCC_PCIE_0_CFG_AHB_CLK 50 +#define GCC_PCIE_0_MSTR_AXI_CLK 51 +#define GCC_PCIE_0_PIPE_CLK 52 +#define GCC_PCIE_0_SLV_AXI_CLK 53 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 54 +#define GCC_PCIE_PHY_AUX_CLK 55 +#define GCC_PCIE_PHY_REFGEN_CLK 56 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 57 +#define 
GCC_PDM2_CLK 58 +#define GCC_PDM2_CLK_SRC 59 +#define GCC_PDM_AHB_CLK 60 +#define GCC_PDM_XO4_CLK 61 +#define GCC_PRNG_AHB_CLK 62 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 63 +#define GCC_QUPV3_WRAP0_CORE_CLK 64 +#define GCC_QUPV3_WRAP0_S0_CLK 65 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 66 +#define GCC_QUPV3_WRAP0_S1_CLK 67 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 68 +#define GCC_QUPV3_WRAP0_S2_CLK 69 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 70 +#define GCC_QUPV3_WRAP0_S3_CLK 71 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 72 +#define GCC_QUPV3_WRAP0_S4_CLK 73 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 74 +#define GCC_QUPV3_WRAP0_S5_CLK 75 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 76 +#define GCC_QUPV3_WRAP0_S6_CLK 77 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 78 +#define GCC_QUPV3_WRAP0_S7_CLK 79 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 80 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 81 +#define GCC_QUPV3_WRAP1_CORE_CLK 82 +#define GCC_QUPV3_WRAP1_S0_CLK 83 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 84 +#define GCC_QUPV3_WRAP1_S1_CLK 85 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 86 +#define GCC_QUPV3_WRAP1_S2_CLK 87 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 88 +#define GCC_QUPV3_WRAP1_S3_CLK 89 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 90 +#define GCC_QUPV3_WRAP1_S4_CLK 91 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 92 +#define GCC_QUPV3_WRAP1_S5_CLK 93 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 94 +#define GCC_QUPV3_WRAP1_S6_CLK 95 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 96 +#define GCC_QUPV3_WRAP1_S7_CLK 97 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 98 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 99 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 100 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 101 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 102 +#define GCC_SDCC1_AHB_CLK 103 +#define GCC_SDCC1_APPS_CLK 104 +#define GCC_SDCC1_APPS_CLK_SRC 105 +#define GCC_SDCC1_ICE_CORE_CLK 106 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 107 +#define GCC_SDCC2_AHB_CLK 108 +#define GCC_SDCC2_APPS_CLK 109 +#define GCC_SDCC2_APPS_CLK_SRC 110 +#define GCC_SDCC4_AHB_CLK 111 +#define GCC_SDCC4_APPS_CLK 112 +#define GCC_SDCC4_APPS_CLK_SRC 113 
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 114 +#define GCC_TSIF_AHB_CLK 115 +#define GCC_TSIF_INACTIVITY_TIMERS_CLK 116 +#define GCC_TSIF_REF_CLK 117 +#define GCC_TSIF_REF_CLK_SRC 118 +#define GCC_UFS_PHY_AHB_CLK 119 +#define GCC_UFS_PHY_AXI_CLK 120 +#define GCC_UFS_PHY_AXI_CLK_SRC 121 +#define GCC_UFS_PHY_ICE_CORE_CLK 122 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 123 +#define GCC_UFS_PHY_PHY_AUX_CLK 124 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 125 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 126 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 127 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 128 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 129 +#define GCC_USB30_PRIM_MASTER_CLK 130 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 131 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 132 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 133 +#define GCC_USB30_PRIM_SLEEP_CLK 134 +#define GCC_USB3_PRIM_PHY_AUX_CLK 135 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 136 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 137 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 138 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 139 +#define GCC_VDDA_VS_CLK 140 +#define GCC_VDDCX_VS_CLK 141 +#define GCC_VDDMX_VS_CLK 142 +#define GCC_VIDEO_AHB_CLK 143 +#define GCC_VIDEO_AXI_CLK 144 +#define GCC_VIDEO_XO_CLK 145 +#define GCC_VS_CTRL_AHB_CLK 146 +#define GCC_VS_CTRL_CLK 147 +#define GCC_VS_CTRL_CLK_SRC 148 +#define GCC_VSENSOR_CLK_SRC 149 +#define GPLL0 150 +#define GPLL0_OUT_EVEN 151 +#define GPLL6 152 +#define GPLL7 153 +#define GCC_USB3_PRIM_CLKREF_CLK 154 + +#define PCIE_0_GDSC 0 +#define PCIE_TBU_GDSC 1 +#define UFS_PHY_GDSC 2 +#define USB30_PRIM_GDSC 3 + +#define GCC_PCIE_0_BCR 1 +#define GCC_PCIE_PHY_BCR 2 +#define GCC_UFS_PHY_BCR 3 +#define GCC_USB30_PRIM_BCR 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h b/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h new file mode 100644 index 0000000000000000000000000000000000000000..d7160420a7d367418460fb7d389217db11ce325f --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sdmmagpie.h @@ -0,0 +1,41 @@ +/* + * 
Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SDMMAGPIE_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SDMMAGPIE_H + +#define GPU_CC_ACD_AHB_CLK 0 +#define GPU_CC_ACD_CXO_CLK 1 +#define GPU_CC_AHB_CLK 2 +#define GPU_CC_CRC_AHB_CLK 3 +#define GPU_CC_CX_APB_CLK 4 +#define GPU_CC_CX_GFX3D_CLK 5 +#define GPU_CC_CX_GFX3D_SLV_CLK 6 +#define GPU_CC_CX_GMU_CLK 7 +#define GPU_CC_CX_SNOC_DVM_CLK 8 +#define GPU_CC_CXO_AON_CLK 9 +#define GPU_CC_CXO_CLK 10 +#define GPU_CC_GMU_CLK_SRC 11 +#define GPU_CC_GX_CXO_CLK 12 +#define GPU_CC_GX_GFX3D_CLK 13 +#define GPU_CC_GX_GFX3D_CLK_SRC 14 +#define GPU_CC_GX_GMU_CLK 15 +#define GPU_CC_GX_VSENSE_CLK 16 +#define GPU_CC_PLL0 17 +#define GPU_CC_PLL0_OUT_EVEN 18 +#define GPU_CC_SLEEP_CLK 19 + +#define CX_GDSC 0 +#define GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,npucc-sdmmagpie.h b/include/dt-bindings/clock/qcom,npucc-sdmmagpie.h new file mode 100644 index 0000000000000000000000000000000000000000..6e9b6c377f5bd048cf4e8ac948a1182483173a3e --- /dev/null +++ b/include/dt-bindings/clock/qcom,npucc-sdmmagpie.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_NPU_CC_SM7150_H +#define _DT_BINDINGS_CLK_QCOM_NPU_CC_SM7150_H + +#define NPU_CC_ARMWIC_CORE_CLK 0 +#define NPU_CC_BTO_CORE_CLK 1 +#define NPU_CC_BWMON_CLK 2 +#define NPU_CC_CAL_DP_CDC_CLK 3 +#define NPU_CC_CAL_DP_CLK 4 +#define NPU_CC_CAL_DP_CLK_SRC 5 +#define NPU_CC_COMP_NOC_AXI_CLK 6 +#define NPU_CC_CONF_NOC_AHB_CLK 7 +#define NPU_CC_NPU_CORE_APB_CLK 8 +#define NPU_CC_NPU_CORE_ATB_CLK 9 +#define NPU_CC_NPU_CORE_CLK 10 +#define NPU_CC_NPU_CORE_CLK_SRC 11 +#define NPU_CC_NPU_CORE_CTI_CLK 12 +#define NPU_CC_NPU_CPC_CLK 13 +#define NPU_CC_NPU_CPC_TIMER_CLK 14 +#define NPU_CC_PERF_CNT_CLK 15 +#define NPU_CC_PLL0 16 +#define NPU_CC_PLL0_OUT_EVEN 17 +#define NPU_CC_PLL_TEST_CLK 18 +#define NPU_CC_QTIMER_CORE_CLK 19 +#define NPU_CC_SLEEP_CLK 20 +#define NPU_CC_XO_CLK 21 + +#define NPU_CORE_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index d3f1bb7383753e00a87c8634d1f3aab50ca528c3..33891f2b7153f65ffb27521302af70adf23a5eef 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -105,31 +105,38 @@ #define RPM_SMD_QPIC_A_CLK 65 #define RPM_SMD_CE1_CLK 66 #define RPM_SMD_CE1_A_CLK 67 -#define PNOC_MSMBUS_CLK 68 -#define PNOC_MSMBUS_A_CLK 69 -#define PNOC_KEEPALIVE_A_CLK 70 -#define SNOC_MSMBUS_CLK 71 -#define SNOC_MSMBUS_A_CLK 72 -#define BIMC_MSMBUS_CLK 73 -#define BIMC_MSMBUS_A_CLK 74 -#define PNOC_USB_CLK 77 -#define PNOC_USB_A_CLK 78 -#define SNOC_USB_CLK 79 -#define SNOC_USB_A_CLK 80 -#define BIMC_USB_CLK 81 -#define BIMC_USB_A_CLK 82 -#define SNOC_WCNSS_A_CLK 83 -#define BIMC_WCNSS_A_CLK 84 -#define MCD_CE1_CLK 85 -#define QCEDEV_CE1_CLK 86 -#define QCRYPTO_CE1_CLK 87 -#define 
QSEECOM_CE1_CLK 88 -#define SCM_CE1_CLK 89 -#define CXO_SMD_OTG_CLK 90 -#define CXO_SMD_LPM_CLK 91 -#define CXO_SMD_PIL_PRONTO_CLK 92 -#define CXO_SMD_PIL_MSS_CLK 93 -#define CXO_SMD_WLAN_CLK 94 -#define CXO_SMD_PIL_LPASS_CLK 95 +#define RPM_SMD_BIMC_GPU_CLK 68 +#define RPM_SMD_BIMC_GPU_A_CLK 69 +#define RPM_SMD_LN_BB_CLK 70 +#define RPM_SMD_LN_BB_CLK_A 71 +#define RPM_SMD_LN_BB_CLK_PIN 72 +#define RPM_SMD_LN_BB_CLK_A_PIN 73 +#define PNOC_MSMBUS_CLK 74 +#define PNOC_MSMBUS_A_CLK 75 +#define PNOC_KEEPALIVE_A_CLK 76 +#define SNOC_MSMBUS_CLK 77 +#define SNOC_MSMBUS_A_CLK 78 +#define BIMC_MSMBUS_CLK 79 +#define BIMC_MSMBUS_A_CLK 80 +#define PNOC_USB_CLK 81 +#define PNOC_USB_A_CLK 82 +#define SNOC_USB_CLK 83 +#define SNOC_USB_A_CLK 84 +#define BIMC_USB_CLK 85 +#define BIMC_USB_A_CLK 86 +#define SNOC_WCNSS_A_CLK 87 +#define BIMC_WCNSS_A_CLK 88 +#define MCD_CE1_CLK 89 +#define QCEDEV_CE1_CLK 90 +#define QCRYPTO_CE1_CLK 91 +#define QSEECOM_CE1_CLK 92 +#define SCM_CE1_CLK 93 +#define CXO_SMD_OTG_CLK 94 +#define CXO_SMD_LPM_CLK 95 +#define CXO_SMD_PIL_PRONTO_CLK 96 +#define CXO_SMD_PIL_MSS_CLK 97 +#define CXO_SMD_WLAN_CLK 98 +#define CXO_SMD_PIL_LPASS_CLK 99 +#define CXO_SMD_PIL_CDSP_CLK 100 #endif diff --git a/include/dt-bindings/clock/qcom,videocc-sdmmagpie.h b/include/dt-bindings/clock/qcom,videocc-sdmmagpie.h new file mode 100644 index 0000000000000000000000000000000000000000..de287ee7e8ee399ca6c4be1258dcb2bde4fae545 --- /dev/null +++ b/include/dt-bindings/clock/qcom,videocc-sdmmagpie.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SDMMAGPIE_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SDMMAGPIE_H + +#define VIDEO_CC_APB_CLK 0 +#define VIDEO_CC_AT_CLK 1 +#define VIDEO_CC_IRIS_AHB_CLK 3 +#define VIDEO_CC_IRIS_CLK_SRC 4 +#define VIDEO_CC_MVS0_AXI_CLK 5 +#define VIDEO_CC_MVS0_CORE_CLK 6 +#define VIDEO_CC_MVS1_AXI_CLK 7 +#define VIDEO_CC_MVS1_CORE_CLK 8 +#define VIDEO_CC_MVSC_CORE_CLK 9 +#define VIDEO_CC_MVSC_CTL_AXI_CLK 10 +#define VIDEO_CC_SLEEP_CLK 13 +#define VIDEO_CC_SLEEP_CLK_SRC 14 +#define VIDEO_CC_VENUS_AHB_CLK 15 +#define VIDEO_CC_XO_CLK 16 +#define VIDEO_CC_XO_CLK_SRC 17 +#define VIDEO_PLL0 18 + +#define MVS0_GDSC 0 +#define MVS1_GDSC 1 +#define MVSC_GDSC 2 + +#endif diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h index 911bbc3d7f291d0a19d55d67d00cc5f3b9b81361..0d35fdadefb2c9af3e8126ef3d86c15b8018d178 100644 --- a/include/dt-bindings/msm/msm-bus-ids.h +++ b/include/dt-bindings/msm/msm-bus-ids.h @@ -361,6 +361,7 @@ #define MSM_BUS_SLAVE_GEM_NOC_SNOC 10071 #define MSM_BUS_SLAVE_SNOC_GEM_NOC_GC 10072 #define MSM_BUS_SLAVE_SNOC_GEM_NOC_SF 10073 +#define MSM_BUS_PNOC_SLV_10 10074 #define MSM_BUS_INT_TEST_ID 20000 #define MSM_BUS_INT_TEST_LAST 20050 @@ -817,6 +818,7 @@ #define ICBID_MASTER_CNOC_A2NOC 146 #define ICBID_MASTER_WLAN 147 #define ICBID_MASTER_MSS_CE 148 +#define ICBID_MASTER_PCNOC_S_10 149 #define ICBID_SLAVE_EBI1 0 #define ICBID_SLAVE_APPSS_L2 1 @@ -1045,4 +1047,6 @@ #define ICBID_SLAVE_TLMM_EAST 213 #define ICBID_SLAVE_TLMM_NORTH 214 #define ICBID_SLAVE_TLMM_WEST 215 +#define ICBID_SLAVE_TLMM_SOUTH 216 +#define ICBID_SLAVE_PCNOC_S_10 217 #endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h 
b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h index 8283d82654b61b9066895d713edfaa2fcf0726ac..a5c4b3f91cbd35ff81d0577e0150843765a67caa 100644 --- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h +++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h @@ -12,9 +12,14 @@ #define PMIC_GPIO_PULL_UP_1P5_30 3 #define PMIC_GPIO_STRENGTH_NO 0 -#define PMIC_GPIO_STRENGTH_HIGH 1 +#define PMIC_GPIO_STRENGTH_LOW 1 #define PMIC_GPIO_STRENGTH_MED 2 -#define PMIC_GPIO_STRENGTH_LOW 3 +#define PMIC_GPIO_STRENGTH_HIGH 3 + +#define PM8921_GPIO_STRENGTH_NO 0 +#define PM8921_GPIO_STRENGTH_HIGH 1 +#define PM8921_GPIO_STRENGTH_MED 2 +#define PM8921_GPIO_STRENGTH_LOW 3 /* * Note: PM8018 GPIO3 and GPIO4 are supporting diff --git a/include/linux/atomic.h b/include/linux/atomic.h index cd18203d6ff3278e477e24001fe2af058c5b00b0..8b276fd9a127317e5ff0d83082eecc9cfa68dd38 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -654,6 +654,8 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) + #ifdef CONFIG_GENERIC_ATOMIC64 #include #endif @@ -1073,6 +1075,8 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v } #endif +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) + #include #endif /* _LINUX_ATOMIC_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 45f00dd6323c90abbde48255cc018bb7befd7f5f..5aa40f4712ff19e932c9bfadeacdcf300a17073d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -501,6 +501,7 @@ void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *); extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); extern unsigned int bvec_nr_vecs(unsigned short idx); +extern const char *bio_devname(struct bio *bio, char *buffer); #define bio_set_dev(bio, bdev) \ do { \ @@ -519,9 +520,6 @@ do { \ #define bio_dev(bio) \ 
disk_devt((bio)->bi_disk) -#define bio_devname(bio, buf) \ - __bdevname(bio_dev(bio), (buf)) - #ifdef CONFIG_BLK_CGROUP int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_current(struct bio *bio); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index b78b31af36f889c69823e12f6eee8ebca40fd770..f43113b8890b760ac73b0a370781e67998e055d7 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -211,6 +211,15 @@ #endif #endif +/* + * calling noreturn functions, __builtin_unreachable() and __builtin_trap() + * confuse the stack allocation in gcc, leading to overly large stack + * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 + * + * Adding an empty inline assembly before it works around the problem + */ +#define barrier_before_unreachable() asm volatile("") + /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer @@ -221,7 +230,11 @@ * unreleased. Really, we need to have autoconf for the kernel. */ #define unreachable() \ - do { annotate_unreachable(); __builtin_unreachable(); } while (0) + do { \ + annotate_unreachable(); \ + barrier_before_unreachable(); \ + __builtin_unreachable(); \ + } while (0) /* Mark a function definition as prohibited from being cloned. 
*/ #define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index e8c9cd18bb054aeb08e5969660595f187fed084e..853929f989625956f529d60eeaa26ded526df031 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define barrier_data(ptr) barrier() #endif +/* workaround for GCC PR82365 if needed */ +#ifndef barrier_before_unreachable +# define barrier_before_unreachable() do { } while (0) +#endif + /* Unreachable code */ #ifdef CONFIG_STACK_VALIDATION #define annotate_reachable() ({ \ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index aab2400c3bb2d978115e9820bf799b2405646af7..9f32244860693050db81354bb52978864b78fa44 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d8fcc02e378be17e2f83deaa974d7b700cdc6751..e79b7ba88a9b6bc24649fc3bd1de15436c08c63d 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -227,6 +227,7 @@ extern seqlock_t rename_lock; * These are the low-level FS interfaces to the dcache.. 
*/ extern void d_instantiate(struct dentry *, struct inode *); +extern void d_instantiate_new(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index d9912e07b7a988410eb28f3cddb373e458f4aa5d..62f4fa8b4e50d94d5bf6e8626e8e12d81f2babe1 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -146,7 +146,7 @@ * a new RANGE of SSIDs to the msg_mask_tbl. */ #define MSG_MASK_TBL_CNT 26 -#define APPS_EVENT_LAST_ID 0x0C5B +#define APPS_EVENT_LAST_ID 0xC7A #define MSG_SSID_0 0 #define MSG_SSID_0_LAST 125 diff --git a/include/linux/efi.h b/include/linux/efi.h index 29fdf8029cf6fea785631ce65c99eb16c80bf926..b68b7d199feeab2dace5d7740af1accb203ff63b 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -395,8 +395,8 @@ typedef struct { u32 attributes; u32 get_bar_attributes; u32 set_bar_attributes; - uint64_t romsize; - void *romimage; + u64 romsize; + u32 romimage; } efi_pci_io_protocol_32; typedef struct { @@ -415,8 +415,8 @@ typedef struct { u64 attributes; u64 get_bar_attributes; u64 set_bar_attributes; - uint64_t romsize; - void *romimage; + u64 romsize; + u64 romimage; } efi_pci_io_protocol_64; typedef struct { diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index ab927383c99d69abafaf1e12bb365d9f51ad211d..87b8c20d5b27cf1b3f01ade2b0ae27fd2a9a61aa 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -300,32 +300,47 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, } /** - * __vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_inner_tag - inner VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert + * @mac_len: MAC header length including outer vlan headers * - * Inserts the VLAN tag 
into @skb as part of the payload + * Inserts the VLAN tag into @skb as part of the payload at offset mac_len * Returns error if skb_cow_head failes. * * Does not change skb->protocol so this function can be used during receive. */ -static inline int __vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline int __vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci, + unsigned int mac_len) { struct vlan_ethhdr *veth; if (skb_cow_head(skb, VLAN_HLEN) < 0) return -ENOMEM; - veth = skb_push(skb, VLAN_HLEN); + skb_push(skb, VLAN_HLEN); - /* Move the mac addresses to the beginning of the new header. */ - memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + /* Move the mac header sans proto to the beginning of the new header. */ + if (likely(mac_len > ETH_TLEN)) + memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); skb->mac_header -= VLAN_HLEN; + veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); + /* first, the ethernet type */ - veth->h_vlan_proto = vlan_proto; + if (likely(mac_len >= ETH_TLEN)) { + /* h_vlan_encapsulated_proto should already be populated, and + * skb->data has space for h_vlan_proto + */ + veth->h_vlan_proto = vlan_proto; + } else { + /* h_vlan_encapsulated_proto should not be populated, and + * skb->data has no space for h_vlan_proto + */ + veth->h_vlan_encapsulated_proto = skb->protocol; + } /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); @@ -334,12 +349,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, } /** - * vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload + * Returns error if skb_cow_head failes. + * + * Does not change skb->protocol so this function can be used during receive. 
+ */ +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); +} + +/** + * vlan_insert_inner_tag - inner VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * @mac_len: MAC header length including outer vlan headers + * + * Inserts the VLAN tag into @skb as part of the payload at offset mac_len * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. * * Following the skb_unshare() example, in case of error, the calling function @@ -347,12 +380,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb, * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci, + unsigned int mac_len) { int err; - err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); if (err) { dev_kfree_skb_any(skb); return NULL; @@ -360,6 +395,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, return skb; } +/** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. 
+ */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); +} + /** * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index b9e22b7e2f2884fe6c2f473c1c3e8baefe61e469..d1171db23742733244aaba3864bdfcfc2cab06fd 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -53,7 +53,7 @@ struct iio_buffer_access_funcs { int (*request_update)(struct iio_buffer *buffer); int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); - int (*set_length)(struct iio_buffer *buffer, int length); + int (*set_length)(struct iio_buffer *buffer, unsigned int length); int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); @@ -72,10 +72,10 @@ struct iio_buffer_access_funcs { */ struct iio_buffer { /** @length: Number of datums in buffer. */ - int length; + unsigned int length; /** @bytes_per_datum: Size of individual datum including timestamp. 
*/ - int bytes_per_datum; + size_t bytes_per_datum; /** * @access: Buffer access functions associated with the diff --git a/include/linux/ipa.h b/include/linux/ipa.h index a8513fdce0712bbb1f5aa70ab3f9ae144d611dcc..40a848c24edabff1895dfa546b44cd02dd6274c6 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -304,10 +304,16 @@ struct ipa_ep_cfg_route { * @tmr_val: duration in units of 128 IPA clk clock cyles [0,511], 1 clk=1.28us * IPAv2.5 support 32 bit HOLB timeout value, previous versions * supports 16 bit + * splitting timer value into 2 fields for IPA4.2 new timer value is: + * BASE_VALUE* (2^SCALE) + * @base_val : base value of the timer + * @scale : scale value for timer */ struct ipa_ep_cfg_holb { u16 en; u32 tmr_val; + u32 base_val; + u32 scale; }; /** diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h index 8cabb0d28bd547cf90d70090dfb7df748079af1c..2e0905b146053f18e66bc9a3c6c34cd5390bc3d3 100644 --- a/include/linux/ipa_uc_offload.h +++ b/include/linux/ipa_uc_offload.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -69,13 +69,29 @@ struct ipa_uc_offload_intf_params { enum ipa_client_type alt_dst_pipe; }; +/** + * struct ntn_buff_smmu_map - IPA iova->pa SMMU mapping + * @iova: virtual address of the data buffer + * @pa: physical address of the data buffer + */ +struct ntn_buff_smmu_map { + dma_addr_t iova; + phys_addr_t pa; +}; + /** * struct ipa_ntn_setup_info - NTN TX/Rx configuration * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS) + * @smmu_enabled: SMMU is enabled for uC or not * @ring_base_pa: physical address of the base of the Tx/Rx ring + * @ring_base_iova: virtual address of the base of the Tx/Rx ring + * @ring_base_sgt:Scatter table for ntn_rings,contains valid non NULL + * value when ENAC S1-SMMU enabed, else NULL. * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements) - * @buff_pool_base_pa: physical address of the base of the Tx/Rx - * buffer pool + * @buff_pool_base_pa: physical address of the base of the Tx/Rx buffer pool + * @buff_pool_base_iova: virtual address of the base of the Tx/Rx buffer pool + * @buff_pool_base_sgt: Scatter table for buffer pools,contains valid non NULL + * value when EMAC S1-SMMU enabed, else NULL. 
* @num_buffers: Rx/Tx buffer pool size (in terms of elements) * @data_buff_size: size of the each data buffer allocated in DDR * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's @@ -83,11 +99,21 @@ struct ipa_uc_offload_intf_params { */ struct ipa_ntn_setup_info { enum ipa_client_type client; + bool smmu_enabled; phys_addr_t ring_base_pa; + dma_addr_t ring_base_iova; + struct sg_table *ring_base_sgt; + u32 ntn_ring_size; phys_addr_t buff_pool_base_pa; + dma_addr_t buff_pool_base_iova; + struct sg_table *buff_pool_base_sgt; + + struct ntn_buff_smmu_map *data_buff_list; + u32 num_buffers; + u32 data_buff_size; phys_addr_t ntn_reg_base_ptr_pa; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5a8019befafdcbe1a2ff23b3b99c270e10cdb884..39f0489eb137b65620c3ea2ff6de70a44f84e6d7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1104,7 +1104,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) { } #endif -void kvm_arch_irq_routing_update(struct kvm *kvm); static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { @@ -1113,6 +1112,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) #endif /* CONFIG_HAVE_KVM_EVENTFD */ +void kvm_arch_irq_routing_update(struct kvm *kvm); + static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { /* diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h index b9887e3fe215a087d289aaabf79496347c532e41..0948d072435ed948a3ee42afe89290f3e4c21c3c 100644 --- a/include/linux/leds-qpnp-flash.h +++ b/include/linux/leds-qpnp-flash.h @@ -24,5 +24,15 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options, int *max_current); +#ifdef CONFIG_BACKLIGHT_QCOM_SPMI_WLED +int wled_flash_led_prepare(struct led_trigger *trig, int options, + int *max_current); +#else +static inline int wled_flash_led_prepare(struct led_trigger *trig, int options, + int *max_current) +{ + return -EINVAL; +} +#endif 
#endif diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 6439bf86802b4ba31923ac578f126733ce67123e..fc27fce6a13b4e3f82af7f3020208efdd2039ed2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -90,8 +90,6 @@ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); ulong choose_memblock_flags(void); -unsigned long memblock_region_resize_late_begin(void); -void memblock_region_resize_late_end(unsigned long flags); /* Low level functions */ int memblock_add_range(struct memblock_type *type, diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d207bbbab7828e48655b3e8bfc5fe6ee4a2d1845..911aa616461769ec18528d0b091f63b786dd6568 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -114,6 +114,7 @@ extern void __online_page_increment_counters(struct page *page); extern void __online_page_free(struct page *page); extern int try_online_node(int nid); +extern bool try_online_one_block(int nid); extern bool memhp_auto_online; /* If movable_node boot option specified */ @@ -278,6 +279,11 @@ static inline int try_online_node(int nid) return 0; } +static inline bool try_online_one_block(int nid) +{ + return false; +} + static inline void get_online_mems(void) {} static inline void put_online_mems(void) {} diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 3e2e2abbf2901fd7e9a9870593c3d98ab46fa2c6..006c253ca45b719798c0b810f495260119beae28 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -18,6 +18,8 @@ struct mhi_ctxt; struct mhi_cmd; struct image_info; struct bhi_vec_entry; +struct mhi_timesync; +struct mhi_buf_info; /** * enum MHI_CB - MHI callback @@ -58,6 +60,16 @@ enum MHI_FLAGS { MHI_CHAIN, }; +/** + * enum mhi_device_type - Device types + * @MHI_XFER_TYPE: Handles data transfer + * @MHI_TIMESYNC_TYPE: Use for timesync feature + 
*/ +enum mhi_device_type { + MHI_XFER_TYPE, + MHI_TIMESYNC_TYPE, +}; + /** * struct image_info - firmware and rddm table table * @mhi_buf - Contain device firmware and rddm table @@ -110,6 +122,9 @@ struct image_info { * @link_status: Query link status in case of abnormal value read from device * @runtime_get: Async runtime resume function * @runtimet_put: Release votes + * @time_get: Return host time in us + * @lpm_disable: Request controller to disable link level low power modes + * @lpm_enable: Controller may enable link level low power modes again * @priv_data: Points to bus master's private data */ struct mhi_controller { @@ -201,10 +216,26 @@ struct mhi_controller { void (*wake_put)(struct mhi_controller *, bool); int (*runtime_get)(struct mhi_controller *, void *); void (*runtime_put)(struct mhi_controller *, void *); + u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv); + void (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv); + void (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv); + int (*map_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); + void (*unmap_single)(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf); /* channel to control DTR messaging */ struct mhi_device *dtr_dev; + /* bounce buffer settings */ + bool bounce_buf; + size_t buffer_len; + + /* supports time sync feature */ + bool time_sync; + struct mhi_timesync *mhi_tsync; + struct mhi_device *tsync_dev; + /* kernel log level */ enum MHI_DEBUG_LEVEL klog_lvl; @@ -224,6 +255,7 @@ struct mhi_controller { * @mtu: Maximum # of bytes controller support * @ul_chan_id: MHI channel id for UL transfer * @dl_chan_id: MHI channel id for DL transfer + * @tiocm: Device current terminal settings * @priv: Driver private data */ struct mhi_device { @@ -237,12 +269,14 @@ struct mhi_device { int dl_chan_id; int ul_event_id; int dl_event_id; + u32 tiocm; const struct mhi_device_id *id; const char *chan_name; struct mhi_controller *mhi_cntrl; 
struct mhi_chan *ul_chan; struct mhi_chan *dl_chan; atomic_t dev_wake; + enum mhi_device_type dev_type; void *priv_data; int (*ul_xfer)(struct mhi_device *, struct mhi_chan *, void *, size_t, enum MHI_FLAGS); diff --git a/include/linux/mm.h b/include/linux/mm.h index 77b328516ccb7f975f8e058ceeae2e9ae74c2e44..d6f98baf9e9633f59b250bbb3ff7a5c51f3899d4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2386,6 +2386,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_MLOCK 0x1000 /* lock present pages */ #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ #define FOLL_COW 0x4000 /* internal GUP flag */ +#define FOLL_ANON 0x8000 /* don't do file mappings */ static inline int vm_fault_to_errno(int vm_fault, int foll_flags) { diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h index c2986664e13e373098e376c367eca2cf5dc462db..9da641ed6ee77bd66a5b9a0f8d13bc22afeaa06b 100644 --- a/include/linux/msm-bus.h +++ b/include/linux/msm-bus.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -217,6 +217,7 @@ static inline int msm_bus_scale_query_tcs_cmd_all(struct msm_bus_tcs_handle struct msm_bus_scale_pdata *msm_bus_pdata_from_node( struct platform_device *pdev, struct device_node *of_node); struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev); +struct msm_bus_scale_pdata *msm_bus_cl_get_pdata_from_dev(struct device *dev); void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata); #else static inline struct msm_bus_scale_pdata diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h index a587beb2e978943f5ece3282ea4fd1eb71e3f429..c41fe65c5d9b7daf126b84a833d787ab37a880e4 100644 --- a/include/linux/msm_gsi.h +++ b/include/linux/msm_gsi.h @@ -19,6 +19,8 @@ enum gsi_ver { GSI_VER_1_2 = 2, GSI_VER_1_3 = 3, GSI_VER_2_0 = 4, + GSI_VER_2_2 = 5, + GSI_VER_2_5 = 6, GSI_VER_MAX, }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index fa7ee2e6c827b40a9867356fa89d0afd6bfd72fc..933259a40671b2a4b873e5f271aeb4d806c6b219 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2137,13 +2137,10 @@ struct napi_gro_cb { /* Used in GRE, set in fou/gue_gro_receive */ u8 is_fou:1; - /* Used to determine if flush_id can be ignored */ - u8 is_atomic:1; - /* Number of gro_receive callbacks this packet already went through */ u8 recursion_counter:4; - /* 1 bit hole */ + /* 2 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; diff --git a/include/linux/nospec.h b/include/linux/nospec.h index e791ebc65c9c0776325cdc5e72de5d995038d7e0..0c5ef54fd4162830b55aa676c1ecae4ea6ac23f5 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -7,6 +7,8 @@ #define _LINUX_NOSPEC_H #include +struct task_struct; + /** * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise * @index: array element index @@ 
-55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, \ (typeof(_i)) (_i & _mask); \ }) + +/* Speculation control prctl */ +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which); +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl); +/* Speculation control for seccomp enforced mitigation */ +void arch_seccomp_spec_mitigate(struct task_struct *task); + #endif /* _LINUX_NOSPEC_H */ diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 3d8ded5c49b48dbc947cfc87c31cd162d5b0f673..bb4fda7317beca62d8c4d3d2d1941a40efa13d15 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -15,7 +15,6 @@ struct reserved_mem { phys_addr_t base; phys_addr_t size; void *priv; - int fixup; }; struct reserved_mem_ops { diff --git a/include/linux/oom.h b/include/linux/oom.h index 267573bb9b6772a7d4b4ce059334cc9aefaf0b65..efce1a078d88ae8758e33e4cfebfdd5082c35247 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -120,4 +120,8 @@ extern void dump_tasks(struct mem_cgroup *memcg, extern int sysctl_oom_dump_tasks; extern int sysctl_oom_kill_allocating_task; extern int sysctl_panic_on_oom; +extern int sysctl_reap_mem_on_sigkill; + +/* calls for LMK reaper */ +extern void add_to_oom_reaper(struct task_struct *p); #endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/include/linux/plist.h b/include/linux/plist.h index 97883604a3c5f7516ae2aa67574c5ce7ffda4cb8..0ea3e1bc7cccb1e335997cc9c50f4647db4f6d1f 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -266,6 +266,9 @@ static inline int plist_node_empty(const struct plist_node *node) #define plist_next(pos) \ list_next_entry(pos, node_list) +#define plist_next_entry(pos, type, member) \ + container_of(plist_next(pos), type, member) + /** * plist_prev - get the prev entry in list * @pos: the type * to cursor diff --git a/include/linux/power_supply.h 
b/include/linux/power_supply.h index e2e982ae2b8338fc0f16ad367b5063755280bbe1..a4fe711aea33d521f6b5ba72d43fb58b58a01900 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -115,6 +115,13 @@ enum { POWER_SUPPLY_PL_USBMID_USBMID, }; +enum { + POWER_SUPPLY_CHARGER_SEC_NONE = 0, + POWER_SUPPLY_CHARGER_SEC_CP, + POWER_SUPPLY_CHARGER_SEC_PL, + POWER_SUPPLY_CHARGER_SEC_CP_PL, +}; + enum { POWER_SUPPLY_CONNECTOR_TYPEC, POWER_SUPPLY_CONNECTOR_MICRO_USB, @@ -287,6 +294,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_BATT_FULL_CURRENT, POWER_SUPPLY_PROP_RECHARGE_SOC, POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED, + POWER_SUPPLY_PROP_SMB_EN_MODE, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, /* Properties of type `const char *' */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 35d125569e6848e03992b5a5ca9d1413bbb185fa..e8b12b79a0de48a9f3ca9c9db8c338f1b66b9d52 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -450,7 +450,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, */ static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { - if (size * sizeof(void *) > KMALLOC_MAX_SIZE) + if (size > KMALLOC_MAX_SIZE / sizeof(void *)) return NULL; return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); } diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h index fe2e9571188e9bc960ae3290a12e52a83747cc73..f998a8c8121436ceb31b006a1585726d84f0d8f5 100644 --- a/include/linux/qpnp/qpnp-revid.h +++ b/include/linux/qpnp/qpnp-revid.h @@ -245,6 +245,16 @@ #define PM8150L_V1P0_REV3 0x00 #define PM8150L_V1P0_REV4 0x01 +#define PM8150L_V2P0_REV1 0x00 +#define PM8150L_V2P0_REV2 0x00 +#define PM8150L_V2P0_REV3 0x00 +#define PM8150L_V2P0_REV4 0x02 + +#define PM8150L_V3P0_REV1 0x00 +#define PM8150L_V3P0_REV2 0x00 +#define PM8150L_V3P0_REV3 0x00 +#define PM8150L_V3P0_REV4 0x03 + /* PMI8998 FAB_ID */ #define PMI8998_FAB_ID_SMIC 0x11 
#define PMI8998_FAB_ID_GF 0x30 diff --git a/include/linux/sched.h b/include/linux/sched.h index 3df6845100a6fe53d1218a790b22e586c5410c2b..2790c40436c02ec509f4bb3dba3d3f5e2a0e065b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1528,7 +1528,8 @@ static inline bool is_percpu_thread(void) #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ - +#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ +#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ @@ -1553,6 +1554,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab) TASK_PFA_SET(SPREAD_SLAB, spread_slab) TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) +TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) +TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) + +TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) + static inline void current_restore_flags(unsigned long orig_flags, unsigned long flags) { diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 10f25f7e4304cc0330851a01ecd10353a4ad2ef6..a9d5c52de4ea24c372318db02af2106a5db700f6 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -4,8 +4,9 @@ #include -#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ - SECCOMP_FILTER_FLAG_LOG) +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ + SECCOMP_FILTER_FLAG_LOG | \ + SECCOMP_FILTER_FLAG_SPEC_ALLOW) #ifdef CONFIG_SECCOMP diff --git a/include/linux/swap.h b/include/linux/swap.h index 933d7c0c35421687179d21b6e810c750c9b89117..4d128336ed6835dd95dea06e653271dca5323b87 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -178,6 +178,7 @@ enum { #define 
SWAP_CLUSTER_MAX 32UL #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX +#define SWAPFILE_CLUSTER 256 #define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ #define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ @@ -273,6 +274,8 @@ struct swap_info_struct { */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ + unsigned int write_pending; + unsigned int max_writes; }; #ifdef CONFIG_64BIT @@ -348,6 +351,8 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; +extern int sysctl_swap_ratio; +extern int sysctl_swap_ratio_enable; extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; @@ -393,7 +398,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *, #define SWAP_ADDRESS_SPACE_SHIFT 14 #define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) extern struct address_space *swapper_spaces[]; -extern bool swap_vma_readahead; #define swap_address_space(entry) \ (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ >> SWAP_ADDRESS_SPACE_SHIFT]) @@ -415,14 +419,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated); -extern struct page *swapin_readahead(swp_entry_t, gfp_t, - struct vm_area_struct *vma, unsigned long addr); - -extern struct page *swap_readahead_detect(struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra); -extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra); +extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag, + struct vm_fault *vmf); +extern struct page 
*swapin_readahead(swp_entry_t entry, gfp_t flag, + struct vm_fault *vmf); /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; @@ -430,11 +430,6 @@ extern long total_swap_pages; extern atomic_t nr_rotate_swap; extern bool has_usable_swap(void); -static inline bool swap_use_vma_readahead(void) -{ - return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap); -} - /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) { @@ -463,9 +458,11 @@ extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct page *, struct block_device **); extern sector_t swapdev_block(int, pgoff_t); extern int page_swapcount(struct page *); +extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry); extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); +extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); extern bool reuse_swap_page(struct page *, int *); extern int try_to_free_swap(struct page *); struct backing_dev_info; @@ -474,6 +471,16 @@ extern void exit_swap_address_space(unsigned int type); #else /* CONFIG_SWAP */ +static inline int swap_readpage(struct page *page, bool do_poll) +{ + return 0; +} + +static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) +{ + return NULL; +} + #define swap_address_space(entry) (NULL) #define get_nr_swap_pages() 0L #define total_swap_pages 0L @@ -518,26 +525,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp) { } -static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +static inline struct page *swap_cluster_readahead(swp_entry_t entry, + gfp_t gfp_mask, struct vm_fault *vmf) { return NULL; } -static inline bool swap_use_vma_readahead(void) -{ - return false; -} - -static inline struct page *swap_readahead_detect( - struct vm_fault 
*vmf, struct vma_swap_readahead *swap_ra) -{ - return NULL; -} - -static inline struct page *do_swap_page_readahead( - swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, struct vma_swap_readahead *swap_ra) +static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, + struct vm_fault *vmf) { return NULL; } @@ -578,6 +573,11 @@ static inline int page_swapcount(struct page *page) return 0; } +static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry) +{ + return 0; +} + static inline int __swp_swapcount(swp_entry_t entry) { return 0; diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h index 06bd7b096167fdc3d4b1c69d3c178d47b26aeb60..d76c0644006f4e307552352279dde5771ce2f549 100644 --- a/include/linux/swapfile.h +++ b/include/linux/swapfile.h @@ -8,7 +8,12 @@ */ extern spinlock_t swap_lock; extern struct plist_head swap_active_head; +extern spinlock_t swap_avail_lock; +extern struct plist_head *swap_avail_heads; extern struct swap_info_struct *swap_info[]; extern int try_to_unuse(unsigned int, bool, unsigned long); +extern int swap_ratio(struct swap_info_struct **si, int node); +extern void setup_swap_ratio(struct swap_info_struct *p, int prio); +extern bool is_swap_ratio_group(int prio); #endif /* _LINUX_SWAPFILE_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index e8418fc77a43f9baf676932c26b869ed96e16688..fe322fa611e663e50a67f5153f7a1d683e573d34 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -334,7 +334,7 @@ struct tcp_sock { /* Receiver queue space */ struct { - int space; + u32 space; u32 seq; u64 time; } rcvq_space; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index 5bdbd9f49395f883ca2dc5aa0d7bbde11f379063..07ee0f84a46caa9e2b1c446f96009f63b3b99f50 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -90,6 +90,28 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp) #endif } +static inline unsigned 
long +u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp) +{ + unsigned long flags = 0; + +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + local_irq_save(flags); + write_seqcount_begin(&syncp->seq); +#endif + return flags; +} + +static inline void +u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp, + unsigned long flags) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + write_seqcount_end(&syncp->seq); + local_irq_restore(flags); +#endif +} + static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp) { #if BITS_PER_LONG==32 && defined(CONFIG_SMP) diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index ca2938c6723f1c05ae55cb7479c626ace6e85801..9a1fabc6ff03f1eee6848bd83d35b0be0984c72c 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -57,6 +57,9 @@ /* big enough to hold our biggest descriptor */ #define USB_COMP_EP0_BUFSIZ 4096 +/* OS feature descriptor length <= 4kB */ +#define USB_COMP_EP0_OS_DESC_BUFSIZ 4096 + #define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1) struct usb_configuration; diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h index c42a0ba79523fcba668dfc429f462ca131f245d1..8642f030f05a6bc8f64e5848f987ef0024ae9c0b 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -163,6 +163,7 @@ struct usb_phy { enum usb_device_speed speed); int (*notify_disconnect)(struct usb_phy *x, enum usb_device_speed speed); + int (*link_training)(struct usb_phy *x, bool start); /* * Charger detection method can be implemented if you need to @@ -387,6 +388,24 @@ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed) return 0; } +static inline int +usb_phy_start_link_training(struct usb_phy *x) +{ + if (x && x->link_training) + return x->link_training(x, true); + else + return 0; +} + +static inline int +usb_phy_stop_link_training(struct usb_phy *x) +{ + if (x && x->link_training) + return x->link_training(x, false); + else + 
return 0; +} + static inline int usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed) { diff --git a/include/net/bonding.h b/include/net/bonding.h index b2e68657a2162cd55b957c9230779f5d066fe6f3..73799da57400f53d17ac4595e61ce97a38462fc7 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -198,6 +198,7 @@ struct bonding { struct slave __rcu *primary_slave; struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ bool force_primary; + u32 nest_level; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); diff --git a/include/net/ip.h b/include/net/ip.h index bc002f0b6b57f7bbac1576de379baa69d49a467c..cbd8a1d5409ab0a97239af97a0b6c9782765eba5 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -329,6 +329,13 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } +static inline int ip_mtu_locked(const struct dst_entry *dst) +{ + const struct rtable *rt = (const struct rtable *)dst; + + return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU); +} + static inline int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) { @@ -336,7 +343,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst) return pmtudisc == IP_PMTUDISC_DO || (pmtudisc == IP_PMTUDISC_WANT && - !(dst_metric_locked(dst, RTAX_MTU))); + !ip_mtu_locked(dst)); } static inline bool ip_sk_accept_pmtu(const struct sock *sk) @@ -362,7 +369,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, struct net *net = dev_net(dst->dev); if (net->ipv4.sysctl_ip_fwd_use_pmtu || - dst_metric_locked(dst, RTAX_MTU) || + ip_mtu_locked(dst) || !forwarding) return dst_mtu(dst); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 1a7f7e4243202181be87cd0b58b0243693b2dafc..5c5d344c062934748c21a9679ba1ad3f565e0915 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -59,6 +59,7 @@ 
struct fib_nh_exception { int fnhe_genid; __be32 fnhe_daddr; u32 fnhe_pmtu; + bool fnhe_mtu_locked; __be32 fnhe_gw; unsigned long fnhe_expires; struct rtable __rcu *fnhe_rth_input; diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index ea985aa7a6c5e64b6802c2399ad47db23fdbd9bf..df528a6235487d3678ffdccb28facf977a1e7098 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk); /* Access to a connection */ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); -void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); +int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 4f1d2dec43cef82c38f2231ef7ad00bc856aa4ba..87b62bae20af7e20bb907ff5a693a94dc91b3d0f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4141,7 +4141,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid); * The TX headroom reserved by mac80211 for its own tx_status functions. * This is enough for the radiotap header. 
*/ -#define IEEE80211_TX_STATUS_HEADROOM 14 +#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4) /** * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames diff --git a/include/net/regulatory.h b/include/net/regulatory.h index ebc5a2ed86317dfdab8939d6259ed7ea7b0199fb..f83cacce33085e3922cd23cd51ccca393f85114b 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -78,7 +78,7 @@ struct regulatory_request { int wiphy_idx; enum nl80211_reg_initiator initiator; enum nl80211_user_reg_hint_type user_reg_hint_type; - char alpha2[2]; + char alpha2[3]; enum nl80211_dfs_regions dfs_region; bool intersect; bool processed; diff --git a/include/net/route.h b/include/net/route.h index d538e6db1afef1e7ab50d8b491949e8ccdeca4a8..6077a0fb304419b4a60c951b9db263ae7bfd2204 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -63,7 +63,8 @@ struct rtable { __be32 rt_gateway; /* Miscellaneous cached information */ - u32 rt_pmtu; + u32 rt_mtu_locked:1, + rt_pmtu:31; u32 rt_table_id; diff --git a/include/net/tls.h b/include/net/tls.h index df950383b8c187b3c3ecbca298052a068513b2dc..48940a883d9a379487af5eb00d59b95620145e83 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -98,6 +98,7 @@ struct tls_context { struct scatterlist *partially_sent_record; u16 partially_sent_offset; unsigned long flags; + bool in_tcp_sendpages; u16 pending_open_record_frags; int (*push_pending_record)(struct sock *sk, int flags); diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 23159dd5be184bc5db37f4d34a30653afe8b97fd..a1fd63871d17289ee61ac1e22e7f2aeed42d82cf 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -48,7 +48,6 @@ struct ib_umem { int writable; int hugetlb; struct work_struct work; - struct pid *pid; struct mm_struct *mm; unsigned long diff; struct ib_umem_odp *odp_data; diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 526888b4fcd12c4e78b4d64850c9bc22c2c0ce3c..ab890c09339c061ec744446f08e1e11167318734 100644 
--- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -48,6 +48,8 @@ static inline int scsi_status_is_good(int status) */ status &= 0xfe; return ((status == SAM_STAT_GOOD) || + (status == SAM_STAT_CONDITION_MET) || + /* Next two "intermediate" statuses are obsolete in SAM-4 */ (status == SAM_STAT_INTERMEDIATE) || (status == SAM_STAT_INTERMEDIATE_CONDITION_MET) || /* FIXME: this is obsolete in SAM-3 */ diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index c2d1b15da136e9aa9273a90471ac8be7a1b6b413..a91f25151a5b96ce43d61e2f58841aefa4190baa 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h @@ -15,6 +15,7 @@ #define ARC_REG_MCIP_BCR 0x0d0 #define ARC_REG_MCIP_IDU_BCR 0x0D5 +#define ARC_REG_GFRC_BUILD 0x0D6 #define ARC_REG_MCIP_CMD 0x600 #define ARC_REG_MCIP_WDATA 0x601 #define ARC_REG_MCIP_READBACK 0x602 @@ -36,10 +37,14 @@ struct mcip_cmd { #define CMD_SEMA_RELEASE 0x12 #define CMD_DEBUG_SET_MASK 0x34 +#define CMD_DEBUG_READ_MASK 0x35 #define CMD_DEBUG_SET_SELECT 0x36 +#define CMD_DEBUG_READ_SELECT 0x37 #define CMD_GFRC_READ_LO 0x42 #define CMD_GFRC_READ_HI 0x43 +#define CMD_GFRC_SET_CORE 0x47 +#define CMD_GFRC_READ_CORE 0x48 #define CMD_IDU_ENABLE 0x71 #define CMD_IDU_DISABLE 0x72 diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h new file mode 100644 index 0000000000000000000000000000000000000000..152f434210e776db78130cf9514ac9fbd07a0361 --- /dev/null +++ b/include/soc/qcom/qmi_rmnet.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _QMI_RMNET_H +#define _QMI_RMNET_H + +#include +#include + +#ifdef CONFIG_QCOM_QMI_DFC +void *qmi_rmnet_qos_init(struct net_device *real_dev, uint8_t mux_id); +void qmi_rmnet_qos_exit(struct net_device *dev); +void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt); +void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb); +#else +static inline void *qmi_rmnet_qos_init(struct net_device *real_dev, + uint8_t mux_id) +{ + return NULL; +} + +static inline void qmi_rmnet_qos_exit(struct net_device *dev) +{ +} + +static inline void qmi_rmnet_change_link(struct net_device *dev, + void *port, void *tcm_pt) +{ +} + +static inline void qmi_rmnet_burst_fc_check(struct net_device *dev, + struct sk_buff *skb) +{ +} +#endif + +#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE +int qmi_rmnet_reg_dereg_fc_ind(void *port, int reg); +#else +static inline int qmi_rmnet_reg_dereg_fc_ind(void *port, int reg) +{ + return 0; +} +#endif +#endif /*_QMI_RMNET_H*/ diff --git a/include/soc/qcom/ramdump.h b/include/soc/qcom/ramdump.h index 6a25cc3aefc1708eef7f0104501141b1348c2e50..4e23ccf269a7145aeee9a1e8228eb36de79abef1 100644 --- a/include/soc/qcom/ramdump.h +++ b/include/soc/qcom/ramdump.h @@ -29,6 +29,8 @@ extern int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments); extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments); +extern int do_minidump(void *handle, struct ramdump_segment *segments, + int nsegments); #else static inline void *create_ramdump_device(const char *dev_name, diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h new file mode 100644 index 0000000000000000000000000000000000000000..532c69d008b08f652d0dd70fbf78d724a221fe89 --- /dev/null +++ b/include/soc/qcom/rmnet_qmi.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_QMI_H +#define _RMNET_QMI_H + +#include +#ifdef CONFIG_QCOM_QMI_DFC +void *rmnet_get_qmi_pt(void *port); +void *rmnet_get_qos_pt(struct net_device *dev); +void *rmnet_get_rmnet_port(struct net_device *dev); +struct net_device *rmnet_get_rmnet_dev(void *port, uint8_t mux_id); +void rmnet_reset_qmi_pt(void *port); +void rmnet_init_qmi_pt(void *port, void *qmi); +#else +static inline void *rmnet_get_qmi_pt(void *port) +{ + return NULL; +} + +static inline void *rmnet_get_qos_pt(struct net_device *dev) +{ + return NULL; +} + +static inline void *rmnet_get_rmnet_port(struct net_device *dev) +{ + return NULL; +} + +static inline struct net_device *rmnet_get_rmnet_dev(void *port, + uint8_t mux_id) +{ + return NULL; +} + +static inline void rmnet_reset_qmi_pt(void *port) +{ +} + +static inline void rmnet_init_qmi_pt(void *port, void *qmi) +{ +} +#endif /* CONFIG_QCOM_QMI_DFC */ +#endif /*_RMNET_QMI_H*/ diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h index 12fa374a8ba23716e838ab71e592a3bbb0eb587d..b9b333e266c8a38947449ba5f3fb315f6025b308 100644 --- a/include/soc/qcom/secure_buffer.h +++ b/include/soc/qcom/secure_buffer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -50,6 +50,8 @@ enum vmid { #define PERM_EXEC 0x1 #ifdef CONFIG_QCOM_SECURE_BUFFER +int msm_secure_table(struct sg_table *table); +int msm_unsecure_table(struct sg_table *table); int hyp_assign_table(struct sg_table *table, u32 *source_vm_list, int source_nelems, int *dest_vmids, int *dest_perms, @@ -57,8 +59,19 @@ int hyp_assign_table(struct sg_table *table, extern int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vmlist, int source_nelems, int *dest_vmids, int *dest_perms, int dest_nelems); +bool msm_secure_v2_is_supported(void); const char *msm_secure_vmid_to_string(int secure_vmid); #else +static inline int msm_secure_table(struct sg_table *table) +{ + return -EINVAL; +} + +static inline int msm_unsecure_table(struct sg_table *table) +{ + return -EINVAL; +} + static inline int hyp_assign_table(struct sg_table *table, u32 *source_vm_list, int source_nelems, int *dest_vmids, int *dest_perms, @@ -74,6 +87,11 @@ static inline int hyp_assign_phys(phys_addr_t addr, u64 size, return -EINVAL; } +static inline bool msm_secure_v2_is_supported(void) +{ + return false; +} + static inline const char *msm_secure_vmid_to_string(int secure_vmid) { return "N/A"; diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index e033efc8cd372b0f5cd957c1094962c31d65aef2..267cfedb508ccf8e265120142e89afcda20489d7 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -67,6 +67,10 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sm6150") #define early_machine_is_qcs405() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs405") +#define early_machine_is_sdxprairie() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdxprairie") +#define early_machine_is_sdmmagpie() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmmagpie") #else #define of_board_is_sim() 0 #define 
of_board_is_rumi() 0 @@ -90,6 +94,8 @@ #define early_machine_is_sdmshrike() 0 #define early_machine_is_sm6150() 0 #define early_machine_is_qcs405() 0 +#define early_machine_is_sdxprairie() 0 +#define early_machine_is_sdmmagpie() 0 #endif #define PLATFORM_SUBTYPE_MDM 1 @@ -115,6 +121,8 @@ enum msm_cpu { MSM_CPU_SDMSHRIKE, MSM_CPU_SM6150, MSM_CPU_QCS405, + SDX_CPU_SDXPRAIRIE, + MSM_CPU_SDMMAGPIE, }; struct msm_soc_info { diff --git a/include/trace/events/mpm.h b/include/trace/events/mpm.h index 433fdbfb1e65a0c6a50fd784583d949b5f39e17b..6746377a9dd757b3052c4e400657137bca1c9c20 100644 --- a/include/trace/events/mpm.h +++ b/include/trace/events/mpm.h @@ -57,6 +57,28 @@ TRACE_EVENT(mpm_wakeup_pending_irqs, TP_printk("index:%u wakeup_irqs:0x%x", __entry->index, __entry->irqs) ); +TRACE_EVENT(mpm_wakeup_time, + + TP_PROTO(bool from_idle, u64 wakeup, u64 current_ticks), + + TP_ARGS(from_idle, wakeup, current_ticks), + + TP_STRUCT__entry( + __field(bool, from_idle) + __field(u64, wakeup) + __field(u64, current_ticks) + ), + + TP_fast_assign( + __entry->from_idle = from_idle; + __entry->wakeup = wakeup; + __entry->current_ticks = current_ticks; + ), + + TP_printk("idle:%d wakeup:0x%llx current:0x%llx", __entry->from_idle, + __entry->wakeup, __entry->current_ticks) +); + #endif #define TRACE_INCLUDE_FILE mpm #include diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h index 400e9598900618be94c3b55a1087e4aa4ea366f4..fca05480a31b453ed9e173c0a62d9d12060d441a 100644 --- a/include/trace/events/pdc.h +++ b/include/trace/events/pdc.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,7 +41,7 @@ TRACE_EVENT(irq_pin_config, ), TP_printk("%s hwirq:%u pin:%u type:%u enable:%u", - __entry->func, __entry->pin, __entry->hwirq, __entry->type, + __entry->func, __entry->hwirq, __entry->pin, __entry->type, __entry->enable) ); diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 79188553ad4ac84cf63aa9a71cd4be2e4d266f3e..e63888bcf20891e250f407e6e783e43e32630660 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -1372,6 +1372,10 @@ TRACE_EVENT(sched_isolate, __entry->time, __entry->isolate) ); +#include "walt.h" + +#endif /* CONFIG_SMP */ + TRACE_EVENT(sched_preempt_disable, TP_PROTO(u64 delta, bool irqs_disabled, @@ -1404,9 +1408,6 @@ TRACE_EVENT(sched_preempt_disable, __entry->caddr2, __entry->caddr3) ); -#include "walt.h" - -#endif /* CONFIG_SMP */ #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h index d791863b62fc361b0ce8ddfed40fa28962798983..2ec9064a2bb7373ef4f138cfc22404bd06538c0b 100644 --- a/include/trace/events/xen.h +++ b/include/trace/events/xen.h @@ -349,22 +349,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd, DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); -TRACE_EVENT(xen_mmu_flush_tlb_all, - TP_PROTO(int x), - TP_ARGS(x), - TP_STRUCT__entry(__array(char, x, 0)), - TP_fast_assign((void)x), - TP_printk("%s", "") - ); - -TRACE_EVENT(xen_mmu_flush_tlb, - TP_PROTO(int x), - TP_ARGS(x), - TP_STRUCT__entry(__array(char, x, 0)), - TP_fast_assign((void)x), - TP_printk("%s", "") - ); - TRACE_EVENT(xen_mmu_flush_tlb_one_user, TP_PROTO(unsigned long addr), TP_ARGS(addr), diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index ebb984ed8461e532682961e7747613bef28df3e9..68c27a863b275bec1a82767296a7cf9658455d9e 100644 
--- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -84,7 +84,6 @@ extern "C" { #define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14) #define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) #define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) -#define DRM_MODE_FLAG_SEAMLESS (1<<19) /* Picture aspect ratio options */ #define DRM_MODE_PICTURE_ASPECT_NONE 0 @@ -102,6 +101,7 @@ extern "C" { #define DRM_MODE_FLAG_SUPPORTS_RGB (1<<23) #define DRM_MODE_FLAG_SUPPORTS_YUV (1<<24) +#define DRM_MODE_FLAG_SEAMLESS (1<<31) /* DPMS flags */ /* bit compatible with the xorg definitions. */ diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 1c962fcb86194e386787245df8132e0efefe84fd..0c133c527437c0cbf88f12d460ec298e4c13ba07 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -381,6 +381,7 @@ struct drm_msm_event_resp { #define DRM_EVENT_SDE_POWER 0x80000004 #define DRM_EVENT_IDLE_NOTIFY 0x80000005 #define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */ +#define DRM_EVENT_SDE_HW_RECOVERY 0X80000007 #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h index 1a43659a857fbd7318353e25a75ddb061a79b2b0..ccc4f3a1466954c404673741419f4485df029e90 100644 --- a/include/uapi/drm/sde_drm.h +++ b/include/uapi/drm/sde_drm.h @@ -446,4 +446,11 @@ struct sde_drm_roi_v1 { #define SDE_MODE_DPMS_SUSPEND 4 #define SDE_MODE_DPMS_OFF 5 +/** + * sde recovery events for notifying client + */ +#define SDE_RECOVERY_SUCCESS 0 +#define SDE_RECOVERY_CAPTURE 1 +#define SDE_RECOVERY_HARD_RESET 2 + #endif /* _SDE_DRM_H_ */ diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index 91a31ffed828ddfbad55967022d3a1df32db5340..9a781f0611df0280be7d952258f486f805f27528 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ 
b/include/uapi/drm/virtgpu_drm.h @@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer { }; #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ +#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */ struct drm_virtgpu_getparam { __u64 param; diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h index a1ea9a0bd75e4bc6e351d820effbd6326c621a87..f48fa7fd14c76ba10cc782b13d375453955e30c1 100644 --- a/include/uapi/linux/esoc_ctrl.h +++ b/include/uapi/linux/esoc_ctrl.h @@ -27,6 +27,7 @@ struct esoc_link_data { #define ESOC_REQ_SEND_SHUTDOWN ESOC_REQ_SEND_SHUTDOWN #define ESOC_REQ_CRASH_SHUTDOWN ESOC_REQ_CRASH_SHUTDOWN +#define ESOC_PON_RETRY ESOC_PON_RETRY enum esoc_evt { ESOC_RUN_STATE = 0x1, @@ -38,6 +39,7 @@ enum esoc_evt { ESOC_CMD_ENG_ON, ESOC_CMD_ENG_OFF, ESOC_INVALID_STATE, + ESOC_RETRY_PON_EVT, }; enum esoc_cmd { @@ -61,6 +63,7 @@ enum esoc_notify { ESOC_DEBUG_FAIL, ESOC_PRIMARY_CRASH, ESOC_PRIMARY_REBOOT, + ESOC_PON_RETRY, }; enum esoc_req { diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 4199f8acbce53f5aab600a03872fc896e370daf9..971e82aec6d0af27ad9ae54a9a099b566730a9d6 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -275,6 +275,8 @@ struct fsxattr { #define FS_ENCRYPTION_MODE_AES_256_CTS 4 #define FS_ENCRYPTION_MODE_AES_128_CBC 5 #define FS_ENCRYPTION_MODE_AES_128_CTS 6 +#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 +#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 struct fscrypt_policy { __u8 version; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 3ee3bf7c85262b034702875b7d356ac7595abea6..244e3213ecb0568089ea4b5e2a6c1fca342f092a 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -30,6 +30,7 @@ */ #define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_TLEN 2 /* Octets in ethernet type field */ #define ETH_HLEN 14 /* Total octets in header. */ #define ETH_ZLEN 60 /* Min. 
octets in frame sans FCS */ #define ETH_DATA_LEN 1500 /* Max. octets in payload */ diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 71bd5b8c971a5ed40591f1b3b1c7ee3680157e0f..ab7e36fac3d89922fe94cd1ec7a37dffa297cfc4 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -571,6 +571,8 @@ enum ipa_rm_resource_name { * @IPA_HW_v3_5_1: IPA hardware version 3.5.1 * @IPA_HW_v4_0: IPA hardware version 4.0 * @IPA_HW_v4_1: IPA hardware version 4.1 + * @IPA_HW_v4_2: IPA hardware version 4.2 + * @IPA_HW_v4_5: IPA hardware version 4.5 */ enum ipa_hw_type { IPA_HW_None = 0, @@ -587,11 +589,15 @@ enum ipa_hw_type { IPA_HW_v3_5_1 = 13, IPA_HW_v4_0 = 14, IPA_HW_v4_1 = 15, + IPA_HW_v4_2 = 16, + IPA_HW_v4_5 = 17, }; -#define IPA_HW_MAX (IPA_HW_v4_1 + 1) +#define IPA_HW_MAX (IPA_HW_v4_5 + 1) #define IPA_HW_v4_0 IPA_HW_v4_0 #define IPA_HW_v4_1 IPA_HW_v4_1 +#define IPA_HW_v4_2 IPA_HW_v4_2 +#define IPA_HW_v4_5 IPA_HW_v4_5 /** * struct ipa_rule_attrib - attributes of a routing/filtering diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index cc71d730b34e23ca9d8dbb17d0603905253af388..7e8de4bcb231c64b22ee6075250a5954d68383f4 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1420,8 +1420,12 @@ enum nl80211_commands { * * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is * used for the association (&enum nl80211_mfp, represented as a u32); - * this attribute can be used - * with %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests + * this attribute can be used with %NL80211_CMD_ASSOCIATE and + * %NL80211_CMD_CONNECT requests. %NL80211_MFP_OPTIONAL is not allowed for + * %NL80211_CMD_ASSOCIATE since user space SME is expected and hence, it + * must have decided whether to use management frame protection or not. + * Setting %NL80211_MFP_OPTIONAL with a %NL80211_CMD_CONNECT request will + * let the driver (or the firmware) decide whether to use MFP or not. 
* * @NL80211_ATTR_STA_FLAGS2: Attribute containing a * &struct nl80211_sta_flag_update. @@ -2617,6 +2621,8 @@ enum nl80211_attrs { #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS +#define NL80211_WIPHY_NAME_MAXLEN 64 + #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_HT_RATES 77 #define NL80211_MAX_SUPP_REG_RULES 64 @@ -3960,10 +3966,12 @@ enum nl80211_key_type { * enum nl80211_mfp - Management frame protection state * @NL80211_MFP_NO: Management frame protection not used * @NL80211_MFP_REQUIRED: Management frame protection required + * @NL80211_MFP_OPTIONAL: Management frame protection is optional */ enum nl80211_mfp { NL80211_MFP_NO, NL80211_MFP_REQUIRED, + NL80211_MFP_OPTIONAL, }; enum nl80211_wpa_versions { @@ -4927,6 +4935,20 @@ enum nl80211_feature_flags { * handshake with 802.1X in station mode (will pass EAP frames to the host * and accept the set_pmk/del_pmk commands), doing it in the host might not * be supported. + * @NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME: Driver is capable of overriding + * the max channel attribute in the FILS request params IE with the + * actual dwell time. + * @NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP: Driver accepts broadcast probe + * response + * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE: Driver supports sending + * the first probe request in each channel at rate of at least 5.5Mbps. + * @NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: Driver supports + * probe request tx deferral and suppression + * @NL80211_EXT_FEATURE_MFP_OPTIONAL: Driver supports the %NL80211_MFP_OPTIONAL + * value in %NL80211_ATTR_USE_MFP. + * @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan. + * @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan. + * @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan. * @NL80211_EXT_FEATURE_DFS_OFFLOAD: HW/driver will offload DFS actions. 
* Device or driver will do all DFS-related actions by itself, * informing user-space about CAC progress, radar detection event, @@ -4955,6 +4977,14 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_FILS_SK_OFFLOAD, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK, NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION, + NL80211_EXT_FEATURE_MFP_OPTIONAL, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN, + NL80211_EXT_FEATURE_LOW_POWER_SCAN, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN, NL80211_EXT_FEATURE_DFS_OFFLOAD, /* add new features before the definition below */ @@ -5016,6 +5046,10 @@ enum nl80211_timeout_reason { * of NL80211_CMD_TRIGGER_SCAN and NL80211_CMD_START_SCHED_SCAN * requests. * + * NL80211_SCAN_FLAG_LOW_SPAN, NL80211_SCAN_FLAG_LOW_POWER, and + * NL80211_SCAN_FLAG_HIGH_ACCURACY flags are exclusive of each other, i.e., only + * one of them can be used in the request. + * * @NL80211_SCAN_FLAG_LOW_PRIORITY: scan request has low priority * @NL80211_SCAN_FLAG_FLUSH: flush cache before scanning * @NL80211_SCAN_FLAG_AP: force a scan even if the interface is configured @@ -5032,12 +5066,44 @@ enum nl80211_timeout_reason { * locally administered 1, multicast 0) is assumed. * This flag must not be requested when the feature isn't supported, check * the nl80211 feature flags for the device. + * @NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME: fill the dwell time in the FILS + * request parameters IE in the probe request + * @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses + * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at + * rate of at least 5.5M. In case non OCE AP is dicovered in the channel, + * only the first probe req in the channel will be sent in high rate. 
+ * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request + * tx deferral (dot11FILSProbeDelay shall be set to 15ms) + * and suppression (if it has received a broadcast Probe Response frame, + * Beacon frame or FILS Discovery frame from an AP that the STA considers + * a suitable candidate for (re-)association - suitable in terms of + * SSID and/or RSSI. + * @NL80211_SCAN_FLAG_LOW_SPAN: Span corresponds to the total time taken to + * accomplish the scan. Thus, this flag intends the driver to perform the + * scan request with lesser span/duration. It is specific to the driver + * implementations on how this is accomplished. Scan accuracy may get + * impacted with this flag. + * @NL80211_SCAN_FLAG_LOW_POWER: This flag intends the scan attempts to consume + * optimal possible power. Drivers can resort to their specific means to + * optimize the power. Scan accuracy may get impacted with this flag. + * @NL80211_SCAN_FLAG_HIGH_ACCURACY: Accuracy here intends to the extent of scan + * results obtained. Thus HIGH_ACCURACY scan flag aims to get maximum + * possible scan results. This flag hints the driver to use the best + * possible scan configuration to improve the accuracy in scanning. + * Latency and power use may get impacted with this flag. 
*/ enum nl80211_scan_flags { - NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, - NL80211_SCAN_FLAG_FLUSH = 1<<1, - NL80211_SCAN_FLAG_AP = 1<<2, - NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3, + NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, + NL80211_SCAN_FLAG_FLUSH = 1<<1, + NL80211_SCAN_FLAG_AP = 1<<2, + NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3, + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 1<<4, + NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 1<<5, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 1<<6, + NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 1<<7, + NL80211_SCAN_FLAG_LOW_SPAN = 1<<8, + NL80211_SCAN_FLAG_LOW_POWER = 1<<9, + NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10, }; /** diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 56c0ed196a1f3bb3d197fcb76327eb3e5628d90a..a4c0c8e745fce072d67da9d68df66ea300803dc3 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -198,6 +198,18 @@ struct prctl_mm_map { # define PR_CAP_AMBIENT_LOWER 3 # define PR_CAP_AMBIENT_CLEAR_ALL 4 +/* Per task speculation control */ +#define PR_GET_SPECULATION_CTRL 52 +#define PR_SET_SPECULATION_CTRL 53 +/* Speculation control variants */ +# define PR_SPEC_STORE_BYPASS 0 +/* Return and control values for PR_SET/GET_SPECULATION_CTRL */ +# define PR_SPEC_NOT_AFFECTED 0 +# define PR_SPEC_PRCTL (1UL << 0) +# define PR_SPEC_ENABLE (1UL << 1) +# define PR_SPEC_DISABLE (1UL << 2) +# define PR_SPEC_FORCE_DISABLE (1UL << 3) + #define PR_SET_VMA 0x53564d41 # define PR_SET_VMA_ANON_NAME 0 diff --git a/include/uapi/linux/qg-profile.h b/include/uapi/linux/qg-profile.h new file mode 100644 index 0000000000000000000000000000000000000000..bffddbb038e0ca3bd17cd7b7409bf9a24edcbddc --- /dev/null +++ b/include/uapi/linux/qg-profile.h @@ -0,0 +1,66 @@ +#ifndef __QG_PROFILE_H__ +#define __QG_PROFILE_H__ + +#include + +/** + * enum profile_table - Table index for battery profile data + */ +enum profile_table { + TABLE_SOC_OCV1, + TABLE_SOC_OCV2, + TABLE_FCC1, + TABLE_FCC2, + TABLE_Z1, + 
TABLE_Z2, + TABLE_Z3, + TABLE_Z4, + TABLE_Z5, + TABLE_Z6, + TABLE_Y1, + TABLE_Y2, + TABLE_Y3, + TABLE_Y4, + TABLE_Y5, + TABLE_Y6, + TABLE_MAX, +}; + +/** + * struct battery_params - Battery profile data to be exchanged + * @soc: SOC (state of charge) of the battery + * @ocv_uv: OCV (open circuit voltage) of the battery + * @batt_temp: Battery temperature in deci-degree + * @var: 'X' axis param for interpolation + * @table_index:Table index to be used for interpolation + */ +struct battery_params { + int soc; + int ocv_uv; + int fcc_mah; + int slope; + int var; + int batt_temp; + int table_index; +}; + +/* Profile MIN / MAX values */ +#define QG_MIN_SOC 0 +#define QG_MAX_SOC 10000 +#define QG_MIN_OCV_UV 3000000 +#define QG_MAX_OCV_UV 5000000 +#define QG_MIN_VAR 0 +#define QG_MAX_VAR 65535 +#define QG_MIN_FCC_MAH 100 +#define QG_MAX_FCC_MAH 16000 +#define QG_MIN_SLOPE 1 +#define QG_MAX_SLOPE 50000 + +/* IOCTLs to query battery profile data */ +#define BPIOCXSOC _IOWR('B', 0x01, struct battery_params) /* SOC */ +#define BPIOCXOCV _IOWR('B', 0x02, struct battery_params) /* OCV */ +#define BPIOCXFCC _IOWR('B', 0x03, struct battery_params) /* FCC */ +#define BPIOCXSLOPE _IOWR('B', 0x04, struct battery_params) /* Slope */ +#define BPIOCXVAR _IOWR('B', 0x05, struct battery_params) /* All-other */ + +#endif /* __QG_PROFILE_H__ */ diff --git a/include/uapi/linux/qg.h b/include/uapi/linux/qg.h new file mode 100644 index 0000000000000000000000000000000000000000..2c7b49af873dcb4edc8d6bd0d67088c26dfc091f --- /dev/null +++ b/include/uapi/linux/qg.h @@ -0,0 +1,55 @@ +#ifndef __QG_H__ +#define __QG_H__ + +#define MAX_FIFO_LENGTH 16 + +enum qg { + QG_SOC, + QG_OCV_UV, + QG_RBAT_MOHM, + QG_PON_OCV_UV, + QG_GOOD_OCV_UV, + QG_ESR, + QG_CHARGE_COUNTER, + QG_FIFO_TIME_DELTA, + QG_BATT_SOC, + QG_CC_SOC, + QG_RESERVED_3, + QG_RESERVED_4, + QG_RESERVED_5, + QG_RESERVED_6, + QG_RESERVED_7, + QG_RESERVED_8, + QG_RESERVED_9, + QG_RESERVED_10, + QG_MAX, +}; + +#define QG_BATT_SOC QG_BATT_SOC 
+#define QG_CC_SOC QG_CC_SOC + +struct fifo_data { + unsigned int v; + unsigned int i; + unsigned int count; + unsigned int interval; +}; + +struct qg_param { + unsigned int data; + bool valid; +}; + +struct qg_kernel_data { + unsigned int seq_no; + unsigned int fifo_time; + unsigned int fifo_length; + struct fifo_data fifo[MAX_FIFO_LENGTH]; + struct qg_param param[QG_MAX]; +}; + +struct qg_user_data { + struct qg_param param[QG_MAX]; +}; + +#endif diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 2a0bd9dd104dc625f91b7938fa4f128e9a3c4df2..9efc0e73d50bee2416dc966254c9affbcd7ba413 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -17,8 +17,9 @@ #define SECCOMP_GET_ACTION_AVAIL 2 /* Valid flags for SECCOMP_SET_MODE_FILTER */ -#define SECCOMP_FILTER_FLAG_TSYNC 1 -#define SECCOMP_FILTER_FLAG_LOG 2 +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1) +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) /* * All BPF programs must return a 32-bit value. diff --git a/ipc/shm.c b/ipc/shm.c index a9cce632ed48ec7c39c80c79285cfd07974852fb..44cca2529a95844055e0da4ceccf1c6a150133ff 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -1309,14 +1309,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, if (addr) { if (addr & (shmlba - 1)) { - /* - * Round down to the nearest multiple of shmlba. - * For sane do_mmap_pgoff() parameters, avoid - * round downs that trigger nil-page and MAP_FIXED. - */ - if ((shmflg & SHM_RND) && addr >= shmlba) - addr &= ~(shmlba - 1); - else + if (shmflg & SHM_RND) { + addr &= ~(shmlba - 1); /* round down */ + + /* + * Ensure that the round-down is non-nil + * when remapping. This can happen for + * cases when addr < shmlba. 
+ */ + if (!addr && (shmflg & SHM_REMAP)) + goto out; + } else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif diff --git a/kernel/audit.c b/kernel/audit.c index 5b34d3114af484f8375857541de87a6068f6178a..d301276bca584170e2d01de37f5372cbf5f4de38 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1058,6 +1058,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature return; ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); + if (!ab) + return; audit_log_task_info(ab, current); audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", audit_feature_names[which], !!old_feature, !!new_feature, diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index c8146d53ca677a9ef28218ba5590fa3e28f530d7..07aefa8dbee8575a535627e54a229cc4f545f809 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1566,6 +1566,7 @@ static int kdb_md(int argc, const char **argv) int symbolic = 0; int valid = 0; int phys = 0; + int raw = 0; kdbgetintenv("MDCOUNT", &mdcount); kdbgetintenv("RADIX", &radix); @@ -1575,9 +1576,10 @@ static int kdb_md(int argc, const char **argv) repeat = mdcount * 16 / bytesperword; if (strcmp(argv[0], "mdr") == 0) { - if (argc != 2) + if (argc == 2 || (argc == 0 && last_addr != 0)) + valid = raw = 1; + else return KDB_ARGCOUNT; - valid = 1; } else if (isdigit(argv[0][2])) { bytesperword = (int)(argv[0][2] - '0'); if (bytesperword == 0) { @@ -1613,7 +1615,10 @@ static int kdb_md(int argc, const char **argv) radix = last_radix; bytesperword = last_bytesperword; repeat = last_repeat; - mdcount = ((repeat * bytesperword) + 15) / 16; + if (raw) + mdcount = repeat; + else + mdcount = ((repeat * bytesperword) + 15) / 16; } if (argc) { @@ -1630,7 +1635,10 @@ static int kdb_md(int argc, const char **argv) diag = kdbgetularg(argv[nextarg], &val); if (!diag) { mdcount = (int) val; - repeat = mdcount * 16 / bytesperword; + if (raw) + repeat = mdcount; + else + repeat 
= mdcount * 16 / bytesperword; } } if (argc >= nextarg+1) { @@ -1640,8 +1648,15 @@ static int kdb_md(int argc, const char **argv) } } - if (strcmp(argv[0], "mdr") == 0) - return kdb_mdr(addr, mdcount); + if (strcmp(argv[0], "mdr") == 0) { + int ret; + last_addr = addr; + ret = kdb_mdr(addr, mdcount); + last_addr += mdcount; + last_repeat = mdcount; + last_bytesperword = bytesperword; // to make REPEAT happy + return ret; + } switch (radix) { case 10: diff --git a/kernel/events/core.c b/kernel/events/core.c index b233bd435723f1570f7eae2fc7340938bdcd345b..ebd3baf759c939c2674f551a0ce4e154a28f89e9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -675,9 +675,15 @@ static inline void __update_cgrp_time(struct perf_cgroup *cgrp) static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) { - struct perf_cgroup *cgrp_out = cpuctx->cgrp; - if (cgrp_out) - __update_cgrp_time(cgrp_out); + struct perf_cgroup *cgrp = cpuctx->cgrp; + struct cgroup_subsys_state *css; + + if (cgrp) { + for (css = &cgrp->css; css; css = css->parent) { + cgrp = container_of(css, struct perf_cgroup, css); + __update_cgrp_time(cgrp); + } + } } static inline void update_cgrp_time_from_event(struct perf_event *event) @@ -705,6 +711,7 @@ perf_cgroup_set_timestamp(struct task_struct *task, { struct perf_cgroup *cgrp; struct perf_cgroup_info *info; + struct cgroup_subsys_state *css; /* * ctx->lock held by caller @@ -715,8 +722,12 @@ perf_cgroup_set_timestamp(struct task_struct *task, return; cgrp = perf_cgroup_from_task(task, ctx); - info = this_cpu_ptr(cgrp->info); - info->timestamp = ctx->timestamp; + + for (css = &cgrp->css; css; css = css->parent) { + cgrp = container_of(css, struct perf_cgroup, css); + info = this_cpu_ptr(cgrp->info); + info->timestamp = ctx->timestamp; + } } static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list); @@ -922,27 +933,39 @@ list_update_cgroup_event(struct perf_event *event, if (!is_cgroup_event(event)) return; - if (add && 
ctx->nr_cgroups++) - return; - else if (!add && --ctx->nr_cgroups) - return; /* * Because cgroup events are always per-cpu events, * this will always be called from the right CPU. */ cpuctx = __get_cpu_context(ctx); - cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; - /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/ - if (add) { + + /* + * Since setting cpuctx->cgrp is conditional on the current @cgrp + * matching the event's cgroup, we must do this for every new event, + * because if the first would mismatch, the second would not try again + * and we would leave cpuctx->cgrp unset. + */ + if (add && !cpuctx->cgrp) { struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); - list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list)); if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) cpuctx->cgrp = cgrp; - } else { - list_del(cpuctx_entry); - cpuctx->cgrp = NULL; } + + if (add && ctx->nr_cgroups++) + return; + else if (!add && --ctx->nr_cgroups) + return; + + /* no cgroup running */ + if (!add) + cpuctx->cgrp = NULL; + + cpuctx_entry = &cpuctx->cgrp_cpuctx_entry; + if (add) + list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list)); + else + list_del(cpuctx_entry); } #else /* !CONFIG_CGROUP_PERF */ @@ -2443,6 +2466,18 @@ static int __perf_install_in_context(void *info) raw_spin_lock(&task_ctx->lock); } +#ifdef CONFIG_CGROUP_PERF + if (is_cgroup_event(event)) { + /* + * If the current cgroup doesn't match the event's + * cgroup, we should not try to schedule it. 
+ */ + struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); + reprogram = cgroup_is_descendant(cgrp->css.cgroup, + event->cgrp->css.cgroup); + } +#endif + if (reprogram) { ctx_sched_out(ctx, cpuctx, EVENT_TIME); add_event_to_ctx(event, ctx); @@ -6000,7 +6035,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = running; - if (leader != event) + if ((leader != event) && + (leader->state == PERF_EVENT_STATE_ACTIVE)) leader->pmu->read(leader); values[n++] = perf_event_count(leader); diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index 2655f26ec882689f42d2ba7831209f166ce45869..c7471c3fb79898aa9c0176af57f72812564d2641 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c @@ -23,49 +23,11 @@ #include #include -/* - * This internal data structure is used for optimizing access to some of - * the subfields within the atomic_t cnts. - */ -struct __qrwlock { - union { - atomic_t cnts; - struct { -#ifdef __LITTLE_ENDIAN - u8 wmode; /* Writer mode */ - u8 rcnts[3]; /* Reader counts */ -#else - u8 rcnts[3]; /* Reader counts */ - u8 wmode; /* Writer mode */ -#endif - }; - }; - arch_spinlock_t lock; -}; - -/** - * rspin_until_writer_unlock - inc reader count & spin until writer is gone - * @lock : Pointer to queue rwlock structure - * @writer: Current queue rwlock writer status byte - * - * In interrupt context or at the head of the queue, the reader will just - * increment the reader count & wait until the writer releases the lock. 
- */ -static __always_inline void -rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts) -{ - while ((cnts & _QW_WMASK) == _QW_LOCKED) { - cpu_relax(); - cnts = atomic_read_acquire(&lock->cnts); - } -} - /** * queued_read_lock_slowpath - acquire read lock of a queue rwlock * @lock: Pointer to queue rwlock structure - * @cnts: Current qrwlock lock value */ -void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) +void queued_read_lock_slowpath(struct qrwlock *lock) { /* * Readers come here when they cannot get the lock without waiting @@ -73,13 +35,11 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) if (unlikely(in_interrupt())) { /* * Readers in interrupt context will get the lock immediately - * if the writer is just waiting (not holding the lock yet). - * The rspin_until_writer_unlock() function returns immediately - * in this case. Otherwise, they will spin (with ACQUIRE - * semantics) until the lock is available without waiting in - * the queue. + * if the writer is just waiting (not holding the lock yet), + * so spin with ACQUIRE semantics until the lock is available + * without waiting in the queue. */ - rspin_until_writer_unlock(lock, cnts); + atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); return; } atomic_sub(_QR_BIAS, &lock->cnts); @@ -88,14 +48,14 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) * Put the reader into the wait queue */ arch_spin_lock(&lock->wait_lock); + atomic_add(_QR_BIAS, &lock->cnts); /* * The ACQUIRE semantics of the following spinning code ensure * that accesses can't leak upwards out of our subsequent critical * section in the case that the lock is currently held for write. 
*/ - cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts); - rspin_until_writer_unlock(lock, cnts); + atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)); /* * Signal the next one in queue to become queue head @@ -110,8 +70,6 @@ EXPORT_SYMBOL(queued_read_lock_slowpath); */ void queued_write_lock_slowpath(struct qrwlock *lock) { - u32 cnts; - /* Put the writer into the wait queue */ arch_spin_lock(&lock->wait_lock); @@ -120,30 +78,14 @@ void queued_write_lock_slowpath(struct qrwlock *lock) (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)) goto unlock; - /* - * Set the waiting flag to notify readers that a writer is pending, - * or wait for a previous writer to go away. - */ - for (;;) { - struct __qrwlock *l = (struct __qrwlock *)lock; - - if (!READ_ONCE(l->wmode) && - (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0)) - break; + /* Set the waiting flag to notify readers that a writer is pending */ + atomic_add(_QW_WAITING, &lock->cnts); - cpu_relax(); - } - - /* When no more readers, set the locked flag */ - for (;;) { - cnts = atomic_read(&lock->cnts); - if ((cnts == _QW_WAITING) && - (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING, - _QW_LOCKED) == _QW_WAITING)) - break; - - cpu_relax(); - } + /* When no more readers or writers, set the locked flag */ + do { + atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING); + } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING, + _QW_LOCKED) != _QW_WAITING); unlock: arch_spin_unlock(&lock->wait_lock); } diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 701fc202c2b9b3d4e4ba94267c66bff8755b44e5..11adcef768bc0b6fa145a4fc78986cacf4ba97c3 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -449,6 +450,9 @@ EXPORT_SYMBOL_GPL(pm_qos_request); int pm_qos_request_for_cpu(int pm_qos_class, int cpu) { + if (cpu_isolated(cpu)) + return INT_MAX; + return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu]; } 
EXPORT_SYMBOL(pm_qos_request_for_cpu); @@ -471,6 +475,9 @@ int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask) val = c->default_value; for_each_cpu(cpu, mask) { + if (cpu_isolated(cpu)) + continue; + switch (c->type) { case PM_QOS_MIN: if (c->target_per_cpu[cpu] < val) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index fed95fa941e61737e55b84ce080135059c2dfbd3..8b3102d22823fae6377a96ef73e644a81f8e877d 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -559,8 +559,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) } t = list_entry(rnp->gp_tasks->prev, struct task_struct, rcu_node_entry); - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { + /* + * We could be printing a lot while holding a spinlock. + * Avoid triggering hard lockup. + */ + touch_nmi_watchdog(); sched_show_task(t); + } raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } @@ -1677,6 +1683,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) char *ticks_title; unsigned long ticks_value; + /* + * We could be printing a lot while holding a spinlock. Avoid + * triggering hard lockup. 
+ */ + touch_nmi_watchdog(); + if (rsp->gpnum == rdp->gpnum) { ticks_title = "ticks this GP"; ticks_value = rdp->ticks_this_gp; diff --git a/kernel/relay.c b/kernel/relay.c index 55da824f4adcfff0a2ef10bc7d86cd0d5af82825..1537158c67b38e139dc20de59525e738c1f66f21 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) { struct rchan_buf *buf; - if (chan->n_subbufs > UINT_MAX / sizeof(size_t *)) + if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *)) return NULL; buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL); diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c index 76a139770245e51cd1e095da7cd7a057b4f0cddf..8216954b57be4154a384d21029c5ccc4fb98c813 100644 --- a/kernel/sched/boost.c +++ b/kernel/sched/boost.c @@ -180,6 +180,7 @@ static void _sched_set_boost(int type) update_freq_aggregate_threshold( freq_aggr_threshold_backup); } + break; default: WARN_ON(1); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d62726796fd2de8ff170d565d4dba074330aa702..13ec4d8bebe62221b1b6f2a499cdf0f0ec598393 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5819,6 +5819,9 @@ int do_isolation_work_cpu_stop(void *data) } migrate_tasks(rq, &rf, false); + + if (rq->rd) + set_rq_online(rq); raw_spin_unlock(&rq->lock); clear_walt_request(cpu); diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c index a6af8cbb925c151cce0c5fa70f9da317962ce1bf..e7787e631def4ec99cce1d5bad33a8df9d0381ab 100644 --- a/kernel/sched/core_ctl.c +++ b/kernel/sched/core_ctl.c @@ -600,23 +600,27 @@ static int prev_cluster_nr_need_assist(int index) int need = 0; int cpu; struct cluster_data *prev_cluster; - unsigned int prev_cluster_available_cpus; if (index == 0) return 0; index--; prev_cluster = &cluster_state[index]; - prev_cluster_available_cpus = prev_cluster->active_cpus + - prev_cluster->nr_isolated_cpus; + + /* + * Next cluster should not assist, while there are isolated cpus + * in this 
cluster. + */ + if (prev_cluster->nr_isolated_cpus) + return 0; for_each_cpu(cpu, &prev_cluster->cpu_mask) need += nr_stats[cpu].nr; need += compute_prev_cluster_misfit_need(index); - if (need > prev_cluster_available_cpus) - need = need - prev_cluster_available_cpus; + if (need > prev_cluster->active_cpus) + need = need - prev_cluster->active_cpus; else need = 0; @@ -672,8 +676,7 @@ static unsigned int apply_task_need(const struct cluster_data *cluster, * unisolate as many cores as the previous cluster * needs assistance with. */ - if (cluster->nr_prev_assist_thresh != 0 && - cluster->nr_prev_assist >= cluster->nr_prev_assist_thresh) + if (cluster->nr_prev_assist >= cluster->nr_prev_assist_thresh) new_need = new_need + cluster->nr_prev_assist; /* only unisolate more cores if there are tasks to run */ @@ -761,7 +764,12 @@ static bool eval_need(struct cluster_data *cluster) if (new_need > cluster->active_cpus) { ret = 1; } else { - if (new_need == last_need) { + /* + * When there is no change in need and there are no more + * active CPUs than currently needed, just update the + * need time stamp and return. + */ + if (new_need == last_need && new_need == cluster->active_cpus) { cluster->need_ts = now; spin_unlock_irqrestore(&state_lock, flags); return 0; @@ -903,6 +911,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need) unsigned long flags; unsigned int num_cpus = cluster->num_cpus; unsigned int nr_isolated = 0; + bool first_pass = cluster->nr_not_preferred_cpus; /* * Protect against entry being removed (and added at tail) by other @@ -948,6 +957,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need) cluster->nr_isolated_cpus += nr_isolated; spin_unlock_irqrestore(&state_lock, flags); +again: /* * If the number of active CPUs is within the limits, then * don't force isolation of any busy CPUs. 
@@ -967,6 +977,9 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need) if (cluster->active_cpus <= cluster->max_cpus) break; + if (first_pass && !c->not_preferred) + continue; + spin_unlock_irqrestore(&state_lock, flags); pr_debug("Trying to isolate CPU%u\n", c->cpu); @@ -983,6 +996,10 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need) cluster->nr_isolated_cpus += nr_isolated; spin_unlock_irqrestore(&state_lock, flags); + if (first_pass && cluster->active_cpus > cluster->max_cpus) { + first_pass = false; + goto again; + } } static void __try_to_unisolate(struct cluster_data *cluster, @@ -1195,7 +1212,7 @@ static int cluster_init(const struct cpumask *mask) cluster->need_cpus = cluster->num_cpus; cluster->offline_delay_ms = 100; cluster->task_thres = UINT_MAX; - cluster->nr_prev_assist_thresh = 0; + cluster->nr_prev_assist_thresh = UINT_MAX; cluster->nrrun = cluster->num_cpus; cluster->enable = true; cluster->nr_not_preferred_cpus = 0; @@ -1228,8 +1245,8 @@ static int cluster_init(const struct cpumask *mask) static int __init core_ctl_init(void) { - unsigned int cpu; - struct cpumask cpus = *cpu_possible_mask; + struct sched_cluster *cluster; + int ret; cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "core_ctl/isolation:online", @@ -1239,15 +1256,12 @@ static int __init core_ctl_init(void) "core_ctl/isolation:dead", NULL, core_ctl_isolation_dead_cpu); - for_each_cpu(cpu, &cpus) { - int ret; - const struct cpumask *cluster_cpus = cpu_coregroup_mask(cpu); - - ret = cluster_init(cluster_cpus); + for_each_sched_cluster(cluster) { + ret = cluster_init(&cluster->cpus); if (ret) pr_warn("unable to create core ctl group: %d\n", ret); - cpumask_andnot(&cpus, &cpus, cluster_cpus); } + initialized = true; return 0; } diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 41574c969715041a7129ca67db0db9702b807ab0..7c57a25fb3e4dd7b433cb40ea2c6c0700bc642a8 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c 
@@ -133,6 +133,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, if (lowest_mask) { cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); + cpumask_andnot(lowest_mask, lowest_mask, + cpu_isolated_mask); if (drop_nopreempts) drop_nopreempt_cpus(lowest_mask); /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b3e8cd6a8ac2abd20df9ee4f1c5926e5fbefce2f..3ace54a93c45d2c8f5183d844ef562775cfacf15 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6418,7 +6418,7 @@ boosted_task_util(struct task_struct *task) static unsigned long capacity_spare_wake(int cpu, struct task_struct *p) { - return capacity_orig_of(cpu) - cpu_util_wake(cpu, p); + return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0); } /* @@ -7000,8 +7000,9 @@ static inline bool task_fits_max(struct task_struct *p, int cpu) return true; if (sched_boost_policy() == SCHED_BOOST_ON_BIG && - task_sched_boost(p)) - return !is_min_capacity_cpu(cpu); + task_sched_boost(p) && + is_min_capacity_cpu(cpu)) + return false; return task_fits_capacity(p, capacity, cpu); } @@ -7053,6 +7054,13 @@ static inline bool skip_sg(struct task_struct *p, struct sched_group *sg, if (!sg->group_weight) return true; + /* + * Don't skip a group if a task affinity allows it + * to run only on that group. 
+ */ + if (cpumask_subset(&p->cpus_allowed, sched_group_span(sg))) + return false; + if (!task_fits_max(p, fcpu)) return true; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index b10af337dea2f79f59573ca4107ad6b93a9b0549..cb41bad9c3392ba233e4ef1e71285b38407b5fb7 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -850,6 +850,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) continue; raw_spin_lock(&rq->lock); + update_rq_clock(rq); + if (rt_rq->rt_time) { u64 runtime; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d06f2631e36a347eed4c715ed92ecd6ab4f93d57..9a537b5fea80bce433f6eb49d8857df73a6f11b9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1965,7 +1965,7 @@ cpu_util_freq_pelt(int cpu) } #ifdef CONFIG_SCHED_WALT -extern atomic64_t walt_irq_work_lastq_ws; +extern u64 walt_load_reported_window; static inline unsigned long cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load) @@ -2003,7 +2003,7 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load) walt_load->prev_window_util = util; walt_load->nl = nl; walt_load->pl = pl; - walt_load->ws = atomic64_read(&walt_irq_work_lastq_ws); + walt_load->ws = walt_load_reported_window; } return (util >= capacity) ? capacity : util; diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c index e4a1fc5e16720613bea69a180850c19aceb3ecfb..f9b2238eefbbf4bf249338dd77a1166e137aacb9 100644 --- a/kernel/sched/sched_avg.c +++ b/kernel/sched/sched_avg.c @@ -35,7 +35,7 @@ static s64 last_get_time; static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0); -#define NR_THRESHOLD_PCT 85 +#define NR_THRESHOLD_PCT 15 /** * sched_get_nr_running_avg @@ -76,7 +76,7 @@ void sched_get_nr_running_avg(struct sched_avg_stats *stats) /* * NR_THRESHOLD_PCT is to make sure that the task ran - * at least 15% in the last window to compensate any + * at least 85% in the last window to compensate any * over estimating being done. 
*/ stats[cpu].nr = (int)div64_u64((tmp_nr + NR_THRESHOLD_PCT), diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 47f17638236d5ed28b1a846e0e2c31c93b6e484a..22b0419eae057b2ac75888941c5363c293f5f4fc 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -45,7 +45,8 @@ const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP", static struct cpu_cycle_counter_cb cpu_cycle_counter_cb; static bool use_cycle_counter; DEFINE_MUTEX(cluster_lock); -atomic64_t walt_irq_work_lastq_ws; +static atomic64_t walt_irq_work_lastq_ws; +u64 walt_load_reported_window; static struct irq_work walt_cpufreq_irq_work; static struct irq_work walt_migration_irq_work; @@ -850,6 +851,9 @@ void set_window_start(struct rq *rq) rq->window_start = 1; sync_cpu_available = 1; atomic64_set(&walt_irq_work_lastq_ws, rq->window_start); + walt_load_reported_window = + atomic64_read(&walt_irq_work_lastq_ws); + } else { struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask)); @@ -3074,7 +3078,7 @@ void walt_irq_work(struct irq_work *irq_work) raw_spin_lock(&cpu_rq(cpu)->lock); wc = ktime_get_ns(); - + walt_load_reported_window = atomic64_read(&walt_irq_work_lastq_ws); for_each_sched_cluster(cluster) { u64 aggr_grp_load = 0; diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 5f0dfb2abb8d39542d72003f860e25eaff66c8f0..075e344a87c3f9d4fbdd6b4aa9929248cd7e9a63 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include #include @@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode) return true; } +void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { } + static inline void seccomp_assign_mode(struct task_struct *task, - unsigned long seccomp_mode) + unsigned long seccomp_mode, + unsigned long flags) { assert_spin_locked(&task->sighand->siglock); @@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task, * filter) is set. 
*/ smp_mb__before_atomic(); + /* Assume default seccomp processes want spec flaw mitigation. */ + if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0) + arch_seccomp_spec_mitigate(task); set_tsk_thread_flag(task, TIF_SECCOMP); } @@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void) * without dropping the locks. * */ -static inline void seccomp_sync_threads(void) +static inline void seccomp_sync_threads(unsigned long flags) { struct task_struct *thread, *caller; @@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void) * allow one thread to transition the other. */ if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) - seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); + seccomp_assign_mode(thread, SECCOMP_MODE_FILTER, + flags); } } @@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags, /* Now that the new filter is in place, synchronize to all threads. */ if (flags & SECCOMP_FILTER_FLAG_TSYNC) - seccomp_sync_threads(); + seccomp_sync_threads(flags); return 0; } @@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void) #ifdef TIF_NOTSC disable_TSC(); #endif - seccomp_assign_mode(current, seccomp_mode); + seccomp_assign_mode(current, seccomp_mode, 0); ret = 0; out: @@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags, /* Do not free the successfully attached filter. 
*/ prepared = NULL; - seccomp_assign_mode(current, seccomp_mode); + seccomp_assign_mode(current, seccomp_mode, flags); out: spin_unlock_irq(¤t->sighand->siglock); if (flags & SECCOMP_FILTER_FLAG_TSYNC) diff --git a/kernel/signal.c b/kernel/signal.c index 6895f6bb98a76989e38aad115bf6023cd8121cfe..99db4b67269f15709e1a74fa2c428a39e87f077c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -40,6 +40,8 @@ #include #include #include +#include +#include #define CREATE_TRACE_POINTS #include @@ -1284,8 +1286,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) ret = check_kill_permission(sig, info, p); rcu_read_unlock(); - if (!ret && sig) + if (!ret && sig) { ret = do_send_sig_info(sig, info, p, true); + if (capable(CAP_KILL) && sig == SIGKILL) + add_to_oom_reaper(p); + } return ret; } diff --git a/kernel/sys.c b/kernel/sys.c index 745953a1a736ad444b9de43ffde5ab4b84d677de..913990b95eba499856524094f271b9e0793d74c3 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -63,6 +63,8 @@ #include #include +#include + #include /* Move somewhere else to avoid recompiling? 
*/ #include @@ -1395,6 +1397,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, if (resource >= RLIM_NLIMITS) return -EINVAL; + resource = array_index_nospec(resource, RLIM_NLIMITS); task_lock(current->group_leader); x = current->signal->rlim[resource]; task_unlock(current->group_leader); @@ -1414,6 +1417,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, if (resource >= RLIM_NLIMITS) return -EINVAL; + resource = array_index_nospec(resource, RLIM_NLIMITS); task_lock(current->group_leader); r = current->signal->rlim[resource]; task_unlock(current->group_leader); @@ -2333,6 +2337,17 @@ static int prctl_set_vma(unsigned long opt, unsigned long start, } #endif +int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which) +{ + return -EINVAL; +} + +int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which, + unsigned long ctrl) +{ + return -EINVAL; +} + SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) { @@ -2535,6 +2550,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, case PR_GET_FP_MODE: error = GET_FP_MODE(me); break; + case PR_GET_SPECULATION_CTRL: + if (arg3 || arg4 || arg5) + return -EINVAL; + error = arch_prctl_spec_ctrl_get(me, arg2); + break; + case PR_SET_SPECULATION_CTRL: + if (arg4 || arg5) + return -EINVAL; + error = arch_prctl_spec_ctrl_set(me, arg2, arg3); + break; case PR_SET_VMA: error = prctl_set_vma(arg2, arg3, arg4, arg5); break; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4e498c8a1979b789d61efffed29e0256e9216d0e..0ed69a527bb5e8785203d9a765be72e36199680e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -105,6 +105,7 @@ extern char core_pattern[]; extern unsigned int core_pipe_limit; #endif extern int pid_max; +extern int extra_free_kbytes; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; extern int latencytop_enabled; @@ -1377,6 +1378,13 @@ 
static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "reap_mem_on_sigkill", + .data = &sysctl_reap_mem_on_sigkill, + .maxlen = sizeof(sysctl_reap_mem_on_sigkill), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, @@ -1570,6 +1578,14 @@ static struct ctl_table vm_table[] = { .extra1 = &one, .extra2 = &one_thousand, }, + { + .procname = "extra_free_kbytes", + .data = &extra_free_kbytes, + .maxlen = sizeof(extra_free_kbytes), + .mode = 0644, + .proc_handler = min_free_kbytes_sysctl_handler, + .extra1 = &zero, + }, { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, @@ -1774,6 +1790,22 @@ static struct ctl_table vm_table[] = { .extra1 = (void *)&mmap_rnd_compat_bits_min, .extra2 = (void *)&mmap_rnd_compat_bits_max, }, +#endif +#ifdef CONFIG_SWAP + { + .procname = "swap_ratio", + .data = &sysctl_swap_ratio, + .maxlen = sizeof(sysctl_swap_ratio), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + }, + { + .procname = "swap_ratio_enable", + .data = &sysctl_swap_ratio_enable, + .maxlen = sizeof(sysctl_swap_ratio_enable), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + }, #endif { } }; diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b398c2ea69b290cdaec1769b7d11cbc501646652..aa2094d5dd275372f999d051887aec22bfab9f19 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) now = ktime_get(); /* Find all expired events */ for_each_cpu(cpu, tick_broadcast_oneshot_mask) { + /* + * Required for !SMP because for_each_cpu() reports + * unconditionally CPU0 as set on UP kernels. 
+ */ + if (!IS_ENABLED(CONFIG_SMP) && + cpumask_empty(tick_broadcast_oneshot_mask)) + break; + td = &per_cpu(tick_cpu_device, cpu); if (td->evtdev->next_event <= now) { cpumask_set_cpu(cpu, tmpmask); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 83dfa511fbc56f7af6286f74c736f59a5135ba18..407aabe9670b0c98f3d5a9222dd9c4ef5bbf021c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -895,7 +895,7 @@ int __trace_bputs(unsigned long ip, const char *str) EXPORT_SYMBOL_GPL(__trace_bputs); #ifdef CONFIG_TRACER_SNAPSHOT -static void tracing_snapshot_instance(struct trace_array *tr) +void tracing_snapshot_instance(struct trace_array *tr) { struct tracer *tracer = tr->current_trace; unsigned long flags; @@ -951,7 +951,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, struct trace_buffer *size_buf, int cpu_id); static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); -static int alloc_snapshot(struct trace_array *tr) +int tracing_alloc_snapshot_instance(struct trace_array *tr) { int ret; @@ -997,7 +997,7 @@ int tracing_alloc_snapshot(void) struct trace_array *tr = &global_trace; int ret; - ret = alloc_snapshot(tr); + ret = tracing_alloc_snapshot_instance(tr); WARN_ON(ret < 0); return ret; @@ -5405,7 +5405,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) #ifdef CONFIG_TRACER_MAX_TRACE if (t->use_max_tr && !had_max_tr) { - ret = alloc_snapshot(tr); + ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; } @@ -6386,7 +6386,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, } #endif if (!tr->allocated_snapshot) { - ret = alloc_snapshot(tr); + ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) break; } @@ -7107,7 +7107,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, return ret; out_reg: - ret = alloc_snapshot(tr); + ret = tracing_alloc_snapshot_instance(tr); if (ret < 0) goto out; diff --git 
a/kernel/trace/trace.h b/kernel/trace/trace.h index f578ab705ece9d8c93768cc3529a67dbe3105de2..ed140875c13734f9e6a4ac9ace251edcd3542cdc 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -1813,6 +1813,17 @@ static inline void __init trace_event_init(void) { } static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } #endif +#ifdef CONFIG_TRACER_SNAPSHOT +void tracing_snapshot_instance(struct trace_array *tr); +int tracing_alloc_snapshot_instance(struct trace_array *tr); +#else +static inline void tracing_snapshot_instance(struct trace_array *tr) { } +static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) +{ + return 0; +} +#endif + extern struct trace_iterator *tracepoint_print_iter; #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index f2ac9d44f6c4b1f0ea4128836d9977b138c840ca..b413fab7d75becb63fec1e9c795e135463ee552c 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -482,9 +482,10 @@ clear_event_triggers(struct trace_array *tr) struct trace_event_file *file; list_for_each_entry(file, &tr->events, list) { - struct event_trigger_data *data; - list_for_each_entry_rcu(data, &file->triggers, list) { + struct event_trigger_data *data, *n; + list_for_each_entry_safe(data, n, &file->triggers, list) { trace_event_trigger_enable_disable(file, 0); + list_del_rcu(&data->list); if (data->ops->free) data->ops->free(data->ops, data); } @@ -641,6 +642,7 @@ event_trigger_callback(struct event_command *cmd_ops, trigger_data->count = -1; trigger_data->ops = trigger_ops; trigger_data->cmd_ops = cmd_ops; + trigger_data->private_data = file; INIT_LIST_HEAD(&trigger_data->list); INIT_LIST_HEAD(&trigger_data->named_list); @@ -1041,7 +1043,12 @@ static struct event_command trigger_traceoff_cmd = { static void snapshot_trigger(struct event_trigger_data *data, void *rec) { - tracing_snapshot(); + struct 
trace_event_file *file = data->private_data; + + if (file) + tracing_snapshot_instance(file->tr); + else + tracing_snapshot(); } static void @@ -1063,7 +1070,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, { int ret = register_trigger(glob, ops, data, file); - if (ret > 0 && tracing_alloc_snapshot() != 0) { + if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) { unregister_trigger(glob, ops, data, file); ret = 0; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6ad163f0a8030a5e739794092e6748bf94f15a7a..8ca3eb25c490de5367061f2d38d766a25ebab462 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -5365,7 +5365,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) ret = device_register(&wq_dev->dev); if (ret) { - kfree(wq_dev); + put_device(&wq_dev->dev); wq->wq_dev = NULL; return ret; } diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 8b1feca1230a08c4537036f517cfa4369b62dffa..d172f0341b807ecad322618be5749199b4e30f5c 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1613,11 +1613,9 @@ static void set_iter_tags(struct radix_tree_iter *iter, static void __rcu **skip_siblings(struct radix_tree_node **nodep, void __rcu **slot, struct radix_tree_iter *iter) { - void *sib = node_to_entry(slot - 1); - while (iter->index < iter->next_index) { *nodep = rcu_dereference_raw(*slot); - if (*nodep && *nodep != sib) + if (*nodep && !is_sibling_entry(iter->node, *nodep)) return slot; slot++; iter->index = __radix_tree_iter_add(iter, 1); @@ -1632,7 +1630,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; - struct radix_tree_node *node = rcu_dereference_raw(*slot); + struct radix_tree_node *node; slot = skip_siblings(&node, slot, iter); @@ -2039,10 +2037,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node = NULL; - void __rcu 
**slot; + void __rcu **slot = NULL; void *entry; entry = __radix_tree_lookup(root, index, &node, &slot); + if (!slot) + return NULL; if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, get_slot_offset(node, slot)))) return NULL; diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 0ddf293cfac319885af9acbf7aac3bb25037a0d8..0a6f492fb9d98feab0e8ed31e81b992e20df29b3 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -434,23 +434,32 @@ static void noinline __init test_mem_optimisations(void) unsigned int start, nbits; for (start = 0; start < 1024; start += 8) { - memset(bmap1, 0x5a, sizeof(bmap1)); - memset(bmap2, 0x5a, sizeof(bmap2)); for (nbits = 0; nbits < 1024 - start; nbits += 8) { + memset(bmap1, 0x5a, sizeof(bmap1)); + memset(bmap2, 0x5a, sizeof(bmap2)); + bitmap_set(bmap1, start, nbits); __bitmap_set(bmap2, start, nbits); - if (!bitmap_equal(bmap1, bmap2, 1024)) + if (!bitmap_equal(bmap1, bmap2, 1024)) { printk("set not equal %d %d\n", start, nbits); - if (!__bitmap_equal(bmap1, bmap2, 1024)) + failed_tests++; + } + if (!__bitmap_equal(bmap1, bmap2, 1024)) { printk("set not __equal %d %d\n", start, nbits); + failed_tests++; + } bitmap_clear(bmap1, start, nbits); __bitmap_clear(bmap2, start, nbits); - if (!bitmap_equal(bmap1, bmap2, 1024)) + if (!bitmap_equal(bmap1, bmap2, 1024)) { printk("clear not equal %d %d\n", start, nbits); - if (!__bitmap_equal(bmap1, bmap2, 1024)) + failed_tests++; + } + if (!__bitmap_equal(bmap1, bmap2, 1024)) { printk("clear not __equal %d %d\n", start, nbits); + failed_tests++; + } } } } diff --git a/lib/test_kmod.c b/lib/test_kmod.c index fba78d25e82569e57845d16b36c271d0848099b0..96c304fd656adf2102a8d75f608c333fc299af30 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -1149,7 +1149,7 @@ static struct kmod_test_device *register_test_dev_kmod(void) mutex_lock(®_dev_mutex); /* int should suffice for number of devices, test for wrap */ - if (unlikely(num_test_devs + 1) < 0) { + if (num_test_devs + 1 == 
INT_MAX) { pr_err("reached limit of number of test devices\n"); goto out; } diff --git a/mm/Kconfig b/mm/Kconfig index 718544ece9c3094d86a29dcb51d5d2ce854b9025..5b77318feff1edc928cdad66b0c18eca716ed8fb 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -183,6 +183,25 @@ config MEMORY_HOTPLUG_DEFAULT_ONLINE Say N here if you want the default policy to keep all hot-plugged memory blocks in 'offline' state. +config MEMORY_HOTPLUG_MOVABLE_NODE + bool "Add hot-added memory blocks to ZONE_MOVABLE type" + default n + depends on MEMORY_HOTPLUG + depends on QCOM_MEM_OFFLINE + help + When onlining memory blocks, this option helps to add the target + memory block to ZONE_MOVABLE zone type. For successful offlining, + these memory blocks should belong to 'ZONE_MOVABLE' since it carries + only movable pages. When this option is not set, the default zone + policy is to add the blocks to 'ZONE_NORMAL' which may pin pages. + + See Documentation/memory-hotplug.txt for more information. + + Say Y here if you want all hot-added memory blocks to be added to + 'ZONE_MOVABLE' type by default. + Say N here if you want the default policy to add all hot-added + memory blocks in 'ZONE_NORMAL' type. + config MEMORY_HOTREMOVE bool "Allow for memory hot remove" select MEMORY_ISOLATION @@ -650,6 +669,7 @@ config DEFERRED_STRUCT_PAGE_INIT depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT depends on NO_BOOTMEM && MEMORY_HOTPLUG depends on !FLATMEM + depends on !NEED_PER_CPU_KM help Ordinarily all struct pages are initialised during early boot in a single thread. On very large machines this can take a considerable @@ -757,3 +777,25 @@ config PERCPU_STATS This feature collects and exposes statistics via debugfs. The information includes global and per chunk statistics, which can be used to help understand percpu memory usage. 
+ +config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT + def_bool n + +config SPECULATIVE_PAGE_FAULT + bool "Speculative page faults" + default y + depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT + depends on MMU && SMP + help + Try to handle user space page faults without holding the mmap_sem. + + This should allow better concurrency for massively threaded processes + since the page fault handler will not wait for other threads' memory + layout change to be done, assuming that this change is done in another + part of the process's memory space. This type of page fault is named + speculative page fault. + + If the speculative page fault fails because a concurrency is + detected or because underlying PMD or PTE tables are not yet + allocated, it fails its processing and a classic page fault + is then tried. diff --git a/mm/Makefile b/mm/Makefile index 7184a528e298801fe7ce8342e3961d087b7fac89..5a2b9505e41398b00a77aa3c49bf3fdd15bfaa9c 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -55,7 +55,7 @@ ifdef CONFIG_MMU endif obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o -obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o +obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o swap_ratio.o obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_ZSWAP) += zswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o diff --git a/mm/cma.c b/mm/cma.c index aa78dd3ab72bc559f5d1a515d2cd44a515524d8e..3d91939750a39c78cca235230a0ad5cfecf727eb 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -409,6 +409,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, struct page *page = NULL; int ret = -ENOMEM; int retry_after_sleep = 0; + int max_retries = 2; + int available_regions = 0; if (!cma || !cma->count) return NULL; @@ -433,8 +435,15 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, bitmap_maxno, start, bitmap_count, mask, offset); if (bitmap_no >= bitmap_maxno) { - if (retry_after_sleep < 2) { + if (retry_after_sleep < max_retries) { start = 0; + /* + * update max 
retries if available free regions + * are less. + */ + if (available_regions < 3) + max_retries = 5; + available_regions = 0; /* * Page may be momentarily pinned by some other * process which has been scheduled out, eg. @@ -452,6 +461,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, break; } } + + available_regions++; bitmap_set(cma->bitmap, bitmap_no, bitmap_count); /* * It's safe to drop the lock here. We've marked this region for diff --git a/mm/gup.c b/mm/gup.c index 8fc23a60487d6ca8c10be6acd5a33bca6ad5b7cc..d2ba0be714411eb56c49ef10cc94b69276b2c789 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; + if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) + return -EFAULT; + if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a403d29da6fd23a63df9027307e5a94843a5b621..8af604f3b3708542f543ef8cb979e6207aa6406e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -555,7 +555,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page, VM_BUG_ON_PAGE(!PageCompound(page), page); - if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) { + if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg, + true)) { put_page(page); count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; @@ -1304,7 +1305,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd) } if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, - huge_gfp, &memcg, true))) { + huge_gfp | __GFP_NORETRY, &memcg, true))) { put_page(new_page); split_huge_pmd(vma, vmf->pmd, vmf->address); if (page) @@ -2387,7 +2388,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, __split_huge_page_tail(head, i, lruvec, list); /* Some pages can be beyond i_size: drop them from page cache */ if 
(head[i].index >= end) { - __ClearPageDirty(head + i); + ClearPageDirty(head + i); __delete_from_page_cache(head + i, NULL); if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) shmem_uncharge(head->mapping->host, 1); diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 6f6c00ce5e71fa04866b07444f6f57645587060c..a4dd69a01374bce6cfcc8020d1cc69d66c441549 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -791,6 +791,40 @@ DEFINE_ASAN_SET_SHADOW(f5); DEFINE_ASAN_SET_SHADOW(f8); #ifdef CONFIG_MEMORY_HOTPLUG +static bool shadow_mapped(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (pgd_none(*pgd)) + return false; + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return false; + + /* + * We can't use pud_large() or pud_huge(), the first one is + * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse + * pud_bad(), if pud is bad then it's bad because it's huge. + */ + if (pud_bad(*pud)) + return true; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return false; + + if (pmd_bad(*pmd)) + return true; + pte = pte_offset_kernel(pmd, addr); + return !pte_none(*pte); +} + static int __meminit kasan_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -812,6 +846,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, case MEM_GOING_ONLINE: { void *ret; + /* + * If shadow is mapped already then it must have been mapped + * during the boot. This could happen if we are onlining previously + * offlined memory. 
+ */ + if (shadow_mapped(shadow_start)) + return NOTIFY_OK; + ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, shadow_end, GFP_KERNEL, PAGE_KERNEL, VM_NO_GUARD, @@ -823,8 +865,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb, kmemleak_ignore(ret); return NOTIFY_OK; } - case MEM_OFFLINE: - vfree((void *)shadow_start); + case MEM_CANCEL_ONLINE: + case MEM_OFFLINE: { + struct vm_struct *vm; + + /* + * shadow_start was either mapped during boot by kasan_init() + * or during memory online by __vmalloc_node_range(). + * In the latter case we can use vfree() to free shadow. + * Non-NULL result of the find_vm_area() will tell us if + * that was the second case. + * + * Currently it's not possible to free shadow mapped + * during boot by kasan_init(). It's because the code + * to do that hasn't been written yet. So we'll just + * leak the memory. + */ + vm = find_vm_area((void *)shadow_start); + if (vm) + vfree((void *)shadow_start); + } } return NOTIFY_OK; @@ -837,5 +897,5 @@ static int __init kasan_memhotplug_init(void) return 0; } -module_init(kasan_memhotplug_init); +core_initcall(kasan_memhotplug_init); #endif diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 29221602d802901d2e282d103c982db098e65a5d..0a5bb3e8a8a3c78d9a99fbb6260743275de88d1a 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -965,7 +965,9 @@ static void collapse_huge_page(struct mm_struct *mm, goto out_nolock; } - if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { + /* Do not oom kill for khugepaged charges */ + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY, + &memcg, true))) { result = SCAN_CGROUP_CHARGE_FAIL; goto out_nolock; } @@ -1324,7 +1326,9 @@ static void collapse_shmem(struct mm_struct *mm, goto out; } - if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { + /* Do not oom kill for khugepaged charges */ + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY, + &memcg, 
true))) { result = SCAN_CGROUP_CHARGE_FAIL; goto out; } diff --git a/mm/ksm.c b/mm/ksm.c index 5b6be9eeb095adfef11b1e89a0192dbf69df0a42..fdc8746ebcb459eccc70e36365b26c5fe24496d6 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2085,8 +2085,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { + bool split; + kpage = try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page); + /* + * If both pages we tried to merge belong to the same compound + * page, then we actually ended up increasing the reference + * count of the same compound page twice, and split_huge_page + * failed. + * Here we set a flag if that happened, and we use it later to + * try split_huge_page again. Since we call put_page right + * afterwards, the reference count will be correct and + * split_huge_page should succeed. + */ + split = PageTransCompound(page) + && compound_head(page) == compound_head(tree_page); put_page(tree_page); if (kpage) { /* @@ -2113,6 +2127,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) break_cow(tree_rmap_item); break_cow(rmap_item); } + } else if (split) { + /* + * We are here if we tried to merge two pages and + * failed because they both belonged to the same + * compound page. We will split the page now, but no + * merging will take place. + * We do not want to add the cost of a full lock; if + * the page is locked, it is better to skip it and + * perhaps try again later. 
+ */ + if (!trylock_page(page)) + return; + split_huge_page(page); + unlock_page(page); } } } diff --git a/mm/memblock.c b/mm/memblock.c index 0e0e69332dfd38a18e9c96e7b9e5041c3588075c..24c6a16dfd34dd5741656489666a16b114567202 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -19,9 +19,6 @@ #include #include #include -#include -#include -#include #include #include @@ -34,7 +31,6 @@ static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIO static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; #endif -static seqcount_t memblock_seq; struct memblock memblock __initdata_memblock = { .memory.regions = memblock_memory_init_regions, .memory.cnt = 1, /* empty dummy entry */ @@ -1556,8 +1552,8 @@ void __init memblock_mem_limit_remove_map(phys_addr_t limit) memblock_cap_memory_range(0, max_addr); } -static int __init_memblock __memblock_search(struct memblock_type *type, - phys_addr_t addr) +static int __init_memblock memblock_search(struct memblock_type *type, + phys_addr_t addr) { unsigned int left = 0, right = type->cnt; @@ -1575,20 +1571,6 @@ static int __init_memblock __memblock_search(struct memblock_type *type, return -1; } -static int __init_memblock memblock_search(struct memblock_type *type, - phys_addr_t addr) -{ - int ret; - unsigned long seq; - - do { - seq = raw_read_seqcount_begin(&memblock_seq); - ret = __memblock_search(type, addr); - } while (unlikely(read_seqcount_retry(&memblock_seq, seq))); - - return ret; -} - bool __init memblock_is_reserved(phys_addr_t addr) { return memblock_search(&memblock.reserved, addr) != -1; @@ -1773,37 +1755,6 @@ void __init memblock_allow_resize(void) memblock_can_resize = 1; } -static unsigned long __init_memblock -memblock_resize_late(int begin, unsigned long flags) -{ - static int memblock_can_resize_old; - - if (begin) { - preempt_disable(); - local_irq_save(flags); - memblock_can_resize_old = memblock_can_resize; - memblock_can_resize = 0; - 
raw_write_seqcount_begin(&memblock_seq); - } else { - raw_write_seqcount_end(&memblock_seq); - memblock_can_resize = memblock_can_resize_old; - local_irq_restore(flags); - preempt_enable(); - } - - return flags; -} - -unsigned long __init_memblock memblock_region_resize_late_begin(void) -{ - return memblock_resize_late(1, 0); -} - -void __init_memblock memblock_region_resize_late_end(unsigned long flags) -{ - memblock_resize_late(0, flags); -} - static int __init early_memblock(char *p) { if (p && strstr(p, "debug")) diff --git a/mm/memory.c b/mm/memory.c index 57f678e9341426fa2a101ce9fe49691c9810df7b..6fb74d4bf7bb7bc0aad00659efca69b9fa0b5ddb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2844,21 +2844,14 @@ int do_swap_page(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct page *page = NULL, *swapcache; struct mem_cgroup *memcg; - struct vma_swap_readahead swap_ra; swp_entry_t entry; pte_t pte; int locked; int exclusive = 0; int ret = 0; - bool vma_readahead = swap_use_vma_readahead(); - if (vma_readahead) - page = swap_readahead_detect(vmf, &swap_ra); - if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) { - if (page) - put_page(page); + if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) goto out; - } entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { @@ -2881,17 +2874,33 @@ int do_swap_page(struct vm_fault *vmf) } goto out; } + + delayacct_set_flag(DELAYACCT_PF_SWAPIN); - if (!page) - page = lookup_swap_cache(entry, vma_readahead ? 
vma : NULL, - vmf->address); + page = lookup_swap_cache(entry, vma, vmf->address); + swapcache = page; + if (!page) { - if (vma_readahead) - page = do_swap_page_readahead(entry, - GFP_HIGHUSER_MOVABLE, vmf, &swap_ra); - else - page = swapin_readahead(entry, - GFP_HIGHUSER_MOVABLE, vma, vmf->address); + struct swap_info_struct *si = swp_swap_info(entry); + + if (si->flags & SWP_SYNCHRONOUS_IO && + __swap_count(si, entry) == 1) { + /* skip swapcache */ + page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, + vmf->address); + if (page) { + __SetPageLocked(page); + __SetPageSwapBacked(page); + set_page_private(page, entry.val); + lru_cache_add_anon(page); + swap_readpage(page, true); + } + } else { + page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, + vmf); + swapcache = page; + } + if (!page) { /* * Back out if somebody else faulted in this pte @@ -2916,11 +2925,9 @@ int do_swap_page(struct vm_fault *vmf) */ ret = VM_FAULT_HWPOISON; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - swapcache = page; goto out_release; } - swapcache = page; locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); @@ -2935,7 +2942,8 @@ int do_swap_page(struct vm_fault *vmf) * test below, are not enough to exclude that. Even if it is still * swapcache, we need to check that the page's swap has not changed. 
*/ - if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) + if (unlikely((!PageSwapCache(page) || + page_private(page) != entry.val)) && swapcache) goto out_page; page = ksm_might_need_to_copy(page, vma, vmf->address); @@ -2988,14 +2996,16 @@ int do_swap_page(struct vm_fault *vmf) pte = pte_mksoft_dirty(pte); set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); vmf->orig_pte = pte; - if (page == swapcache) { - do_page_add_anon_rmap(page, vma, vmf->address, exclusive); - mem_cgroup_commit_charge(page, memcg, true, false); - activate_page(page); - } else { /* ksm created a completely new copy */ + + /* ksm created a completely new copy */ + if (unlikely(page != swapcache && swapcache)) { page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); + } else { + do_page_add_anon_rmap(page, vma, vmf->address, exclusive); + mem_cgroup_commit_charge(page, memcg, true, false); + activate_page(page); } swap_free(entry); @@ -3003,7 +3013,7 @@ int do_swap_page(struct vm_fault *vmf) (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); unlock_page(page); - if (page != swapcache) { + if (page != swapcache && swapcache) { /* * Hold the lock to avoid the swap entry to be reused * until we take the PT lock for the pte_same() check @@ -3036,7 +3046,7 @@ int do_swap_page(struct vm_fault *vmf) unlock_page(page); out_release: put_page(page); - if (page != swapcache) { + if (page != swapcache && swapcache) { unlock_page(swapcache); put_page(swapcache); } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 380c60dd97f552ea0c95ecd7da9d0f2bbc1829f4..d5364921ccaf134b0d2080aec4f1ebbfb4f5ad9f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -35,6 +35,7 @@ #include #include #include +#include #include @@ -64,7 +65,11 @@ void put_online_mems(void) percpu_up_read(&mem_hotplug_lock); } +#ifndef CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE bool 
movable_node_enabled = false; +#else +bool movable_node_enabled = true; +#endif #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE bool memhp_auto_online; @@ -1089,6 +1094,37 @@ int try_online_node(int nid) return ret; } +static int online_memory_one_block(struct memory_block *mem, void *arg) +{ + bool *onlined_block = (bool *)arg; + int ret; + + if (*onlined_block || !is_memblock_offlined(mem)) + return 0; + + ret = device_online(&mem->dev); + if (!ret) + *onlined_block = true; + + return 0; +} + +bool try_online_one_block(int nid) +{ + struct zone *zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; + bool onlined_block = false; + int ret = lock_device_hotplug_sysfs(); + + if (ret) + return false; + + walk_memory_range(zone->zone_start_pfn, zone_end_pfn(zone), + &onlined_block, online_memory_one_block); + + unlock_device_hotplug(); + return onlined_block; +} + static int check_hotplug_memory_range(u64 start, u64 size) { u64 start_pfn = PFN_DOWN(start); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1d916bcf3c4523b9e99956025b3ba08f43163b88..aa169bfa4dd1ce117deedb6e2ae529113cf86537 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2129,6 +2129,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) case MPOL_INTERLEAVE: return !!nodes_equal(a->v.nodes, b->v.nodes); case MPOL_PREFERRED: + /* a's ->flags is the same as b's */ + if (a->flags & MPOL_F_LOCAL) + return true; return a->v.preferred_node == b->v.preferred_node; default: BUG(); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d4684c45000f4f896cc1e8a0d5c2f264aa95e66f..d69ed837fb89b180d25e3f1568155a37d70cda83 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include "internal.h" @@ -51,6 +52,7 @@ int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks = 1; +int sysctl_reap_mem_on_sigkill; DEFINE_MUTEX(oom_lock); @@ -628,13 +630,21 @@ static void wake_oom_reaper(struct task_struct *tsk) if (!oom_reaper_th) 
return; + /* + * Move the lock here to avoid scenario of queuing + * the same task by both OOM killer and any other SIGKILL + * path. + */ + spin_lock(&oom_reaper_lock); + /* tsk is already queued? */ - if (tsk == oom_reaper_list || tsk->oom_reaper_list) + if (tsk == oom_reaper_list || tsk->oom_reaper_list) { + spin_unlock(&oom_reaper_lock); return; + } get_task_struct(tsk); - spin_lock(&oom_reaper_lock); tsk->oom_reaper_list = oom_reaper_list; oom_reaper_list = tsk; spin_unlock(&oom_reaper_lock); @@ -659,6 +669,16 @@ static inline void wake_oom_reaper(struct task_struct *tsk) } #endif /* CONFIG_MMU */ +static void __mark_oom_victim(struct task_struct *tsk) +{ + struct mm_struct *mm = tsk->mm; + + if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { + mmgrab(tsk->signal->oom_mm); + set_bit(MMF_OOM_VICTIM, &mm->flags); + } +} + /** * mark_oom_victim - mark the given task as OOM victim * @tsk: task to mark @@ -671,18 +691,13 @@ static inline void wake_oom_reaper(struct task_struct *tsk) */ static void mark_oom_victim(struct task_struct *tsk) { - struct mm_struct *mm = tsk->mm; - WARN_ON(oom_killer_disabled); /* OOM killer might race with memcg OOM */ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) return; /* oom_mm is bound to the signal struct life time. 
*/ - if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { - mmgrab(tsk->signal->oom_mm); - set_bit(MMF_OOM_VICTIM, &mm->flags); - } + __mark_oom_victim(tsk); /* * Make sure that the task is woken up from uninterruptible sleep @@ -1020,6 +1035,12 @@ bool out_of_memory(struct oom_control *oc) if (oom_killer_disabled) return false; + if (try_online_one_block(numa_node_id())) { + /* Got some memory back */ + WARN(1, "OOM killer had to online a memory block\n"); + return true; + } + if (!is_memcg_oom(oc)) { blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) @@ -1106,3 +1127,21 @@ void pagefault_out_of_memory(void) out_of_memory(&oc); mutex_unlock(&oom_lock); } + +void add_to_oom_reaper(struct task_struct *p) +{ + if (!sysctl_reap_mem_on_sigkill) + return; + + p = find_lock_task_mm(p); + if (!p) + return; + + get_task_struct(p); + if (task_will_free_mem(p)) { + __mark_oom_victim(p); + wake_oom_reaper(p); + } + task_unlock(p); + put_task_struct(p); +} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1b755dc30879d6cf7b6dc4acc04ec1f240e7b728..5f67c3902a32d98887d03d2ea7e3dbe5180af34a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -258,10 +258,22 @@ compound_page_dtor * const compound_page_dtors[] = { #endif }; +/* + * Try to keep at least this much lowmem free. Do not allow normal + * allocations below this point, only high priority ones. Automatically + * tuned according to the amount of memory in the system. + */ int min_free_kbytes = 1024; int user_min_free_kbytes = -1; int watermark_scale_factor = 10; +/* + * Extra memory for the system to try freeing. Used to temporarily + * free memory, to make space for new workloads. Anyone can allocate + * down to the min watermarks controlled by min_free_kbytes above. 
+ */ +int extra_free_kbytes = 0; + static unsigned long __meminitdata nr_kernel_pages; static unsigned long __meminitdata nr_all_pages; static unsigned long __meminitdata dma_reserve; @@ -6899,6 +6911,7 @@ static void setup_per_zone_lowmem_reserve(void) static void __setup_per_zone_wmarks(void) { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); + unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; @@ -6910,11 +6923,14 @@ static void __setup_per_zone_wmarks(void) } for_each_zone(zone) { - u64 tmp; + u64 min, low; spin_lock_irqsave(&zone->lock, flags); - tmp = (u64)pages_min * zone->managed_pages; - do_div(tmp, lowmem_pages); + min = (u64)pages_min * zone->managed_pages; + do_div(min, lowmem_pages); + low = (u64)pages_low * zone->managed_pages; + do_div(low, vm_total_pages); + if (is_highmem(zone)) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't @@ -6935,7 +6951,7 @@ static void __setup_per_zone_wmarks(void) * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. */ - zone->watermark[WMARK_MIN] = tmp; + zone->watermark[WMARK_MIN] = min; } /* @@ -6943,12 +6959,14 @@ static void __setup_per_zone_wmarks(void) * scale factor in proportion to available memory, but * ensure a minimum size on small systems. 
*/ - tmp = max_t(u64, tmp >> 2, + min = max_t(u64, min >> 2, mult_frac(zone->managed_pages, watermark_scale_factor, 10000)); - zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; - zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; + zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + + low + min; + zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + + low + min * 2; spin_unlock_irqrestore(&zone->lock, flags); } @@ -7031,7 +7049,7 @@ core_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes - * changes. + * or extra_free_kbytes changes. */ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) diff --git a/mm/page_idle.c b/mm/page_idle.c index 0a49374e693194504e715be8e54489f3eb1db2f1..e412a63b2b74f7820298751e1cf382fe90c48005 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -65,11 +65,15 @@ static bool page_idle_clear_pte_refs_one(struct page *page, while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - referenced = ptep_clear_young_notify(vma, addr, - pvmw.pte); + /* + * For PTE-mapped THP, one sub page is referenced, + * the whole THP is referenced. + */ + if (ptep_clear_young_notify(vma, addr, pvmw.pte)) + referenced = true; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { - referenced = pmdp_clear_young_notify(vma, addr, - pvmw.pmd); + if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) + referenced = true; } else { /* unexpected pmd-mapped page? 
*/ WARN_ON_ONCE(1); diff --git a/mm/page_io.c b/mm/page_io.c index 624f1e2f2d34e1eb23c04836a83413105bc5c183..94c45657a8aab2e35b8e58bed879d522ef7cc7d2 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -348,7 +348,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, return ret; } -int swap_readpage(struct page *page, bool do_poll) +int swap_readpage(struct page *page, bool synchronous) { struct bio *bio; int ret = 0; @@ -356,7 +356,7 @@ int swap_readpage(struct page *page, bool do_poll) blk_qc_t qc; struct gendisk *disk; - VM_BUG_ON_PAGE(!PageSwapCache(page), page); + VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageUptodate(page), page); if (frontswap_load(page) == 0) { @@ -404,7 +404,7 @@ int swap_readpage(struct page *page, bool do_poll) count_vm_event(PSWPIN); bio_get(bio); qc = submit_bio(bio); - while (do_poll) { + while (synchronous) { set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio->bi_private)) break; diff --git a/mm/page_owner.c b/mm/page_owner.c index d5aa378d4f6e3e6fa50b0bbe36d4c7fcf0128dd2..c553b2cf5a6c882f255d731bad786c82fb4b1461 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -127,13 +127,13 @@ void __reset_page_owner(struct page *page, unsigned int order) static inline bool check_recursive_alloc(struct stack_trace *trace, unsigned long ip) { - int i, count; + int i; if (!trace->nr_entries) return false; - for (i = 0, count = 0; i < trace->nr_entries; i++) { - if (trace->entries[i] == ip && ++count == 2) + for (i = 0; i < trace->nr_entries; i++) { + if (trace->entries[i] == ip) return true; } diff --git a/mm/shmem.c b/mm/shmem.c index 6a5973d727f895d86a359ee6b8b769fcd68aafaf..6c050bde08cf10eca80b98650b0da142a03d4636 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1422,9 +1422,12 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, { struct vm_area_struct pvma; struct page *page; + struct vm_fault vmf; shmem_pseudo_vma_init(&pvma, info, 
index); - page = swapin_readahead(swap, gfp, &pvma, 0); + vmf.vma = &pvma; + vmf.address = 0; + page = swap_cluster_readahead(swap, gfp, &vmf); shmem_pseudo_vma_destroy(&pvma); return page; diff --git a/mm/slab.c b/mm/slab.c index 2a81cc099b0817173cee40f14a7c16ad7fbe6b94..29bc08f22ddff10dcb076494c6b74efda5592dab 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1283,6 +1283,7 @@ void __init kmem_cache_init(void) nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN); list_add(&kmem_cache->list, &slab_caches); + memcg_link_cache(kmem_cache); slab_state = PARTIAL; /* diff --git a/mm/swap_ratio.c b/mm/swap_ratio.c new file mode 100644 index 0000000000000000000000000000000000000000..bfff30864b6cfb6cc0f8a5af7210f0653180ead7 --- /dev/null +++ b/mm/swap_ratio.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#define SWAP_RATIO_GROUP_START (SWAP_FLAG_PRIO_MASK - 9) /* 32758 */ +#define SWAP_RATIO_GROUP_END (SWAP_FLAG_PRIO_MASK) /* 32767 */ +#define SWAP_FAST_WRITES \ + ((SWAPFILE_CLUSTER * (SWAP_CLUSTER_MAX / 8)) / SWAP_BATCH) +#define SWAP_SLOW_WRITES (SWAPFILE_CLUSTER / SWAP_BATCH) + +/* + * The fast/slow swap write ratio. + * 100 indicates that all writes should + * go to fast swap device. 
+ */ +int sysctl_swap_ratio = 100; + +/* Enable the swap ratio feature */ +int sysctl_swap_ratio_enable; + +static bool is_same_group(struct swap_info_struct *a, + struct swap_info_struct *b) +{ + if (!sysctl_swap_ratio_enable) + return false; + + if (!is_swap_ratio_group(a->prio)) + return false; + + if (a->prio == b->prio) + return true; + + return false; +} + +/* Caller must hold swap_avail_lock */ +static int calculate_write_pending(struct swap_info_struct *si, + struct swap_info_struct *n) +{ + int ratio = sysctl_swap_ratio; + + if ((ratio < 0) || (ratio > 100)) + return -EINVAL; + + if (WARN_ON(!(si->flags & SWP_SYNCHRONOUS_IO))) + return -ENODEV; + + if ((n->flags & SWP_SYNCHRONOUS_IO) || !is_same_group(si, n)) + return -ENODEV; + + si->max_writes = ratio ? SWAP_FAST_WRITES : 0; + n->max_writes = ratio ? (SWAP_FAST_WRITES * 100) / + ratio - SWAP_FAST_WRITES : SWAP_SLOW_WRITES; + + si->write_pending = si->max_writes; + n->write_pending = n->max_writes; + + return 0; +} + +static int swap_ratio_slow(struct swap_info_struct **si, int node) +{ + struct swap_info_struct *n = NULL; + int ret = 0; + + spin_lock(&(*si)->lock); + spin_lock(&swap_avail_lock); + if (&(*si)->avail_lists[node] == plist_last(&swap_avail_heads[node])) { + /* just to make skip work */ + n = *si; + ret = -ENODEV; + goto skip; + } + n = plist_next_entry(&(*si)->avail_lists[node], + struct swap_info_struct, + avail_lists[node]); + if (n == *si) { + /* No other swap device */ + ret = -ENODEV; + goto skip; + } + + spin_unlock(&swap_avail_lock); + spin_lock(&n->lock); + spin_lock(&swap_avail_lock); + + if ((*si)->flags & SWP_SYNCHRONOUS_IO) { + if ((*si)->write_pending) { + (*si)->write_pending--; + goto exit; + } else { + if ((n->flags & SWP_SYNCHRONOUS_IO) || + !is_same_group(*si, n)) { + /* Should never happen */ + ret = -ENODEV; + } else if (n->write_pending) { + /* + * Requeue fast device, since there are pending + * writes for slow device. 
+ */ + plist_requeue(&(*si)->avail_lists[node], + &swap_avail_heads[node]); + n->write_pending--; + spin_unlock(&(*si)->lock); + *si = n; + goto skip; + } else { + if (calculate_write_pending(*si, n) < 0) { + ret = -ENODEV; + goto exit; + } + /* Restart from fast device */ + (*si)->write_pending--; + } + } + } else { + if (!(n->flags & SWP_SYNCHRONOUS_IO) || + !is_same_group(*si, n)) { + /* Should never happen */ + ret = -ENODEV; + } else if (n->write_pending) { + /* + * Pending writes for fast device. + * We reach here when slow device is swapped on first, + * before fast device. + */ + /* requeue slow device to the end */ + plist_requeue(&(*si)->avail_lists[node], + &swap_avail_heads[node]); + n->write_pending--; + spin_unlock(&(*si)->lock); + *si = n; + goto skip; + } else { + if ((*si)->write_pending) { + (*si)->write_pending--; + } else { + if (calculate_write_pending(n, *si) < 0) { + ret = -ENODEV; + goto exit; + } + n->write_pending--; + plist_requeue(&(*si)->avail_lists[node], + &swap_avail_heads[node]); + spin_unlock(&(*si)->lock); + *si = n; + goto skip; + } + } + } +exit: + spin_unlock(&(*si)->lock); +skip: + spin_unlock(&swap_avail_lock); + /* n and si would have got interchanged */ + spin_unlock(&n->lock); + return ret; +} + +bool is_swap_ratio_group(int prio) +{ + return ((prio >= SWAP_RATIO_GROUP_START) && + (prio <= SWAP_RATIO_GROUP_END)) ? 
true : false; +} + +void setup_swap_ratio(struct swap_info_struct *p, int prio) +{ + /* Used only if sysctl_swap_ratio_enable is set */ + if (is_swap_ratio_group(prio)) { + if (p->flags & SWP_SYNCHRONOUS_IO) + p->write_pending = SWAP_FAST_WRITES; + else + p->write_pending = SWAP_SLOW_WRITES; + p->max_writes = p->write_pending; + } +} + +int swap_ratio(struct swap_info_struct **si, int node) +{ + if (!sysctl_swap_ratio_enable) + return -ENODEV; + + if (is_swap_ratio_group((*si)->prio)) + return swap_ratio_slow(si, node); + else + return -ENODEV; +} diff --git a/mm/swap_state.c b/mm/swap_state.c index 326439428daffd6c1da5e5cd5b50652ead47568d..2348a7ae18e8a9a9bd20f1c16efd17f0787f7e57 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -36,9 +36,9 @@ static const struct address_space_operations swap_aops = { #endif }; -struct address_space *swapper_spaces[MAX_SWAPFILES]; -static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; -bool swap_vma_readahead = true; +struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly; +static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly; +bool enable_vma_readahead __read_mostly = true; #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) @@ -322,6 +322,11 @@ void free_pages_and_swap_cache(struct page **pages, int nr) release_pages(pagep, nr, false); } +static inline bool swap_use_vma_readahead(void) +{ + return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap); +} + /* * Lookup a swap entry in the swap cache. 
A found page will be returned * unlocked and with its refcount incremented - we rely on the kernel @@ -332,32 +337,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr) { struct page *page; - unsigned long ra_info; - int win, hits, readahead; page = find_get_page(swap_address_space(entry), swp_offset(entry)); INC_CACHE_INFO(find_total); if (page) { + bool vma_ra = swap_use_vma_readahead(); + bool readahead = TestClearPageReadahead(page); + INC_CACHE_INFO(find_success); if (unlikely(PageTransCompound(page))) return page; - readahead = TestClearPageReadahead(page); - if (vma) { - ra_info = GET_SWAP_RA_VAL(vma); - win = SWAP_RA_WIN(ra_info); - hits = SWAP_RA_HITS(ra_info); + + if (vma && vma_ra) { + unsigned long ra_val; + int win, hits; + + ra_val = GET_SWAP_RA_VAL(vma); + win = SWAP_RA_WIN(ra_val); + hits = SWAP_RA_HITS(ra_val); if (readahead) hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX); atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(addr, win, hits)); } + if (readahead) { count_vm_event(SWAP_RA_HIT); - if (!vma) + if (!vma || !vma_ra) atomic_inc(&swapin_readahead_hits); } } + return page; } @@ -533,11 +544,10 @@ static unsigned long swapin_nr_pages(unsigned long offset) } /** - * swapin_readahead - swap in pages in hope we need them soon + * swap_cluster_readahead - swap in pages in hope we need them soon * @entry: swap entry of this memory * @gfp_mask: memory allocation flags - * @vma: user vma this address belongs to - * @addr: target address for mempolicy + * @vmf: fault information * * Returns the struct page for entry and addr, after queueing swapin. * @@ -549,10 +559,10 @@ static unsigned long swapin_nr_pages(unsigned long offset) * This has been extended to use the NUMA policies from the mm triggering * the readahead. * - * Caller must hold down_read on the vma->vm_mm if vma is not NULL. + * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL. 
*/ -struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, + struct vm_fault *vmf) { struct page *page; unsigned long entry_offset = swp_offset(entry); @@ -561,6 +571,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, unsigned long mask; struct blk_plug plug; bool do_poll = true, page_allocated; + struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address; mask = swapin_nr_pages(offset) - 1; if (!mask) @@ -646,16 +658,15 @@ static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma, PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE)); } -struct page *swap_readahead_detect(struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra) +static void swap_ra_info(struct vm_fault *vmf, + struct vma_swap_readahead *ra_info) { struct vm_area_struct *vma = vmf->vma; - unsigned long swap_ra_info; - struct page *page; + unsigned long ra_val; swp_entry_t entry; unsigned long faddr, pfn, fpfn; unsigned long start, end; - pte_t *pte; + pte_t *pte, *orig_pte; unsigned int max_win, hits, prev_win, win, left; #ifndef CONFIG_64BIT pte_t *tpte; @@ -664,30 +675,32 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING); if (max_win == 1) { - swap_ra->win = 1; - return NULL; + ra_info->win = 1; + return; } faddr = vmf->address; - entry = pte_to_swp_entry(vmf->orig_pte); - if ((unlikely(non_swap_entry(entry)))) - return NULL; - page = lookup_swap_cache(entry, vma, faddr); - if (page) - return page; + orig_pte = pte = pte_offset_map(vmf->pmd, faddr); + entry = pte_to_swp_entry(*pte); + if ((unlikely(non_swap_entry(entry)))) { + pte_unmap(orig_pte); + return; + } fpfn = PFN_DOWN(faddr); - swap_ra_info = GET_SWAP_RA_VAL(vma); - pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); - prev_win = SWAP_RA_WIN(swap_ra_info); - hits = SWAP_RA_HITS(swap_ra_info); - 
swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits, + ra_val = GET_SWAP_RA_VAL(vma); + pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val)); + prev_win = SWAP_RA_WIN(ra_val); + hits = SWAP_RA_HITS(ra_val); + ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits, max_win, prev_win); atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0)); - if (win == 1) - return NULL; + if (win == 1) { + pte_unmap(orig_pte); + return; + } /* Copy the PTEs because the page table may be unmapped */ if (fpfn == pfn + 1) @@ -700,23 +713,21 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left, &start, &end); } - swap_ra->nr_pte = end - start; - swap_ra->offset = fpfn - start; - pte = vmf->pte - swap_ra->offset; + ra_info->nr_pte = end - start; + ra_info->offset = fpfn - start; + pte -= ra_info->offset; #ifdef CONFIG_64BIT - swap_ra->ptes = pte; + ra_info->ptes = pte; #else - tpte = swap_ra->ptes; + tpte = ra_info->ptes; for (pfn = start; pfn != end; pfn++) *tpte++ = *pte++; #endif - - return NULL; + pte_unmap(orig_pte); } -struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra) +struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, + struct vm_fault *vmf) { struct blk_plug plug; struct vm_area_struct *vma = vmf->vma; @@ -725,12 +736,14 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, swp_entry_t entry; unsigned int i; bool page_allocated; + struct vma_swap_readahead ra_info = {0,}; - if (swap_ra->win == 1) + swap_ra_info(vmf, &ra_info); + if (ra_info.win == 1) goto skip; blk_start_plug(&plug); - for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte; + for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; i++, pte++) { pentry = *pte; if (pte_none(pentry)) @@ -746,7 +759,7 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, continue; if (page_allocated) { swap_readpage(page, false); 
- if (i != swap_ra->offset && + if (i != ra_info.offset && likely(!PageTransCompound(page))) { SetPageReadahead(page); count_vm_event(SWAP_RA); @@ -758,23 +771,43 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, lru_add_drain(); skip: return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address, - swap_ra->win == 1); + ra_info.win == 1); +} + +/** + * swapin_readahead - swap in pages in hope we need them soon + * @entry: swap entry of this memory + * @gfp_mask: memory allocation flags + * @vmf: fault information + * + * Returns the struct page for entry and addr, after queueing swapin. + * + * It's a main entry function for swap readahead. By the configuration, + * it will read ahead blocks by cluster-based(ie, physical disk based) + * or vma-based(ie, virtual address based on faulty address) readahead. + */ +struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, + struct vm_fault *vmf) +{ + return swap_use_vma_readahead() ? + swap_vma_readahead(entry, gfp_mask, vmf) : + swap_cluster_readahead(entry, gfp_mask, vmf); } #ifdef CONFIG_SYSFS static ssize_t vma_ra_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false"); + return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false"); } static ssize_t vma_ra_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) - swap_vma_readahead = true; + enable_vma_readahead = true; else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) - swap_vma_readahead = false; + enable_vma_readahead = false; else return -EINVAL; diff --git a/mm/swapfile.c b/mm/swapfile.c index cb08fa65819fc6502cfc6432a34239f88b23d7b9..2b2de9e273fab84ca484f3de8daf591bbd0b0f46 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -86,7 +86,7 @@ PLIST_HEAD(swap_active_head); * before any swap_info_struct->lock. 
*/ struct plist_head *swap_avail_heads; -static DEFINE_SPINLOCK(swap_avail_lock); +DEFINE_SPINLOCK(swap_avail_lock); struct swap_info_struct *swap_info[MAX_SWAPFILES]; @@ -931,6 +931,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[]) long avail_pgs; int n_ret = 0; int node; + int swap_ratio_off = 0; /* Only single cluster request supported */ WARN_ON_ONCE(n_goal > 1 && cluster); @@ -947,14 +948,34 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[]) atomic_long_sub(n_goal * nr_pages, &nr_swap_pages); +lock_and_start: spin_lock(&swap_avail_lock); start_over: node = numa_node_id(); plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { + + if (sysctl_swap_ratio && !swap_ratio_off) { + int ret; + + spin_unlock(&swap_avail_lock); + ret = swap_ratio(&si, node); + if (ret < 0) { + /* + * Error. Start again with swap + * ratio disabled. + */ + swap_ratio_off = 1; + goto lock_and_start; + } else { + goto start; + } + } + /* requeue si to after same-priority siblings */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); +start: spin_lock(&si->lock); if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { spin_lock(&swap_avail_lock); @@ -1328,6 +1349,13 @@ int page_swapcount(struct page *page) return count; } +int __swap_count(struct swap_info_struct *si, swp_entry_t entry) +{ + pgoff_t offset = swp_offset(entry); + + return swap_count(si->swap_map[offset]); +} + static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) { int count = 0; @@ -2954,6 +2982,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p, maxpages = swp_offset(pte_to_swp_entry( swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; last_page = swap_header->info.last_page; + if (!last_page) { + pr_warn("Empty swap-file\n"); + return 0; + } if (last_page > maxpages) { pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", maxpages << (PAGE_SHIFT - 10), 
@@ -3258,9 +3290,11 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) mutex_lock(&swapon_mutex); prio = -1; - if (swap_flags & SWAP_FLAG_PREFER) + if (swap_flags & SWAP_FLAG_PREFER) { prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; + setup_swap_ratio(p, prio); + } enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map); pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", @@ -3455,10 +3489,15 @@ int swapcache_prepare(swp_entry_t entry) return __swap_duplicate(entry, SWAP_HAS_CACHE); } +struct swap_info_struct *swp_swap_info(swp_entry_t entry) +{ + return swap_info[swp_type(entry)]; +} + struct swap_info_struct *page_swap_info(struct page *page) { - swp_entry_t swap = { .val = page_private(page) }; - return swap_info[swp_type(swap)]; + swp_entry_t entry = { .val = page_private(page) }; + return swp_swap_info(entry); } /* @@ -3466,7 +3505,6 @@ struct swap_info_struct *page_swap_info(struct page *page) */ struct address_space *__page_file_mapping(struct page *page) { - VM_BUG_ON_PAGE(!PageSwapCache(page), page); return page_swap_info(page)->swap_file->f_mapping; } EXPORT_SYMBOL_GPL(__page_file_mapping); @@ -3474,7 +3512,6 @@ EXPORT_SYMBOL_GPL(__page_file_mapping); pgoff_t __page_file_index(struct page *page) { swp_entry_t swap = { .val = page_private(page) }; - VM_BUG_ON_PAGE(!PageSwapCache(page), page); return swp_offset(swap); } EXPORT_SYMBOL_GPL(__page_file_index); diff --git a/mm/vmscan.c b/mm/vmscan.c index 1e9e1dcdeabbb8831d748f8be041c8ef6e74de57..c7c8e56da46a5ddc6a378a17d1d6fcfb5c6742d6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1451,7 +1451,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode) return ret; mapping = page_mapping(page); - migrate_dirty = mapping && mapping->a_ops->migratepage; + migrate_dirty = !mapping || mapping->a_ops->migratepage; unlock_page(page); if (!migrate_dirty) return ret; @@ -3995,7 +3995,13 @@ int node_reclaim(struct pglist_data 
*pgdat, gfp_t gfp_mask, unsigned int order) */ int page_evictable(struct page *page) { - return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); + int ret; + + /* Prevent address_space of inode and swap cache from being freed */ + rcu_read_lock(); + ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); + rcu_read_unlock(); + return ret; } #ifdef CONFIG_SHMEM diff --git a/mm/vmstat.c b/mm/vmstat.c index e35d6d77ad68e95ed2142070573647f009ff5b67..3459b767c307cfdc5ab5d355e39ccd03d7dbc717 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1772,9 +1772,11 @@ static void vmstat_update(struct work_struct *w) * to occur in the future. Keep on running the * update worker thread. */ + preempt_disable(); queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); + preempt_enable(); } } diff --git a/mm/z3fold.c b/mm/z3fold.c index ddfb20cfd9afd12533267afd9491ef2f708275ae..f33403d718ac92d11f4f04b46cb6d0b8fbbd3070 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -469,6 +469,8 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, spin_lock_init(&pool->lock); spin_lock_init(&pool->stale_lock); pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); + if (!pool->unbuddied) + goto out_pool; for_each_possible_cpu(cpu) { struct list_head *unbuddied = per_cpu_ptr(pool->unbuddied, cpu); @@ -481,7 +483,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, pool->name = name; pool->compact_wq = create_singlethread_workqueue(pool->name); if (!pool->compact_wq) - goto out; + goto out_unbuddied; pool->release_wq = create_singlethread_workqueue(pool->name); if (!pool->release_wq) goto out_wq; @@ -491,8 +493,11 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, out_wq: destroy_workqueue(pool->compact_wq); -out: +out_unbuddied: + free_percpu(pool->unbuddied); +out_pool: kfree(pool); +out: return NULL; } diff 
--git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 64aa9f755e1d251e19f1b713acfc163318a9b57d..45c9bf5ff3a0c1f33d5e9443f9237b1277df6502 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp) * original position later */ skb_push(skb, offset); - skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto, - skb->vlan_tci); + skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto, + skb->vlan_tci, skb->mac_len); if (!skb) return false; skb_pull(skb, offset + VLAN_HLEN); diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 83ba5483455a334dc559626c8e1682fe9f357fa7..71d8809fbe940c9343ce1007c650f5ad85306c1f 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -2719,7 +2719,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; struct batadv_gw_node *curr_gw; - int ret = -EINVAL; + int ret = 0; void *hdr; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 4e2724c5b33d8bda27339ffe65fc7d76f91fd119..a8f4c3902cf500e970d4ee29b6ca82eaece0648a 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -930,7 +930,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, struct batadv_neigh_ifinfo *router_ifinfo = NULL; struct batadv_neigh_node *router; struct batadv_gw_node *curr_gw; - int ret = -EINVAL; + int ret = 0; void *hdr; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index cdd8e8e4df0b382b21ff674b9aafcd19e9a581e7..422ee16b7854de39259bd171584f8dd0301a2ab6 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -2161,22 +2161,25 @@ batadv_bla_claim_dump_bucket(struct 
sk_buff *msg, u32 portid, u32 seq, { struct batadv_bla_claim *claim; int idx = 0; + int ret = 0; rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { if (idx++ < *idx_skip) continue; - if (batadv_bla_claim_dump_entry(msg, portid, seq, - primary_if, claim)) { + + ret = batadv_bla_claim_dump_entry(msg, portid, seq, + primary_if, claim); + if (ret) { *idx_skip = idx - 1; goto unlock; } } - *idx_skip = idx; + *idx_skip = 0; unlock: rcu_read_unlock(); - return 0; + return ret; } /** @@ -2391,22 +2394,25 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, { struct batadv_bla_backbone_gw *backbone_gw; int idx = 0; + int ret = 0; rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { if (idx++ < *idx_skip) continue; - if (batadv_bla_backbone_dump_entry(msg, portid, seq, - primary_if, backbone_gw)) { + + ret = batadv_bla_backbone_dump_entry(msg, portid, seq, + primary_if, backbone_gw); + if (ret) { *idx_skip = idx - 1; goto unlock; } } - *idx_skip = idx; + *idx_skip = 0; unlock: rcu_read_unlock(); - return 0; + return ret; } /** diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b6cfa78e9381f5dca63e7a4a8d05f136d823c4db..4f0111bc6621e66c411bc929b3d18b3f543234cd 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -391,7 +391,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, batadv_arp_hw_src(skb, hdr_size), &ip_src, batadv_arp_hw_dst(skb, hdr_size), &ip_dst); - if (hdr_size == 0) + if (hdr_size < sizeof(struct batadv_unicast_packet)) return; unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index a98cf1104a30a30e66fb6018bef59dea83dc4b7a..b6abd19ab23ec8ca94a1615cc60bd0d55fbe72f5 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -287,7 +287,8 @@ 
batadv_frag_merge_packets(struct hlist_head *chain) /* Move the existing MAC header to just before the payload. (Override * the fragment header.) */ - skb_pull_rcsum(skb_out, hdr_size); + skb_pull(skb_out, hdr_size); + skb_out->ip_summed = CHECKSUM_NONE; memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN); skb_set_mac_header(skb_out, -ETH_HLEN); skb_reset_network_header(skb_out); diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index de9955d5224d258cdbd20c14a1758bf7b4af19f9..06276ae9f7529cbc19f870148bb205961143d4a6 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -705,7 +705,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, { struct batadv_neigh_node *neigh_curr = NULL; struct batadv_neigh_node *neigh_old = NULL; - struct batadv_orig_node *orig_dst_node; + struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *gw_node = NULL; struct batadv_gw_node *curr_gw = NULL; struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; @@ -716,6 +716,9 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, vid = batadv_get_vid(skb, 0); + if (is_multicast_ether_addr(ethhdr->h_dest)) + goto out; + orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid); if (!orig_dst_node) diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index d327670641ac336a14f0ecc85dd848d4952e8e6e..fa02fb73367c9a7c505c08299b8079e01d42c680 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -540,8 +540,8 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv) bat_priv->mcast.enabled = true; } - return !(mcast_data.flags & - (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6)); + return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 && + mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6); } /** @@ -809,8 +809,8 @@ static struct batadv_orig_node * batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv, 
struct ethhdr *ethhdr) { - return batadv_transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest, BATADV_NO_FLAGS); + return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest, + BATADV_NO_FLAGS); } /** diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index f10e3ff26f9dc8f565ecab012aaeb2b69d80d6ca..cd82cff716c78eb9df9f85be139e21693bab7464 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -743,6 +743,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, /** * batadv_reroute_unicast_packet - update the unicast header for re-routing * @bat_priv: the bat priv with all the soft interface information + * @skb: unicast packet to process * @unicast_packet: the unicast header to be updated * @dst_addr: the payload destination * @vid: VLAN identifier @@ -754,7 +755,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, * Return: true if the packet header has been updated, false otherwise */ static bool -batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, +batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_unicast_packet *unicast_packet, u8 *dst_addr, unsigned short vid) { @@ -783,8 +784,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, } /* update the packet header */ + skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); ether_addr_copy(unicast_packet->dest, orig_addr); unicast_packet->ttvn = orig_ttvn; + skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); ret = true; out: @@ -825,7 +828,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, * the packet to */ if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) { - if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, + if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet, ethhdr->h_dest, vid)) batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv, @@ -871,7 +874,7 @@ static bool batadv_check_unicast_ttvn(struct 
batadv_priv *bat_priv, * destination can possibly be updated and forwarded towards the new * target host */ - if (batadv_reroute_unicast_packet(bat_priv, unicast_packet, + if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet, ethhdr->h_dest, vid)) { batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv, "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n", @@ -894,12 +897,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, if (!primary_if) return false; + /* update the packet header */ + skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr); + unicast_packet->ttvn = curr_ttvn; + skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet)); batadv_hardif_put(primary_if); - unicast_packet->ttvn = curr_ttvn; - return true; } diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 10f7edfb176ebd49c680ff4132db87aa00d3f04e..aa2c49fa31cec5ffd234204844a6874f0149a9ae 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -451,13 +451,7 @@ void batadv_interface_rx(struct net_device *soft_iface, /* skb->dev & skb->pkt_type are set here */ skb->protocol = eth_type_trans(skb, soft_iface); - - /* should not be necessary anymore as we use skb_pull_rcsum() - * TODO: please verify this and remove this TODO - * -- Dec 21st 2009, Simon Wunderlich - */ - - /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f3aef22931ab22c3a47211b2eb634668714066a0..55a73ef388bf5b74b8a52ac365534283e5b8e9bc 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -503,8 +503,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) return -ELOOP; - 
/* Device is already being bridged */ - if (br_port_exists(dev)) + /* Device has master upper dev */ + if (netdev_master_upper_dev_get(dev)) return -EBUSY; /* No bridging devices that dislike that (e.g. wireless) */ diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 2800c4c4978ca3ce99fa60330b4fbf33b27d60a1..5b8cd359c4c01eccc3e0ed8c65268f8852794a0c 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1641,7 +1641,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, int off = ebt_compat_match_offset(match, m->match_size); compat_uint_t msize = m->match_size - off; - BUG_ON(off >= m->match_size); + if (WARN_ON(off >= m->match_size)) + return -EINVAL; if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) || put_user(msize, &cm->match_size)) @@ -1671,7 +1672,8 @@ static int compat_target_to_user(struct ebt_entry_target *t, int off = xt_compat_target_offset(target); compat_uint_t tsize = t->target_size - off; - BUG_ON(off >= t->target_size); + if (WARN_ON(off >= t->target_size)) + return -EINVAL; if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) @@ -1907,7 +1909,8 @@ static int ebt_buf_add(struct ebt_entries_buf_state *state, if (state->buf_kern_start == NULL) goto count_only; - BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); + if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) + return -EINVAL; memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); @@ -1920,7 +1923,8 @@ static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) { char *b = state->buf_kern_start; - BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); + if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) + return -EINVAL; if (b != NULL && sz > 0) memset(b + state->buf_kern_offset, 0, sz); @@ -1997,8 +2001,10 @@ static int compat_mtw_from_user(struct 
compat_ebt_entry_mwt *mwt, pad = XT_ALIGN(size_kern) - size_kern; if (pad > 0 && dst) { - BUG_ON(state->buf_kern_len <= pad); - BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); + if (WARN_ON(state->buf_kern_len <= pad)) + return -EINVAL; + if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) + return -EINVAL; memset(dst + size_kern, 0, pad); } return off + match_size; @@ -2048,7 +2054,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, if (ret < 0) return ret; - BUG_ON(ret < match32->match_size); + if (WARN_ON(ret < match32->match_size)) + return -EINVAL; growth += ret - match32->match_size; growth += ebt_compat_entry_padsize(); @@ -2117,8 +2124,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, * offsets are relative to beginning of struct ebt_entry (i.e., 0). */ for (i = 0; i < 4 ; ++i) { - if (offsets[i] >= *total) + if (offsets[i] > *total) + return -EINVAL; + + if (i < 3 && offsets[i] == *total) return -EINVAL; + if (i == 0) continue; if (offsets[i-1] > offsets[i]) @@ -2157,7 +2168,8 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, startoff = state->buf_user_offset - startoff; - BUG_ON(*total < startoff); + if (WARN_ON(*total < startoff)) + return -EINVAL; *total -= startoff; return 0; } @@ -2286,7 +2298,8 @@ static int compat_do_replace(struct net *net, void __user *user, state.buf_kern_len = size64; ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); - BUG_ON(ret < 0); /* parses same data again */ + if (WARN_ON(ret < 0)) + goto out_unlock; vfree(entries_tmp); tmp.entries_size = size64; diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 5c036d2f401e25b42ece6d7cc6c4fc30c00dea43..cdb5b693a135e77c60007ddd1d5eaaa6ab8ec7a7 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -418,11 +418,15 @@ ceph_parse_options(char *options, const char *dev_name, 
opt->flags |= CEPH_OPT_FSID; break; case Opt_name: + kfree(opt->name); opt->name = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, GFP_KERNEL); break; case Opt_secret: + ceph_crypto_key_destroy(opt->key); + kfree(opt->key); + opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); if (!opt->key) { err = -ENOMEM; @@ -433,6 +437,9 @@ ceph_parse_options(char *options, const char *dev_name, goto out; break; case Opt_key: + ceph_crypto_key_destroy(opt->key); + kfree(opt->key); + opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL); if (!opt->key) { err = -ENOMEM; diff --git a/net/compat.c b/net/compat.c index 22381719718c4fbd5d63b8836e3161026b59a459..32ed993588d64a369fac473563b502cf88a958ff 100644 --- a/net/compat.c +++ b/net/compat.c @@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname, optname == SO_ATTACH_REUSEPORT_CBPF) return do_set_attach_filter(sock, level, optname, optval, optlen); - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) + if (!COMPAT_USE_64BIT_TIME && + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) return do_set_sock_timeout(sock, level, optname, optval, optlen); return sock_setsockopt(sock, level, optname, optval, optlen); @@ -442,7 +443,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname, static int compat_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) + if (!COMPAT_USE_64BIT_TIME && + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) return do_get_sock_timeout(sock, level, optname, optval, optlen); return sock_getsockopt(sock, level, optname, optval, optlen); } diff --git a/net/core/dev.c b/net/core/dev.c index 1aa1a4064ab44651b78755520d322eafffc75ee9..65609a7aa9f6f19417c521ca957d32ce34a9b3a3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2060,7 +2060,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev, int i, j; for (i = count, j = 
offset; i--; j++) { - if (!remove_xps_queue(dev_maps, cpu, j)) + if (!remove_xps_queue(dev_maps, tci, j)) break; } @@ -4720,6 +4720,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) unsigned long diffs; NAPI_GRO_CB(p)->flush = 0; + NAPI_GRO_CB(p)->flush_id = 0; if (hash != skb_get_hash_raw(p)) { NAPI_GRO_CB(p)->same_flow = 0; @@ -4808,7 +4809,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->encap_mark = 0; NAPI_GRO_CB(skb)->recursion_counter = 0; NAPI_GRO_CB(skb)->is_fou = 0; - NAPI_GRO_CB(skb)->is_atomic = 1; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 9563358f928d550fd419cd4ea247794fa672ea89..8023fff32a58e1237543bed090003d673f9fc936 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -2508,11 +2508,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr) static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) { struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; + int rc; if (!dev->ethtool_ops->get_fecparam) return -EOPNOTSUPP; - dev->ethtool_ops->get_fecparam(dev, &fecparam); + rc = dev->ethtool_ops->get_fecparam(dev, &fecparam); + if (rc) + return rc; if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) return -EFAULT; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4f209777e58dc06c68604c32cf0e932559218afc..b73e7c87f926717b00c9609b5ab700044b4c2276 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4941,13 +4941,18 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) { + int mac_len; + if (skb_cow(skb, skb_headroom(skb)) < 0) { kfree_skb(skb); return NULL; } - memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, - 2 * ETH_ALEN); + mac_len = skb->data - skb_mac_header(skb); + if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { + 
memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), + mac_len - VLAN_HLEN - ETH_TLEN); + } skb->mac_header += VLAN_HLEN; return skb; } diff --git a/net/core/sock.c b/net/core/sock.c index 59e010ac9c50f6eedc7ad80631170bafb767ffc8..e872a5a6321ebdb4cfdb07497e65affb985df348 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1595,7 +1595,7 @@ void sk_destruct(struct sock *sk) static void __sk_free(struct sock *sk) { - if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) + if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) sock_diag_broadcast_destroy(sk); else sk_destruct(sk); diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index 97791b0b1b51418c150ce31a99eda44046983f0e..3887bc115762688e80288609af1a7ed76f11f540 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val) DCCPF_SEQ_WMAX)); } +static void dccp_tasklet_schedule(struct sock *sk) +{ + struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet; + + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + sock_hold(sk); + __tasklet_schedule(t); + } +} + static void ccid2_hc_tx_rto_expire(unsigned long data) { struct sock *sk = (struct sock *)data; @@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) /* if we were blocked before, we may now send cwnd=1 packet */ if (sender_was_blocked) - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); + dccp_tasklet_schedule(sk); /* restart backed-off timer */ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); out: @@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) done: /* check if incoming Acks allow pending packets to be sent */ if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); + dccp_tasklet_schedule(sk); dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); } diff --git a/net/dccp/timer.c 
b/net/dccp/timer.c index 3a2c3402775860b3d1aeeffaf52f951b1b9c9272..2a952cbd6efa90c37ff868bc2f09cdd0e35cef9a 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data) else dccp_write_xmit(sk); bh_unlock_sock(sk); + sock_put(sk); } static void dccp_write_xmit_timer(unsigned long data) { dccp_write_xmitlet(data); - sock_put((struct sock *)data); } void dccp_init_xmit_timers(struct sock *sk) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 6fffcda326b7e1fdf865b365fcd84c00f7501d33..a3efeed0bb6b8cc116f9b5d5085698bc50ee89db 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1378,7 +1378,6 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) for (p = *head; p; p = p->next) { struct iphdr *iph2; - u16 flush_id; if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -1404,34 +1403,23 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb) NAPI_GRO_CB(p)->flush |= flush; - /* We need to store of the IP ID check to be included later - * when we can verify that this packet does in fact belong - * to a given flow. + /* For non-atomic datagrams we need to save the IP ID offset + * to be included later. If the frame has the DF bit set + * we must ignore the IP ID value as per RFC 6864. */ - flush_id = (u16)(id - ntohs(iph2->id)); - - /* This bit of code makes it much easier for us to identify - * the cases where we are doing atomic vs non-atomic IP ID - * checks. Specifically an atomic check can return IP ID - * values 0 - 0xFFFF, while a non-atomic check can only - * return 0 or 0xFFFF. - */ - if (!NAPI_GRO_CB(p)->is_atomic || - !(iph->frag_off & htons(IP_DF))) { - flush_id ^= NAPI_GRO_CB(p)->count; - flush_id = flush_id ? 0xFFFF : 0; - } + if (iph2->frag_off & htons(IP_DF)) + continue; - /* If the previous IP ID value was based on an atomic - * datagram we can overwrite the value and ignore it. 
+ /* We must save the offset as it is possible to have multiple + * flows using the same protocol and address pairs so we + * need to wait until we can validate this is part of the + * same flow with a 5-tuple or better to avoid unnecessary + * collisions between flows. */ - if (NAPI_GRO_CB(skb)->is_atomic) - NAPI_GRO_CB(p)->flush_id = flush_id; - else - NAPI_GRO_CB(p)->flush_id |= flush_id; + NAPI_GRO_CB(p)->flush_id |= ntohs(iph2->id) ^ + (u16)(id - NAPI_GRO_CB(p)->count); } - NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF)); NAPI_GRO_CB(skb)->flush |= flush; skb_set_network_header(skb, off); /* The above will be needed by the transport layer if there is one diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 23e6d5532b5c5f05e73ec2c12968d5a38fc92ceb..2459e9cc22a694be003c951aeb99a2eb942c650d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -951,9 +951,6 @@ static void __gre_tunnel_init(struct net_device *dev) t_hlen = tunnel->hlen + sizeof(struct iphdr); - dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; - dev->mtu = ETH_DATA_LEN - t_hlen - 4; - dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; @@ -1253,8 +1250,6 @@ static int erspan_tunnel_init(struct net_device *dev) sizeof(struct erspanhdr); t_hlen = tunnel->hlen + sizeof(struct iphdr); - dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; - dev->mtu = ETH_DATA_LEN - t_hlen - 4; dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 5ce268784a6eb0b6c68563f04887e48ac86cec73..2f7067bd55de18b9484f527ec7b346fda560e8fb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1048,7 +1048,8 @@ static int __ip_append_data(struct sock *sk, if (copy > length) copy = length; - if (!(rt->dst.dev->features&NETIF_F_SG)) { + if (!(rt->dst.dev->features&NETIF_F_SG) && + skb_tailroom(skb) >= copy) { unsigned int off; off = skb->len; diff --git 
a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index a2fcc20774a67a5cb8b524d9d6687130ec04d28e..4784f3f36b7e0397cba2ea728c75ae1cb9fbcda4 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -1103,8 +1103,14 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], eth_hw_addr_random(dev); mtu = ip_tunnel_bind_dev(dev); - if (!tb[IFLA_MTU]) + if (tb[IFLA_MTU]) { + unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; + + dev->mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, + (unsigned int)(max - sizeof(struct iphdr))); + } else { dev->mtu = mtu; + } ip_tunnel_add(itn, nt); out: diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index c9cd891f69c237e97633e37576a2e41f5ada1ed6..00d4371d457369f0f03bff69e26e3761e5b29085 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -396,7 +396,6 @@ static int vti_tunnel_init(struct net_device *dev) memcpy(dev->dev_addr, &iph->saddr, 4); memcpy(dev->broadcast, &iph->daddr, 4); - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); dev->mtu = ETH_DATA_LEN; dev->flags = IFF_NOARP; dev->addr_len = 4; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index c07e9db95ccc3d10700ec39d838ce413aa0e5ab5..cc7c9d67ac193609c7b9c2c44e3fcbe9c8cc75ac 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -228,7 +228,6 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, c->hash_mode = i->hash_mode; c->hash_initval = i->hash_initval; refcount_set(&c->refcount, 1); - refcount_set(&c->entries, 1); spin_lock_bh(&cn->lock); if (__clusterip_config_find(net, ip)) { @@ -259,8 +258,10 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, c->notifier.notifier_call = clusterip_netdev_event; err = register_netdevice_notifier(&c->notifier); - if (!err) + if (!err) { + refcount_set(&c->entries, 1); return c; + } #ifdef CONFIG_PROC_FS proc_remove(c->pde); @@ -269,7 +270,7 @@ 
clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i, spin_lock_bh(&cn->lock); list_del_rcu(&c->list); spin_unlock_bh(&cn->lock); - kfree(c); + clusterip_config_put(c); return ERR_PTR(err); } @@ -492,12 +493,15 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) return PTR_ERR(config); } } - cipinfo->config = config; ret = nf_ct_netns_get(par->net, par->family); - if (ret < 0) + if (ret < 0) { pr_info("cannot load conntrack support for proto=%u\n", par->family); + clusterip_config_entry_put(par->net, config); + clusterip_config_put(config); + return ret; + } if (!par->net->xt.clusterip_deprecated_warning) { pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " @@ -505,6 +509,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) par->net->xt.clusterip_deprecated_warning = true; } + cipinfo->config = config; return ret; } diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c index 2880dddb5431eda0c547126dbfc73f6d61a308b4..0f4ed1bf9d77471f1869a629a6a5d469a4a1a273 100644 --- a/net/ipv4/netfilter/nf_socket_ipv4.c +++ b/net/ipv4/netfilter/nf_socket_ipv4.c @@ -109,10 +109,12 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb, int doff = 0; if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) { - struct udphdr _hdr, *hp; + struct tcphdr _hdr; + struct udphdr *hp; hp = skb_header_pointer(skb, ip_hdrlen(skb), - sizeof(_hdr), &_hdr); + iph->protocol == IPPROTO_UDP ? 
+ sizeof(*hp) : sizeof(_hdr), &_hdr); if (hp == NULL) return NULL; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index b8f0db54b1978c800c1f606e0d06a015499a2972..16226d49263de714f93d53d0c83cb07482fb2339 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { - if (!daddr) - return -EINVAL; + if (!daddr) { + err = -EINVAL; + goto out_free; + } faddr = ipc.opt->opt.faddr; } tos = get_rttos(&ipc, inet); @@ -842,6 +844,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) out: ip_rt_put(rt); +out_free: if (free) kfree(ipc.opt); if (!err) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5ea559f8c456464b13628a899c4c3023173671fc..7afa8d2463d85197a79a202f0d3b7919eacba7f9 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -639,6 +639,7 @@ static inline u32 fnhe_hashfun(__be32 daddr) static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) { rt->rt_pmtu = fnhe->fnhe_pmtu; + rt->rt_mtu_locked = fnhe->fnhe_mtu_locked; rt->dst.expires = fnhe->fnhe_expires; if (fnhe->fnhe_gw) { @@ -649,7 +650,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh } static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, - u32 pmtu, unsigned long expires) + u32 pmtu, bool lock, unsigned long expires) { struct fnhe_hash_bucket *hash; struct fib_nh_exception *fnhe; @@ -686,8 +687,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_genid = genid; if (gw) fnhe->fnhe_gw = gw; - if (pmtu) + if (pmtu) { fnhe->fnhe_pmtu = pmtu; + fnhe->fnhe_mtu_locked = lock; + } fnhe->fnhe_expires = max(1UL, expires); /* Update all cached dsts too */ rt = rcu_dereference(fnhe->fnhe_rth_input); @@ -711,7 +714,8 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_daddr = 
daddr; fnhe->fnhe_gw = gw; fnhe->fnhe_pmtu = pmtu; - fnhe->fnhe_expires = expires; + fnhe->fnhe_mtu_locked = lock; + fnhe->fnhe_expires = max(1UL, expires); /* Exception created; mark the cached routes for the nexthop * stale, so anyone caching it rechecks if this exception @@ -792,7 +796,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, new_gw, - 0, jiffies + ip_rt_gc_timeout); + 0, false, + jiffies + ip_rt_gc_timeout); } if (kill_route) rt->dst.obsolete = DST_OBSOLETE_KILL; @@ -1005,15 +1010,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; struct fib_result res; + bool lock = false; - if (dst_metric_locked(dst, RTAX_MTU)) + if (ip_mtu_locked(dst)) return; if (ipv4_mtu(dst) < mtu) return; - if (mtu < ip_rt_min_pmtu) + if (mtu < ip_rt_min_pmtu) { + lock = true; mtu = ip_rt_min_pmtu; + } if (rt->rt_pmtu == mtu && time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) @@ -1023,7 +1031,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); - update_or_create_fnhe(nh, fl4->daddr, 0, mtu, + update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock, jiffies + ip_rt_mtu_expires); } rcu_read_unlock(); @@ -1276,7 +1284,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) mtu = READ_ONCE(dst->dev->mtu); - if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { + if (unlikely(ip_mtu_locked(dst))) { if (rt->rt_uses_gateway && mtu > 576) mtu = 576; } @@ -1286,6 +1294,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } +static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) +{ + struct fnhe_hash_bucket *hash; + struct fib_nh_exception *fnhe, __rcu **fnhe_p; + u32 hval = fnhe_hashfun(daddr); + + 
spin_lock_bh(&fnhe_lock); + + hash = rcu_dereference_protected(nh->nh_exceptions, + lockdep_is_held(&fnhe_lock)); + hash += hval; + + fnhe_p = &hash->chain; + fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); + while (fnhe) { + if (fnhe->fnhe_daddr == daddr) { + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); + fnhe_flush_routes(fnhe); + kfree_rcu(fnhe, rcu); + break; + } + fnhe_p = &fnhe->fnhe_next; + fnhe = rcu_dereference_protected(fnhe->fnhe_next, + lockdep_is_held(&fnhe_lock)); + } + + spin_unlock_bh(&fnhe_lock); +} + static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) { struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); @@ -1299,8 +1337,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) for (fnhe = rcu_dereference(hash[hval].chain); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { - if (fnhe->fnhe_daddr == daddr) + if (fnhe->fnhe_daddr == daddr) { + if (fnhe->fnhe_expires && + time_after(jiffies, fnhe->fnhe_expires)) { + ip_del_fnhe(nh, daddr); + break; + } return fnhe; + } } return NULL; } @@ -1512,6 +1556,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev, rt->rt_is_input = 0; rt->rt_iif = 0; rt->rt_pmtu = 0; + rt->rt_mtu_locked = 0; rt->rt_gateway = 0; rt->rt_uses_gateway = 0; rt->rt_table_id = 0; @@ -1620,36 +1665,6 @@ static void ip_handle_martian_source(struct net_device *dev, #endif } -static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) -{ - struct fnhe_hash_bucket *hash; - struct fib_nh_exception *fnhe, __rcu **fnhe_p; - u32 hval = fnhe_hashfun(daddr); - - spin_lock_bh(&fnhe_lock); - - hash = rcu_dereference_protected(nh->nh_exceptions, - lockdep_is_held(&fnhe_lock)); - hash += hval; - - fnhe_p = &hash->chain; - fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); - while (fnhe) { - if (fnhe->fnhe_daddr == daddr) { - rcu_assign_pointer(*fnhe_p, 
rcu_dereference_protected( - fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); - fnhe_flush_routes(fnhe); - kfree_rcu(fnhe, rcu); - break; - } - fnhe_p = &fnhe->fnhe_next; - fnhe = rcu_dereference_protected(fnhe->fnhe_next, - lockdep_is_held(&fnhe_lock)); - } - - spin_unlock_bh(&fnhe_lock); -} - static void set_lwt_redirect(struct rtable *rth) { if (lwtunnel_output_redirect(rth->dst.lwtstate)) { @@ -1716,20 +1731,10 @@ static int __mkroute_input(struct sk_buff *skb, fnhe = find_exception(&FIB_RES_NH(*res), daddr); if (do_cache) { - if (fnhe) { + if (fnhe) rth = rcu_dereference(fnhe->fnhe_rth_input); - if (rth && rth->dst.expires && - time_after(jiffies, rth->dst.expires)) { - ip_del_fnhe(&FIB_RES_NH(*res), daddr); - fnhe = NULL; - } else { - goto rt_cache; - } - } - - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); - -rt_cache: + else + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); goto out; @@ -2206,39 +2211,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res, * the loopback interface and the IP_PKTINFO ipi_ifindex will * be set to the loopback interface as well. 
*/ - fi = NULL; + do_cache = false; } fnhe = NULL; do_cache &= fi != NULL; - if (do_cache) { + if (fi) { struct rtable __rcu **prth; struct fib_nh *nh = &FIB_RES_NH(*res); fnhe = find_exception(nh, fl4->daddr); + if (!do_cache) + goto add; if (fnhe) { prth = &fnhe->fnhe_rth_output; - rth = rcu_dereference(*prth); - if (rth && rth->dst.expires && - time_after(jiffies, rth->dst.expires)) { - ip_del_fnhe(nh, fl4->daddr); - fnhe = NULL; - } else { - goto rt_cache; + } else { + if (unlikely(fl4->flowi4_flags & + FLOWI_FLAG_KNOWN_NH && + !(nh->nh_gw && + nh->nh_scope == RT_SCOPE_LINK))) { + do_cache = false; + goto add; } + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); } - - if (unlikely(fl4->flowi4_flags & - FLOWI_FLAG_KNOWN_NH && - !(nh->nh_gw && - nh->nh_scope == RT_SCOPE_LINK))) { - do_cache = false; - goto add; - } - prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); rth = rcu_dereference(*prth); - -rt_cache: if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) return rth; } @@ -2538,6 +2535,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or rt->rt_is_input = ort->rt_is_input; rt->rt_iif = ort->rt_iif; rt->rt_pmtu = ort->rt_pmtu; + rt->rt_mtu_locked = ort->rt_mtu_locked; rt->rt_genid = rt_genid_ipv4(net); rt->rt_flags = ort->rt_flags; @@ -2640,6 +2638,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id, memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); if (rt->rt_pmtu && expires) metrics[RTAX_MTU - 1] = rt->rt_pmtu; + if (rt->rt_mtu_locked && expires) + metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU); if (rtnetlink_put_metrics(skb, metrics) < 0) goto nla_put_failure; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index b694fbf44a35642cdc826c3c55b35f81d63582f0..e3ece12f0250436fe6c980e7a14b56e8431ad0bc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1194,7 +1194,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) uarg->zerocopy = 0; } - if (unlikely(flags & MSG_FASTOPEN || 
inet_sk(sk)->defer_connect)) { + if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && + !tp->repair) { err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); if (err == -EINPROGRESS && copied_syn > 0) goto out; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 25c5a0b60cfcc62d16ecbb977c3512efc6bdaf0e..9a0b952dd09b5d380a66fe519ba8c230e43044fc 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -802,7 +802,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) } } } - bbr->idle_restart = 0; + /* Restart after idle ends only once we process a new S/ACK for data */ + if (rs->delivered > 0) + bbr->idle_restart = 0; } static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 7c843578f2333db58100cedbc2a9d0784f72d861..faddf4f9a707f1583fc71e0711e3db95b5d08255 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -6,7 +6,7 @@ * The algorithm is described in: * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm * for High-Speed Networks" - * http://www.ifp.illinois.edu/~srikant/Papers/liubassri06perf.pdf + * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf * * Implemented from description in paper and ns-2 simulation. 
* Copyright (C) 2007 Stephen Hemminger diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1197f85c105ac7d313830d4b7a3016400bcf3901..ba5628689c57abba234761a28aaa91f7efd68cf8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -592,8 +592,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); + u32 copied; int time; - int copied; tcp_mstamp_refresh(tp); time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); @@ -616,12 +616,13 @@ void tcp_rcv_space_adjust(struct sock *sk) if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { - int rcvwin, rcvmem, rcvbuf; + int rcvmem, rcvbuf; + u64 rcvwin; /* minimal window to cope with packet losses, assuming * steady state. Add some cushion because of small variations. */ - rcvwin = (copied << 1) + 16 * tp->advmss; + rcvwin = ((u64)copied << 1) + 16 * tp->advmss; /* If rate increased by 25%, * assume slow start, rcvwin = 3 * copied @@ -641,7 +642,8 @@ void tcp_rcv_space_adjust(struct sock *sk) while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; - rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]); + do_div(rcvwin, tp->advmss); + rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]); if (rcvbuf > sk->sk_rcvbuf) { sk->sk_rcvbuf = rcvbuf; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 4d58e2ce0b5b181b39aeb12f8761ba016606dd43..114098a52f5b5dcd9dadc7b15852abb9acad641e 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -238,7 +238,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) found: /* Include the IP ID check below from the inner most IP hdr */ - flush = NAPI_GRO_CB(p)->flush; + flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id; flush |= (__force int)(flags & TCP_FLAG_CWR); flush |= (__force int)((flags ^ tcp_flag_word(th2)) & ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); @@ -247,17 +247,6 @@ 
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) flush |= *(u32 *)((u8 *)th + i) ^ *(u32 *)((u8 *)th2 + i); - /* When we receive our second frame we can made a decision on if we - * continue this flow as an atomic flow with a fixed ID or if we use - * an incrementing ID. - */ - if (NAPI_GRO_CB(p)->flush_id != 1 || - NAPI_GRO_CB(p)->count != 1 || - !NAPI_GRO_CB(p)->is_atomic) - flush |= NAPI_GRO_CB(p)->flush_id; - else - NAPI_GRO_CB(p)->is_atomic = false; - mss = skb_shinfo(p)->gso_size; flush |= (len - 1) >= mss; @@ -326,9 +315,6 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff) iph->daddr, 0); skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; - if (NAPI_GRO_CB(skb)->is_atomic) - skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; - return tcp_gro_complete(skb); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d23d118f22e923a42d68031af7571041e7a0305a..9e8a95a555dfd18e8184772cdd322227f64c66a1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2814,8 +2814,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) return -EBUSY; if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { - if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) - BUG(); + if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { + WARN_ON_ONCE(1); + return -EINVAL; + } if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return -ENOMEM; } @@ -3312,6 +3314,7 @@ static void tcp_connect_init(struct sock *sk) sock_reset_flag(sk, SOCK_DONE); tp->snd_wnd = 0; tcp_init_wl(tp, 0); + tcp_write_queue_purge(sk); tp->snd_una = tp->write_seq; tp->snd_sml = tp->write_seq; tp->snd_up = tp->write_seq; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index c7c43a1922a65d28ab862a78ed277b6ea9461060..e2f3000d60b56fe70831d17c7c17999247513ef3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -418,9 +418,9 @@ static int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || 
sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score += 4; } @@ -1039,8 +1039,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (ipc.opt && ipc.opt->opt.srr) { - if (!daddr) - return -EINVAL; + if (!daddr) { + err = -EINVAL; + goto out_free; + } faddr = ipc.opt->opt.faddr; connected = 0; } @@ -1150,6 +1152,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) out: ip_rt_put(rt); +out_free: if (free) kfree(ipc.opt); if (!err) diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 05017e2c849c12b43715fe4944759a3c91880ee8..4b586e7d5637032eff69d79d7802615bba274bf5 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -100,6 +100,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, xdst->u.rt.rt_gateway = rt->rt_gateway; xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; xdst->u.rt.rt_pmtu = rt->rt_pmtu; + xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked; xdst->u.rt.rt_table_id = rt->rt_table_id; INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 5b3f2f89ef41c3276ef4b478683bd9ab04a1d3da..1f36ea2e5d3ce16f9b9497613cc06826c1ead626 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -239,15 +239,8 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, /* flush if Traffic Class fields are different */ NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; - - /* If the previous IP ID value was based on an atomic - * datagram we can overwrite the value and ignore it. 
- */ - if (NAPI_GRO_CB(skb)->is_atomic) - NAPI_GRO_CB(p)->flush_id = 0; } - NAPI_GRO_CB(skb)->is_atomic = true; NAPI_GRO_CB(skb)->flush |= flush; skb_gro_postpull_rcsum(skb, iph, nlen); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7c50c83174593100d490cc7eaba275f1c07f677f..8763bccad29f86a7ea78eb9d299dc041a5b11324 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1499,7 +1499,8 @@ static int __ip6_append_data(struct sock *sk, if (copy > length) copy = length; - if (!(rt->dst.dev->features&NETIF_F_SG)) { + if (!(rt->dst.dev->features&NETIF_F_SG) && + skb_tailroom(skb) >= copy) { unsigned int off; off = skb->len; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index d61a82fd4b60e55493d62e4410a6a2bc3eedd819..565a0388587aa679f818993b61c7638faced1ba7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1990,14 +1990,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, { struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); - struct ip6_tnl *nt, *t; struct ip_tunnel_encap ipencap; + struct ip6_tnl *nt, *t; + int err; nt = netdev_priv(dev); if (ip6_tnl_netlink_encap_parms(data, &ipencap)) { - int err = ip6_tnl_encap_setup(nt, &ipencap); - + err = ip6_tnl_encap_setup(nt, &ipencap); if (err < 0) return err; } @@ -2013,7 +2013,11 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, return -EEXIST; } - return ip6_tnl_create2(dev); + err = ip6_tnl_create2(dev); + if (!err && tb[IFLA_MTU]) + ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); + + return err; } static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 2493a40bc4b15c9347cb13f3fc4a0b3b73f06ef1..0e0ab90a433494a259333745be9ef1966022f614 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -852,7 +852,7 @@ static void vti6_dev_setup(struct net_device *dev) dev->hard_header_len = LL_MAX_HEADER 
+ sizeof(struct ipv6hdr); dev->mtu = ETH_DATA_LEN; dev->min_mtu = IPV6_MIN_MTU; - dev->max_mtu = IP_MAX_MTU; + dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); netif_keep_dst(dev); diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index 42ce8496f44a0b350acf0d6cdae5130c9986385f..be7ce4dfe18b748ba137da4649f7e1614e2a2583 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -117,9 +117,11 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, } if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) { - struct udphdr _hdr, *hp; + struct tcphdr _hdr; + struct udphdr *hp; - hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); + hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ? + sizeof(*hp) : sizeof(_hdr), &_hdr); if (hp == NULL) return NULL; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index bf64e9da34d6c52c89f3c03f532b49d379ebda15..e07044a2487a7ec8416bbe78ae2917a73c2f70de 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1222,11 +1222,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb, const struct ipv6hdr *inner_iph; const struct icmp6hdr *icmph; struct ipv6hdr _inner_iph; + struct icmp6hdr _icmph; if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) goto out; - icmph = icmp6_hdr(skb); + icmph = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_icmph), &_icmph); + if (!icmph) + goto out; + if (icmph->icmp6_type != ICMPV6_DEST_UNREACH && icmph->icmp6_type != ICMPV6_PKT_TOOBIG && icmph->icmp6_type != ICMPV6_TIME_EXCEED && diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index b35d8905794cc5b8eabb8ea6008b4841bb8f189e..ad1e7e6ce0093ff590bcb1789068953a3ca20264 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1569,6 +1569,13 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; + if (tb[IFLA_MTU]) { + u32 mtu = 
nla_get_u32(tb[IFLA_MTU]); + + if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) + dev->mtu = mtu; + } + #ifdef CONFIG_IPV6_SIT_6RD if (ipip6_netlink_6rd_parms(data, &ip6rd)) err = ipip6_tunnel_update_6rd(nt, &ip6rd); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index a8f5c04bbaff938383539e312cd2b27efef93ada..4ead3066676e62086076faf69123890545d3f691 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -164,9 +164,9 @@ static int compute_score(struct sock *sk, struct net *net, bool dev_match = (sk->sk_bound_dev_if == dif || sk->sk_bound_dev_if == sdif); - if (exact_dif && !dev_match) + if (!dev_match) return -1; - if (sk->sk_bound_dev_if && dev_match) + if (sk->sk_bound_dev_if) score++; } diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index fca69c3771f55d7fb743cea0ae5937a7887c0b89..c28223d8092b18b6e54ac9aed3241dd93a951746 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -765,8 +765,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl if ((session->ifname[0] && nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || - (session->offset && - nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) || (session->cookie_len && nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0])) || diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index cf41d9b4a0b82dd46c804d6906bc514fd29ccfc4..b49f5afab405f77319af0ab333aa7021574532c7 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -930,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (size > llc->dev->mtu) size = llc->dev->mtu; copied = size - hdrlen; + rc = -EINVAL; + if (copied < 0) + goto release; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index f8d4ab8ca1a5f0781ca4704e85481efd9bdd47be..4b60f68cb4925251dc88a2c3d5f78f502e2e2437 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ 
-389,7 +389,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { - llc_conn_send_pdu(sk, skb); + rc = llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; @@ -916,7 +916,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { - llc_conn_send_pdu(sk, skb); + rc = llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; @@ -935,14 +935,17 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); + int ret; if (llc->ack_must_be_send) { - llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); + ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0 ; llc->ack_pf = 0; - } else - llc_conn_ac_send_i_cmd_p_set_0(sk, skb); - return 0; + } else { + ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb); + } + + return ret; } /** diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 9a42448eb182913dc87ba706a2057bcbfc1fa8cc..b084fd19ad325bb332019dc167410be69ff06c39 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -30,7 +30,7 @@ #endif static int llc_find_offset(int state, int ev_type); -static void llc_conn_send_pdus(struct sock *sk); +static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb); static int llc_conn_service(struct sock *sk, struct sk_buff *skb); static int llc_exec_conn_trans_actions(struct sock *sk, struct llc_conn_state_trans *trans, @@ -193,11 +193,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) return rc; } -void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) +int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) { /* queue PDU to 
send to MAC layer */ skb_queue_tail(&sk->sk_write_queue, skb); - llc_conn_send_pdus(sk); + return llc_conn_send_pdus(sk, skb); } /** @@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ - llc_conn_send_pdus(sk); + llc_conn_send_pdus(sk, NULL); out:; } @@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) if (howmany_resend > 0) llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; /* any PDUs to re-send are queued up; start sending to MAC */ - llc_conn_send_pdus(sk); + llc_conn_send_pdus(sk, NULL); out:; } @@ -340,12 +340,16 @@ int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked) /** * llc_conn_send_pdus - Sends queued PDUs * @sk: active connection + * @hold_skb: the skb held by caller, or NULL if does not care * - * Sends queued pdus to MAC layer for transmission. + * Sends queued pdus to MAC layer for transmission. When @hold_skb is + * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent + * successfully, or 1 for failure. 
*/ -static void llc_conn_send_pdus(struct sock *sk) +static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) { struct sk_buff *skb; + int ret = 0; while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); @@ -357,10 +361,20 @@ static void llc_conn_send_pdus(struct sock *sk) skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); if (!skb2) break; - skb = skb2; + dev_queue_xmit(skb2); + } else { + bool is_target = skb == hold_skb; + int rc; + + if (is_target) + skb_get(skb); + rc = dev_queue_xmit(skb); + if (is_target) + ret = rc; } - dev_queue_xmit(skb); } + + return ret; } /** diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 2849a1fc41c5dbbe06a7e5c9fbb4ff6276c84394..3a7cfe01ee6d3f9a567aaab8fb8d8bb84b880b57 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -8,6 +8,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -322,9 +323,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta, * driver so reject the timeout update. 
*/ status = WLAN_STATUS_REQUEST_DECLINED; - ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, - tid, dialog_token, status, - 1, buf_size, timeout); goto end; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 9675814f64dbcc9807e69452e4ad93a4dfe42556..894937bcd479751fe254f926b367575d41de6e00 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1466,7 +1466,7 @@ struct ieee802_11_elems { const struct ieee80211_timeout_interval_ie *timeout_int; const u8 *opmode_notif; const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; - const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; + struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie; const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie; /* length of them, respectively */ diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index a550c707cd8a6130ef5756cedf2fa4738ae9a0e7..96e57d7c287204f53dea1661bd1d8593ea725e94 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1253,13 +1253,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, } static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee802_11_elems *elems) { struct ieee80211_mgmt *mgmt_fwd; struct sk_buff *skb; struct ieee80211_local *local = sdata->local; - u8 *pos = mgmt->u.action.u.chan_switch.variable; - size_t offset_ttl; skb = dev_alloc_skb(local->tx_headroom + len); if (!skb) @@ -1267,13 +1266,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, skb_reserve(skb, local->tx_headroom); mgmt_fwd = skb_put(skb, len); - /* offset_ttl is based on whether the secondary channel - * offset is available or not. Subtract 1 from the mesh TTL - * and disable the initiator flag before forwarding. - */ - offset_ttl = (len < 42) ? 
7 : 10; - *(pos + offset_ttl) -= 1; - *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; + elems->mesh_chansw_params_ie->mesh_ttl--; + elems->mesh_chansw_params_ie->mesh_flags &= + ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; memcpy(mgmt_fwd, mgmt, len); eth_broadcast_addr(mgmt_fwd->da); @@ -1321,7 +1316,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata, /* forward or re-broadcast the CSA frame */ if (fwd_csa) { - if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0) + if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) mcsa_dbg(sdata, "Failed to forward the CSA frame"); } } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 4daafb07602f5f9727af7bec4ea7603901438b63..dddd498e1338183c14a2f9b930e63e2466e31778 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3928,7 +3928,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) != fast_rx->expected_ds_bits) - goto drop; + return false; /* assign the key to drop unencrypted frames (later) * and strip the IV/MIC if necessary diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index ee0181778a4297515de9bf0c75a947e7c45924a2..0293348357474ab411d5068218a521e59a54249e 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -8,6 +8,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2008, Intel Corporation * Copyright 2008, Johannes Berg + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, u32 sta_flags, u8 *bssid, struct ieee80211_csa_ie *csa_ie) { - enum nl80211_band new_band; + enum nl80211_band new_band = current_band; int new_freq; u8 new_chan_no; struct ieee80211_channel *new_chan; @@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct 
ieee80211_sub_if_data *sdata, elems->ext_chansw_ie->new_operating_class, &new_band)) { sdata_info(sdata, - "cannot understand ECSA IE operating class %d, disconnecting\n", + "cannot understand ECSA IE operating class, %d, ignoring\n", elems->ext_chansw_ie->new_operating_class); - return -EINVAL; } new_chan_no = elems->ext_chansw_ie->new_ch_num; csa_ie->count = elems->ext_chansw_ie->count; csa_ie->mode = elems->ext_chansw_ie->mode; } else if (elems->ch_switch_ie) { - new_band = current_band; new_chan_no = elems->ch_switch_ie->new_ch_num; csa_ie->count = elems->ch_switch_ie->count; csa_ie->mode = elems->ch_switch_ie->mode; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 69615016d5bf60cb89f46056f3faf7c6f2fd3ce4..f1b496222bda6665ed81a0abf6b6212af6969f1b 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, if (ieee80211_hw_check(hw, USES_RSS)) { sta->pcpu_rx_stats = - alloc_percpu(struct ieee80211_sta_rx_stats); + alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp); if (!sta->pcpu_rx_stats) goto free; } @@ -439,6 +439,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, if (sta->sta.txq[0]) kfree(to_txq_info(sta->sta.txq[0])); free: + free_percpu(sta->pcpu_rx_stats); #ifdef CONFIG_MAC80211_MESH kfree(sta->mesh); #endif diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 3e17d32b629d18e97f85fe8e543431562cdf3c6e..58d5d05aec24c5fcc0bb23f2ccceea887bfaa029 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -260,7 +260,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, buf_len = strlen(buf); ct = nf_ct_get(skb, &ctinfo); - if (ct && (ct->status & IPS_NAT_MASK)) { + if (ct) { bool mangled; /* If mangling fails this function will return 0 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 
5b504aa653f57bbabfcb22bcf5aba4cbef39398f..689e9c0570ba7c136c61996477e9c3d448756204 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2344,41 +2344,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, } if (nlh->nlmsg_flags & NLM_F_REPLACE) { - if (nft_is_active_next(net, old_rule)) { - trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, - old_rule); - if (trans == NULL) { - err = -ENOMEM; - goto err2; - } - nft_deactivate_next(net, old_rule); - chain->use--; - list_add_tail_rcu(&rule->list, &old_rule->list); - } else { + if (!nft_is_active_next(net, old_rule)) { err = -ENOENT; goto err2; } - } else if (nlh->nlmsg_flags & NLM_F_APPEND) - if (old_rule) - list_add_rcu(&rule->list, &old_rule->list); - else - list_add_tail_rcu(&rule->list, &chain->rules); - else { - if (old_rule) - list_add_tail_rcu(&rule->list, &old_rule->list); - else - list_add_rcu(&rule->list, &chain->rules); - } + trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, + old_rule); + if (trans == NULL) { + err = -ENOMEM; + goto err2; + } + nft_deactivate_next(net, old_rule); + chain->use--; - if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { - err = -ENOMEM; - goto err3; + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { + err = -ENOMEM; + goto err2; + } + + list_add_tail_rcu(&rule->list, &old_rule->list); + } else { + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { + err = -ENOMEM; + goto err2; + } + + if (nlh->nlmsg_flags & NLM_F_APPEND) { + if (old_rule) + list_add_rcu(&rule->list, &old_rule->list); + else + list_add_tail_rcu(&rule->list, &chain->rules); + } else { + if (old_rule) + list_add_tail_rcu(&rule->list, &old_rule->list); + else + list_add_rcu(&rule->list, &chain->rules); + } } chain->use++; return 0; -err3: - list_del_rcu(&rule->list); err2: nf_tables_rule_destroy(&ctx, rule); err1: @@ -3196,18 +3201,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, err = ops->init(set, &desc, nla); 
if (err < 0) - goto err2; + goto err3; err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) - goto err3; + goto err4; list_add_tail_rcu(&set->list, &table->sets); table->use++; return 0; -err3: +err4: ops->destroy(set); +err3: + kfree(set->name); err2: kvfree(set); err1: diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 22dc1b9d63625e76c72bdb80b97b1d07540fac32..c070dfc0190aa2bd84871eee50596a3d7c735636 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1472,6 +1472,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, iface = rcu_dereference(netlbl_unlhsh_def); if (iface == NULL || !iface->valid) goto unlabel_getattr_nolabel; + +#if IS_ENABLED(CONFIG_IPV6) + /* When resolving a fallback label, check the sk_buff version as + * it is possible (e.g. SCTP) to have family = PF_INET6 while + * receiving ip_hdr(skb)->version = 4. + */ + if (family == PF_INET6 && ip_hdr(skb)->version == 4) + family = PF_INET; +#endif /* IPv6 */ + switch (family) { case PF_INET: { struct iphdr *hdr4; diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 367d8c02710181f06bf1a422e41239ed12e77d90..2ceefa183ceed6ba3d06f2aae958104a514f2146 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri, pr_debug("uri: %s, len: %zu\n", uri, uri_len); + /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */ + if (WARN_ON_ONCE(uri_len > U8_MAX - 4)) + return NULL; + sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); if (sdreq == NULL) return NULL; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index b251fb936a27a9632c55d5aa91c1dbd2fa543ffa..08ed6abe4aaedc2c2854902fd3caf60974b8b287 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = { }; static const struct nla_policy 
nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { - [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, + [NFC_SDP_ATTR_URI] = { .type = NLA_STRING, + .len = U8_MAX - 4 }, [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, }; diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index 58fb827439a86d64df4ef1978c23b66a238275c9..6df6f58a810388af5a1e69fcba99702894041c08 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -30,6 +30,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) goto out; nsh_len = nsh_hdr_len(nsh_hdr(skb)); + if (nsh_len < NSH_BASE_HDR_LEN) + goto out; if (unlikely(!pskb_may_pull(skb, nsh_len))) goto out; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 0d9f6afa266c5b9f4bca002e235035149c370c40..4c9c9458374a956a4a03683691cc259bcc299f4d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1404,13 +1404,10 @@ static void nlattr_set(struct nlattr *attr, u8 val, /* The nlattr stream should already have been validated */ nla_for_each_nested(nla, attr, rem) { - if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { - if (tbl[nla_type(nla)].next) - tbl = tbl[nla_type(nla)].next; - nlattr_set(nla, val, tbl); - } else { + if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) + nlattr_set(nla, val, tbl[nla_type(nla)].next ? 
: tbl); + else memset(nla_data(nla), val, nla_len(nla)); - } if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE) *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3994b71f81971b6419f277138d98c7ef7d003ef5..8351faabba62a4d8e1af7b9116e44a3261010cfa 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2912,13 +2912,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (skb == NULL) goto out_unlock; - skb_set_network_header(skb, reserve); + skb_reset_network_header(skb); err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (unlikely(offset < 0)) goto out_free; + } else if (reserve) { + skb_push(skb, reserve); } /* Returns -EFAULT on error */ diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c index 50615d5efac1529a0fd617c2692b1e9419da7137..9cf089b9754eaadbc58d9fdd9c455876a07712ca 100644 --- a/net/qrtr/smd.c +++ b/net/qrtr/smd.c @@ -114,5 +114,6 @@ static struct rpmsg_driver qcom_smd_qrtr_driver = { module_rpmsg_driver(qcom_smd_qrtr_driver); +MODULE_ALIAS("rpmsg:IPCRTR"); MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); MODULE_LICENSE("GPL v2"); diff --git a/net/rds/recv.c b/net/rds/recv.c index b25bcfe411ca65935ef9e2417a836d2575023cdd..555f07ccf0dc60d480adb0495091a748dca9b3ba 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, struct rds_cmsg_rx_trace t; int i, j; + memset(&t, 0, sizeof(t)); inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); t.rx_traces = rs->rs_rx_traces; for (i = 0; i < rs->rs_rx_traces; i++) { diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index c061d6eb465d528966a513173950463ded396c5e..22571189f21e7e4a805af1b7edaed1c9f3c918ef 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Oracle. All rights reserved. 
+ * Copyright (c) 2006, 2018 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -142,12 +142,20 @@ int rds_tcp_accept_one(struct socket *sock) if (ret) goto out; - new_sock->type = sock->type; - new_sock->ops = sock->ops; ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true); if (ret < 0) goto out; + /* sock_create_lite() does not get a hold on the owner module so we + * need to do it here. Note that sock_release() uses sock->ops to + * determine if it needs to decrement the reference count. So set + * sock->ops after calling accept() in case that fails. And there's + * no need to do try_module_get() as the listener should have a hold + * already. + */ + new_sock->ops = sock->ops; + __module_get(new_sock->ops->owner); + ret = rds_tcp_keepalive(new_sock); if (ret < 0) goto out; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index e56e23ed2229c8568f839cdda32ade1ade0eaee7..5edb636dbc4d6d018c31adb13b737317d4e5d778 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -1175,16 +1175,19 @@ void rxrpc_data_ready(struct sock *udp_sk) goto discard_unlock; if (sp->hdr.callNumber == chan->last_call) { - /* For the previous service call, if completed successfully, we - * discard all further packets. + if (chan->call || + sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) + goto discard_unlock; + + /* For the previous service call, if completed + * successfully, we discard all further packets. */ if (rxrpc_conn_is_service(conn) && - (chan->last_type == RXRPC_PACKET_TYPE_ACK || - sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)) + chan->last_type == RXRPC_PACKET_TYPE_ACK) goto discard_unlock; - /* But otherwise we need to retransmit the final packet from - * data cached in the connection record. + /* But otherwise we need to retransmit the final packet + * from data cached in the connection record. 
*/ rxrpc_post_packet_to_conn(conn, skb); goto out_unlock; diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index bdece21f313de7d2a0d3bdf3dd1933c8d7a7d0b2..abcf48026d9908f0394058fdb25f9e104d2c0833 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -513,9 +513,10 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, sizeof(unsigned int), &id32); } else { + unsigned long idl = call->user_call_ID; + ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, - sizeof(unsigned long), - &call->user_call_ID); + sizeof(unsigned long), &idl); } if (ret < 0) goto error_unlock_call; diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index d2f51d6a253c2e093f71ac8a879dfe97a0bc25ec..016e293681b8cfe64ec120e198af0f8e76f1c8d8 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -92,7 +92,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix) spin_lock_bh(&call->lock); if (call->state < RXRPC_CALL_COMPLETE) { - call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS; + call->rxtx_annotations[ix] = + (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) | + RXRPC_TX_ANNO_RETRANS; if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) rxrpc_queue_call(call); } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 2b087623fb1d1ba31d13176c1b38abaac7c3b4f4..364a878e51cb19c62f329559947278bba574f7e1 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -356,7 +356,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, return res; out: if (res == ACT_P_CREATED) - tcf_idr_cleanup(*act, est); + tcf_idr_release(*act, bind); return ret; } diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index d9e399a7e3d59c8ec53e46e0862e2ca3c83988bc..18b2fd2ba7d7e5a30de876045d651b732bd91949 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t) static void tcf_ipt_release(struct tc_action 
*a, int bind) { struct tcf_ipt *ipt = to_ipt(a); - ipt_destroy_target(ipt->tcfi_t); + + if (ipt->tcfi_t) { + ipt_destroy_target(ipt->tcfi_t); + kfree(ipt->tcfi_t); + } kfree(ipt->tcfi_tname); - kfree(ipt->tcfi_t); } static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { @@ -187,7 +190,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, kfree(tname); err1: if (ret == ACT_P_CREATED) - tcf_idr_cleanup(*a, est); + tcf_idr_release(*a, bind); return err; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 491fe5deb09ee7f38a6c0f892c43ffe0bce79fd1..51ab463d9e168b7e174ae1e3169639ed721580ad 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, p = to_pedit(*a); keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { - tcf_idr_cleanup(*a, est); + tcf_idr_release(*a, bind); kfree(keys_ex); return -ENOMEM; } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 3bb2ebf9e9aec2743033ecdc4f5763ce601bf653..c16127109f219ef2e74b132baeb120d10017103f 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -194,7 +194,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, qdisc_put_rtab(P_tab); qdisc_put_rtab(R_tab); if (ret == ACT_P_CREATED) - tcf_idr_cleanup(*a, est); + tcf_idr_release(*a, bind); return err; } diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 8b5abcd2f32faeaa2a283bcc8fb388201f7a86e2..53752b9327d02e1f81ce48a0d4459366421bd117 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -103,7 +103,8 @@ static void tcf_sample_cleanup_rcu(struct rcu_head *rcu) psample_group = rcu_dereference_protected(s->psample_group, 1); RCU_INIT_POINTER(s->psample_group, NULL); - psample_group_put(psample_group); + if (psample_group) + psample_group_put(psample_group); } static void tcf_sample_cleanup(struct tc_action *a, int bind) diff --git a/net/sched/act_simple.c 
b/net/sched/act_simple.c index e7b57e5071a365743de9d2c5aaa22dca445fa0d9..b5f80e675783b3cb3fcd02a67dd04e199b39ee85 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, d = to_defact(*a); ret = alloc_defdata(d, defdata); if (ret < 0) { - tcf_idr_cleanup(*a, est); + tcf_idr_release(*a, bind); return ret; } d->tcf_action = parm->action; diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index 6d10b3af479b65ecb086306d05f5c0feec4c2cd5..d227599f7e73381cb56a5b4516ae36ef2f02ef1f 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, if (exists && bind) return 0; - if (!lflags) + if (!lflags) { + if (exists) + tcf_idr_release(*a, bind); return -EINVAL; + } if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, @@ -152,7 +155,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla, ASSERT_RTNL(); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { - if (ovr) + if (ret == ACT_P_CREATED) tcf_idr_release(*a, bind); return -ENOMEM; } diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c index 16eb067a8d8fa20c17894db8047571789c0b96e8..5c10a0fce35b390d32dec73631981b26a2e2f16d 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -154,6 +154,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla, case htons(ETH_P_8021AD): break; default: + if (exists) + tcf_idr_release(*a, bind); return -EPROTONOSUPPORT; } } else { diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index c2fab4bcb8beca770c5108fb231b37664b91bcc0..2f4e1483aced9e831460e3aa483842848b480e2d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -151,8 +151,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, } else { err = -ENOENT; } - goto errout; #endif + goto errout; } tp->classify = tp->ops->classify; tp->protocol = 
protocol; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 263d16e3219e6d747496619fe5fedd03c22ba44f..f50eb87cfe7996ef03d6ffd8b6869ac9fcec969e 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f) return f->next == &detached; } +static bool fq_flow_is_throttled(const struct fq_flow *f) +{ + return f->next == &throttled; +} + +static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) +{ + if (head->first) + head->last->next = flow; + else + head->first = flow; + head->last = flow; + flow->next = NULL; +} + +static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) +{ + rb_erase(&f->rate_node, &q->delayed); + q->throttled_flows--; + fq_flow_add_tail(&q->old_flows, f); +} + static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) { struct rb_node **p = &q->delayed.rb_node, *parent = NULL; @@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) static struct kmem_cache *fq_flow_cachep __read_mostly; -static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) -{ - if (head->first) - head->last->next = flow; - else - head->first = flow; - head->last = flow; - flow->next = NULL; -} /* limit number of collected flows per round */ #define FQ_GC_MAX 8 @@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) f->socket_hash != sk->sk_hash)) { f->credit = q->initial_quantum; f->socket_hash = sk->sk_hash; + if (fq_flow_is_throttled(f)) + fq_flow_unset_throttled(q, f); f->time_next_packet = 0ULL; } return f; @@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now) q->time_next_delayed_flow = f->time_next_packet; break; } - rb_erase(p, &q->delayed); - q->throttled_flows--; - fq_flow_add_tail(&q->old_flows, f); + fq_flow_unset_throttled(q, f); } } diff --git a/net/sched/sch_red.c 
b/net/sched/sch_red.c index d87c41e82917570712f4f50c723e04fff04e2448..c453b8d81c9e34483fa0842df29608e653181daa 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -191,10 +191,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); if (IS_ERR(child)) return PTR_ERR(child); - } - if (child != &noop_qdisc) + /* child is fifo, no need to check for noop_qdisc */ qdisc_hash_add(child, true); + } + sch_tree_lock(sch); q->flags = ctl->flags; q->limit = ctl->limit; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 120f4f36596786746b89a2832c125e1814d6fd9b..b36ecb58aa6ee0ae6aad3af570c0a77900ced352 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -388,6 +388,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) err = PTR_ERR(child); goto done; } + + /* child is fifo, no need to check for noop_qdisc */ + qdisc_hash_add(child, true); } sch_tree_lock(sch); @@ -396,8 +399,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) q->qdisc->qstats.backlog); qdisc_destroy(q->qdisc); q->qdisc = child; - if (child != &noop_qdisc) - qdisc_hash_add(child, true); } q->limit = qopt->limit; if (tb[TCA_TBF_PBURST]) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index dfb9651e818bb597a5f4cde0b3b7ce3d3d4fd3d5..58f7d8cfd748c95452201fff58dd73e6ecb06d5d 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1025,8 +1025,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sctp_inq *inqueue; - int state; + int first_time = 1; /* is this the first time through the loop */ int error = 0; + int state; /* The association should be held so we should be safe. 
*/ ep = asoc->ep; @@ -1037,6 +1038,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + /* If the first chunk in the packet is AUTH, do special + * processing specified in Section 6.3 of SCTP-AUTH spec + */ + if (first_time && subtype.chunk == SCTP_CID_AUTH) { + struct sctp_chunkhdr *next_hdr; + + next_hdr = sctp_inq_peek(inqueue); + if (!next_hdr) + goto normal; + + /* If the next chunk is COOKIE-ECHO, skip the AUTH + * chunk while saving a pointer to it so we can do + * Authentication later (during cookie-echo + * processing). + */ + if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { + chunk->auth_chunk = skb_clone(chunk->skb, + GFP_ATOMIC); + chunk->auth = 1; + continue; + } + } + +normal: /* SCTP-AUTH, Section 6.3: * The receiver has a list of chunk types which it expects * to be received only after an AUTH-chunk. This list has @@ -1075,6 +1100,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) /* If there is an error on chunk, discard this packet. */ if (error && chunk) chunk->pdiscard = 1; + + if (first_time) + first_time = 0; } sctp_association_put(asoc); } diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index 48392552ee7c1ea75a134375b55ffb0ebf59064e..1aa89d4682f42f84b7108f2c79b1099f549ef5f6 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -217,7 +217,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) skb_pull(chunk->skb, sizeof(*ch)); chunk->subh.v = NULL; /* Subheader is no longer valid. 
*/ - if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) { + if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) { /* This is not a singleton */ chunk->singleton = 0; } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 7219a1c041f759da3c6275d15824799274e3ee01..853fecdf63744e9b4bd61cccafcfd72f7ec2dd1e 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -865,6 +865,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) return 1; + if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET) + return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr; + return __sctp_v6_cmp_addr(addr1, addr2); } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 8f8ccded13e47c4b5403b5a27416b0d5707d1f33..01b078172306a59e66e28883327942548c1baaf9 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -150,10 +150,7 @@ static enum sctp_disposition sctp_sf_violation_chunk( struct sctp_cmd_seq *commands); static enum sctp_ierror sctp_sf_authenticate( - struct net *net, - const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const union sctp_subtype type, struct sctp_chunk *chunk); static enum sctp_disposition __sctp_sf_do_9_1_abort( @@ -618,6 +615,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net, return SCTP_DISPOSITION_CONSUME; } +static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk, + const struct sctp_association *asoc) +{ + struct sctp_chunk auth; + + if (!chunk->auth_chunk) + return true; + + /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo + * is supposed to be authenticated and we have to do delayed + * authentication. We've just recreated the association using + * the information in the cookie and now it's much easier to + * do the authentication. 
+ */ + + /* Make sure that we and the peer are AUTH capable */ + if (!net->sctp.auth_enable || !asoc->peer.auth_capable) + return false; + + /* set-up our fake chunk so that we can process it */ + auth.skb = chunk->auth_chunk; + auth.asoc = chunk->asoc; + auth.sctp_hdr = chunk->sctp_hdr; + auth.chunk_hdr = (struct sctp_chunkhdr *) + skb_push(chunk->auth_chunk, + sizeof(struct sctp_chunkhdr)); + skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); + auth.transport = chunk->transport; + + return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR; +} + /* * Respond to a normal COOKIE ECHO chunk. * We are the side that is being asked for an association. @@ -755,37 +784,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, if (error) goto nomem_init; - /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo - * is supposed to be authenticated and we have to do delayed - * authentication. We've just recreated the association using - * the information in the cookie and now it's much easier to - * do the authentication. 
- */ - if (chunk->auth_chunk) { - struct sctp_chunk auth; - enum sctp_ierror ret; - - /* Make sure that we and the peer are AUTH capable */ - if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { - sctp_association_free(new_asoc); - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - } - - /* set-up our fake chunk so that we can process it */ - auth.skb = chunk->auth_chunk; - auth.asoc = chunk->asoc; - auth.sctp_hdr = chunk->sctp_hdr; - auth.chunk_hdr = (struct sctp_chunkhdr *) - skb_push(chunk->auth_chunk, - sizeof(struct sctp_chunkhdr)); - skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr)); - auth.transport = chunk->transport; - - ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); - if (ret != SCTP_IERROR_NO_ERROR) { - sctp_association_free(new_asoc); - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - } + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) { + sctp_association_free(new_asoc); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } repl = sctp_make_cookie_ack(new_asoc, chunk); @@ -1755,13 +1756,15 @@ static enum sctp_disposition sctp_sf_do_dupcook_a( GFP_ATOMIC)) goto nomem; + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) + return SCTP_DISPOSITION_DISCARD; + /* Make sure no new addresses are being added during the * restart. Though this is a pretty complicated attack * since you'd have to get inside the cookie. */ - if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { + if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) return SCTP_DISPOSITION_CONSUME; - } /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes * the peer has restarted (Action A), it MUST NOT setup a new @@ -1867,6 +1870,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b( GFP_ATOMIC)) goto nomem; + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) + return SCTP_DISPOSITION_DISCARD; + /* Update the content of current association. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, @@ -1961,6 +1967,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d( * a COOKIE ACK. */ + if (!sctp_auth_chunk_verify(net, chunk, asoc)) + return SCTP_DISPOSITION_DISCARD; + /* Don't accidentally move back into established state. */ if (asoc->state < SCTP_STATE_ESTABLISHED) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, @@ -2000,7 +2009,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d( } } - repl = sctp_make_cookie_ack(new_asoc, chunk); + repl = sctp_make_cookie_ack(asoc, chunk); if (!repl) goto nomem; @@ -4111,10 +4120,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast( * The return value is the disposition of the chunk. */ static enum sctp_ierror sctp_sf_authenticate( - struct net *net, - const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const union sctp_subtype type, struct sctp_chunk *chunk) { struct sctp_authhdr *auth_hdr; @@ -4212,7 +4218,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net, commands); auth_hdr = (struct sctp_authhdr *)chunk->skb->data; - error = sctp_sf_authenticate(net, ep, asoc, type, chunk); + error = sctp_sf_authenticate(asoc, chunk); switch (error) { case SCTP_IERROR_AUTH_BAD_HMAC: /* Generate the ERROR chunk and discard the rest diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 5447228bf1a014ad6c95b80355bb1c3827800db7..8538c96c96c113efabe52aceeabc3e7e3adc8900 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -717,7 +717,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, return event; fail_mark: - sctp_chunk_put(chunk); kfree_skb(skb); fail: return NULL; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 7166e7ecbe861d94ba99bd97885bea3d3692ced2..f04a037dc96774e7f2cd94fce15d373d411795f1 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -174,6 +174,7 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 
peer_in_addr, lnk = &lgr->lnk[SMC_SINGLE_LINK]; /* initialize link */ + lnk->link_id = SMC_SINGLE_LINK; lnk->smcibdev = smcibdev; lnk->ibport = ibport; lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 9033b8a36fe17fe9bd72e8fe9bd52c5dcf7e4290..4410d007151542c8dfc12e57f2c942662b9958d3 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -23,6 +23,8 @@ #include "smc_wr.h" #include "smc.h" +#define SMC_MAX_CQE 32766 /* max. # of completion queue elements */ + #define SMC_QP_MIN_RNR_TIMER 5 #define SMC_QP_TIMEOUT 15 /* 4096 * 2 ** timeout usec */ #define SMC_QP_RETRY_CNT 7 /* 7: infinite */ @@ -435,9 +437,15 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport) long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) { struct ib_cq_init_attr cqattr = { - .cqe = SMC_WR_MAX_CQE, .comp_vector = 0 }; + .cqe = SMC_MAX_CQE, .comp_vector = 0 }; + int cqe_size_order, smc_order; long rc; + /* the calculated number of cq entries fits to mlx5 cq allocation */ + cqe_size_order = cache_line_size() == 128 ? 
7 : 6; + smc_order = MAX_ORDER - cqe_size_order - 1; + if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE) + cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2; smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev, smc_wr_tx_cq_handler, NULL, smcibdev, &cqattr); diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 92fe4cc8c82c2f0e860c40e40e61e1fa1ccea427..b4aa4fcedb96e5feeaa29c1126589502480b5fe2 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -92,7 +92,7 @@ int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[], memcpy(confllc->sender_mac, mac, ETH_ALEN); memcpy(confllc->sender_gid, gid, SMC_GID_SIZE); hton24(confllc->sender_qp_num, link->roce_qp->qp_num); - /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */ + confllc->link_num = link->link_id; memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE); confllc->max_links = SMC_LINKS_PER_LGR_MAX; /* send llc message */ diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 74568cdbca7087532c20b891496edeb467b13829..d7b88b2d1b224195b2d82523c047052c67f2e1eb 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -245,40 +245,45 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem, struct nlattr *tb[]) { - char *string, *ibname = NULL; - int rc = 0; + char *string, *ibname; + int rc; memset(pnetelem, 0, sizeof(*pnetelem)); INIT_LIST_HEAD(&pnetelem->list); - if (tb[SMC_PNETID_NAME]) { - string = (char *)nla_data(tb[SMC_PNETID_NAME]); - if (!smc_pnetid_valid(string, pnetelem->pnet_name)) { - rc = -EINVAL; - goto error; - } - } - if (tb[SMC_PNETID_ETHNAME]) { - string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); - pnetelem->ndev = dev_get_by_name(net, string); - if (!pnetelem->ndev) - return -ENOENT; - } - if (tb[SMC_PNETID_IBNAME]) { - ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]); - ibname = strim(ibname); - pnetelem->smcibdev = smc_pnet_find_ib(ibname); - if 
(!pnetelem->smcibdev) { - rc = -ENOENT; - goto error; - } - } - if (tb[SMC_PNETID_IBPORT]) { - pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]); - if (pnetelem->ib_port > SMC_MAX_PORTS) { - rc = -EINVAL; - goto error; - } - } + + rc = -EINVAL; + if (!tb[SMC_PNETID_NAME]) + goto error; + string = (char *)nla_data(tb[SMC_PNETID_NAME]); + if (!smc_pnetid_valid(string, pnetelem->pnet_name)) + goto error; + + rc = -EINVAL; + if (!tb[SMC_PNETID_ETHNAME]) + goto error; + rc = -ENOENT; + string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); + pnetelem->ndev = dev_get_by_name(net, string); + if (!pnetelem->ndev) + goto error; + + rc = -EINVAL; + if (!tb[SMC_PNETID_IBNAME]) + goto error; + rc = -ENOENT; + ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]); + ibname = strim(ibname); + pnetelem->smcibdev = smc_pnet_find_ib(ibname); + if (!pnetelem->smcibdev) + goto error; + + rc = -EINVAL; + if (!tb[SMC_PNETID_IBPORT]) + goto error; + pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]); + if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS) + goto error; + return 0; error: @@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info) void *hdr; int rc; + if (!info->attrs[SMC_PNETID_NAME]) + return -EINVAL; pnetelem = smc_pnet_find_pnetid( (char *)nla_data(info->attrs[SMC_PNETID_NAME])); if (!pnetelem) @@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) { + if (!info->attrs[SMC_PNETID_NAME]) + return -EINVAL; return smc_pnet_remove_by_pnetid( (char *)nla_data(info->attrs[SMC_PNETID_NAME])); } diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 2acf12b060639a409e5d5f0ad98e6d48aeab0da9..c307402e67d653408b81d8de78e99a766bd51244 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -19,7 +19,6 @@ #include "smc.h" #include "smc_core.h" -#define SMC_WR_MAX_CQE 32768 /* max. 
# of completion queue elements */ #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */ #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 282361ac0263248188c0e792aa43baf0183429d1..ffb1a3a69bdd99d34d1cc29c2176b118a1ebe2f0 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -87,6 +87,7 @@ int tls_push_sg(struct sock *sk, size = sg->length - offset; offset += sg->offset; + ctx->in_tcp_sendpages = true; while (1) { if (sg_is_last(sg)) sendpage_flags = flags; @@ -107,6 +108,7 @@ int tls_push_sg(struct sock *sk, offset -= sg->offset; ctx->partially_sent_offset = offset; ctx->partially_sent_record = (void *)sg; + ctx->in_tcp_sendpages = false; return ret; } @@ -121,6 +123,8 @@ int tls_push_sg(struct sock *sk, } clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); + ctx->in_tcp_sendpages = false; + ctx->sk_write_space(sk); return 0; } @@ -190,6 +194,10 @@ static void tls_write_space(struct sock *sk) { struct tls_context *ctx = tls_get_ctx(sk); + /* We are already sending pages, ignore notification */ + if (ctx->in_tcp_sendpages) + return; + if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { gfp_t sk_allocation = sk->sk_allocation; int rc; @@ -291,7 +299,8 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, goto out; } lock_sock(sk); - memcpy(crypto_info_aes_gcm_128->iv, ctx->iv, + memcpy(crypto_info_aes_gcm_128->iv, + ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, TLS_CIPHER_AES_GCM_128_IV_SIZE); release_sock(sk); if (copy_to_user(optval, diff --git a/net/wireless/core.c b/net/wireless/core.c index 33ce0484b2a03f9b31858a06257f176fdf6bb3a7..45cbade9ad68cfdd4ecc198b0abced521d8ed974 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev, ASSERT_RTNL(); + if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN) + return -EINVAL; + /* prohibit calling the thing phy%d when %d is not its 
number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index fa42a435943d5ee958c2def1dc2570efcab0e874..7ae58969a6511dd21903d83653b5fead1391d6d6 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6621,6 +6621,86 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev) return regulatory_pre_cac_allowed(wdev->wiphy); } +static int +nl80211_check_scan_flags(struct wiphy *wiphy, struct wireless_dev *wdev, + void *request, struct nlattr **attrs, + bool is_sched_scan) +{ + u8 *mac_addr, *mac_addr_mask; + u32 *flags; + enum nl80211_feature_flags randomness_flag; + + if (!attrs[NL80211_ATTR_SCAN_FLAGS]) + return 0; + + if (is_sched_scan) { + struct cfg80211_sched_scan_request *req = request; + + randomness_flag = wdev ? + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR : + NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + flags = &req->flags; + mac_addr = req->mac_addr; + mac_addr_mask = req->mac_addr_mask; + } else { + struct cfg80211_scan_request *req = request; + + randomness_flag = NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + flags = &req->flags; + mac_addr = req->mac_addr; + mac_addr_mask = req->mac_addr_mask; + } + + *flags = nla_get_u32(attrs[NL80211_ATTR_SCAN_FLAGS]); + + if (((*flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && + !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) || + ((*flags & NL80211_SCAN_FLAG_LOW_SPAN) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_LOW_SPAN_SCAN)) || + ((*flags & NL80211_SCAN_FLAG_LOW_POWER) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_LOW_POWER_SCAN)) || + ((*flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN))) + return -EOPNOTSUPP; + + if (*flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + int err; + + if (!(wiphy->features & randomness_flag) || + (wdev && wdev->current_bss)) + return 
-EOPNOTSUPP; + + err = nl80211_parse_random_mac(attrs, mac_addr, mac_addr_mask); + if (err) + return err; + } + + if ((*flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME)) + return -EOPNOTSUPP; + + if ((*flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP)) + return -EOPNOTSUPP; + + if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION)) + return -EOPNOTSUPP; + + if ((*flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE) && + !wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE)) + return -EOPNOTSUPP; + + return 0; +} + static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; @@ -6826,34 +6906,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]); } - if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) { - request->flags = nla_get_u32( - info->attrs[NL80211_ATTR_SCAN_FLAGS]); - if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && - !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) { - err = -EOPNOTSUPP; - goto out_free; - } - - if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - if (!(wiphy->features & - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR)) { - err = -EOPNOTSUPP; - goto out_free; - } - - if (wdev->current_bss) { - err = -EOPNOTSUPP; - goto out_free; - } - - err = nl80211_parse_random_mac(info->attrs, - request->mac_addr, - request->mac_addr_mask); - if (err) - goto out_free; - } - } + err = nl80211_check_scan_flags(wiphy, wdev, request, info->attrs, + false); + if (err) + goto out_free; request->no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); @@ -7301,37 +7357,9 @@ 
nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, request->ie_len); } - if (attrs[NL80211_ATTR_SCAN_FLAGS]) { - request->flags = nla_get_u32( - attrs[NL80211_ATTR_SCAN_FLAGS]); - if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && - !(wiphy->features & NL80211_FEATURE_LOW_PRIORITY_SCAN)) { - err = -EOPNOTSUPP; - goto out_free; - } - - if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - u32 flg = NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR; - - if (!wdev) /* must be net-detect */ - flg = NL80211_FEATURE_ND_RANDOM_MAC_ADDR; - - if (!(wiphy->features & flg)) { - err = -EOPNOTSUPP; - goto out_free; - } - - if (wdev && wdev->current_bss) { - err = -EOPNOTSUPP; - goto out_free; - } - - err = nl80211_parse_random_mac(attrs, request->mac_addr, - request->mac_addr_mask); - if (err) - goto out_free; - } - } + err = nl80211_check_scan_flags(wiphy, wdev, request, attrs, true); + if (err) + goto out_free; if (attrs[NL80211_ATTR_SCHED_SCAN_DELAY]) request->delay = @@ -8939,8 +8967,14 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_USE_MFP]) { connect.mfp = nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); + if (connect.mfp == NL80211_MFP_OPTIONAL && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_MFP_OPTIONAL)) + return -EOPNOTSUPP; + if (connect.mfp != NL80211_MFP_REQUIRED && - connect.mfp != NL80211_MFP_NO) + connect.mfp != NL80211_MFP_NO && + connect.mfp != NL80211_MFP_OPTIONAL) return -EINVAL; } else { connect.mfp = NL80211_MFP_NO; @@ -15147,7 +15181,8 @@ void cfg80211_ft_event(struct net_device *netdev, if (!ft_event->target_ap) return; - msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL); + msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len, + GFP_KERNEL); if (!msg) return; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 5b2409746ae0a14c491cc8dfd316facc5cd67347..9f492dc417d59f09c2378ce1e136ddf1ad7d6e79 100644 --- a/net/xfrm/xfrm_input.c 
+++ b/net/xfrm/xfrm_input.c @@ -26,6 +26,12 @@ struct xfrm_trans_tasklet { }; struct xfrm_trans_cb { + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); }; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 73ad8c8ef344ac4bb6229b49c0b14b099e5fdf13..35610cc881a9a388efa6081d16690efebe4b3614 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -285,8 +285,9 @@ void xfrm_local_error(struct sk_buff *skb, int mtu) return; afinfo = xfrm_state_get_afinfo(proto); - if (afinfo) + if (afinfo) { afinfo->local_error(skb, mtu); - rcu_read_unlock(); + rcu_read_unlock(); + } } EXPORT_SYMBOL_GPL(xfrm_local_error); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7d17c207fc8aa6f4729c467dfde5c7afe888c747..9c57d6a5816cc4c8d863387a8351e22bad424ede 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1459,10 +1459,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, static int xfrm_get_tos(const struct flowi *fl, int family) { const struct xfrm_policy_afinfo *afinfo; - int tos = 0; + int tos; afinfo = xfrm_policy_get_afinfo(family); - tos = afinfo ? 
afinfo->get_tos(fl) : 0; + if (!afinfo) + return 0; + + tos = afinfo->get_tos(fl); rcu_read_unlock(); diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 02501817227bebf58fc24a2e808f0b51b15fa28b..bdb9b5121ba888133c149e8474ad1913b254d288 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -658,7 +658,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff } else { XFRM_SKB_CB(skb)->seq.output.low = oseq + 1; XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; - xo->seq.low = oseq = oseq + 1; + xo->seq.low = oseq + 1; xo->seq.hi = oseq_hi; oseq += skb_shinfo(skb)->gso_segs; } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 8f13fb57eab5a25d300b04a986c2375a8986b75c..6c4ec69e11a0062a298d21983dbf05ed8eb2dfb7 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1345,6 +1345,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, if (orig->aead) { x->aead = xfrm_algo_aead_clone(orig->aead); + x->geniv = orig->geniv; if (!x->aead) goto error; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3badd2c753e17da45e2cb0c799a93fd67eba9cc9..dbfcfefd6d69309027c2ccf5871894dcbe5b4fa4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1753,10 +1753,6 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, struct sk_buff *skb; int err; - err = verify_policy_dir(dir); - if (err) - return ERR_PTR(err); - skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); @@ -2278,10 +2274,6 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, struct net *net = sock_net(skb->sk); struct xfrm_encap_tmpl *encap = NULL; - err = verify_policy_dir(pi->dir); - if (err) - return err; - if (attrs[XFRMA_MIGRATE] == NULL) return -EINVAL; @@ -2415,11 +2407,6 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, { struct net *net = &init_net; struct sk_buff *skb; - int err; - - err = 
verify_policy_dir(dir); - if (err) - return err; skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap), GFP_ATOMIC); @@ -3089,11 +3076,6 @@ static int xfrm_notify_policy_flush(const struct km_event *c) static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { - int err; - - err = verify_policy_dir(dir); - if (err) - return err; switch (c->event) { case XFRM_MSG_NEWPOLICY: diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh index 513da1a4a2daaf7ed329b816c49d787916e72e3b..d67830e6e3604f18ef073ca80727af82122d7cde 100755 --- a/scripts/adjust_autoksyms.sh +++ b/scripts/adjust_autoksyms.sh @@ -84,6 +84,13 @@ while read sympath; do depfile="include/config/ksym/${sympath}.h" mkdir -p "$(dirname "$depfile")" touch "$depfile" + # Filesystems with coarse time precision may create timestamps + # equal to the one from a file that was very recently built and that + # needs to be rebuild. Let's guard against that by making sure our + # dep files are always newer than the first file we created here. + while [ ! "$depfile" -nt "$new_ksyms_file" ]; do + touch "$depfile" + done echo $((count += 1)) done | tail -1 ) changed=${changed:-0} diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 0bc87473f68f817b81640591f75f18c3b068351c..e15159d0a884e40ea1a1cd0d4e4c5a5c9ae590b9 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -313,7 +313,7 @@ fi # Build kernel header package (cd $srctree; find . 
-name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles" -(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles" +(cd $srctree; find arch/*/include include scripts -type f -o -type l) >> "$objtree/debian/hdrsrcfiles" (cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles" (cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles" if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c index 06554c448dce886d3bd7a01a745a78ab5fd734e8..9676c8887da9b7aab62e758f56872b459a3cf767 100644 --- a/security/integrity/digsig.c +++ b/security/integrity/digsig.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 35ef69312811377125ff03923c5956ed6ab878af..6a8f67714c831570a674c2bde566191a544906fc 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -10,6 +10,7 @@ config IMA select CRYPTO_HASH_INFO select TCG_TPM if HAS_IOMEM && !UML select TCG_TIS if TCG_TPM && X86 + select TCG_CRB if TCG_TPM && ACPI select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES help The Trusted Computing Group(TCG) runtime Integrity diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index 802d5d20f36fe46ecb787163f4dafd532741c78b..90453aa1c81328155bd2c2d7459580b6408d837d 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -78,6 +78,8 @@ int __init ima_init_crypto(void) hash_algo_name[ima_hash_algo], rc); return rc; } + pr_info("Allocated hash algorithm: %s\n", + hash_algo_name[ima_hash_algo]); return 0; } diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 
ab70a395f4903ead18db22b0ad7dd7aaba8ac4a6..7e334fd31c154380f83c88fb0b6bd9c73d50efcf 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -16,6 +16,9 @@ * implements the IMA hooks: ima_bprm_check, ima_file_mmap, * and ima_file_check. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -427,6 +430,16 @@ static int __init init_ima(void) ima_init_template_list(); hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); + + if (error && strcmp(hash_algo_name[ima_hash_algo], + CONFIG_IMA_DEFAULT_HASH) != 0) { + pr_info("Allocating %s failed, going to use default hash algorithm %s\n", + hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH); + hash_setup_done = 0; + hash_setup(CONFIG_IMA_DEFAULT_HASH); + error = ima_init(); + } + if (!error) { ima_initialized = 1; ima_update_policy_flag(); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index c9c031e3d1ae86273bab92b1f1f5b6b2f9a09d0b..b275743e23cc1d919d1049f873d8d92319f9d2a2 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1448,7 +1448,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len, scontext_len, &context, def_sid); if (rc == -EINVAL && force) { context.str = str; - context.len = scontext_len; + context.len = strlen(str) + 1; str = NULL; } else if (rc) goto out_unlock; diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index a848836a5de0468534d5eecfd24adf4bc743f9f2..507fd5210c1cd54d764e718ab00c7c657857ad6c 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file, if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) || copy_from_user(&data->type, &data32->type, 3 * sizeof(u32))) goto error; - if (get_user(data->owner, &data32->owner) || - get_user(data->type, &data32->type)) + if (get_user(data->owner, &data32->owner)) goto error; 
switch (data->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: diff --git a/sound/core/timer.c b/sound/core/timer.c index c8b8d7a01917f7e9a9a6b4a52b745b0408c32623..626f47b322cc30af81fce8db4c655ee0b76194a3 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) else timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : - SNDRV_TIMER_EVENT_CONTINUE); + SNDRV_TIMER_EVENT_PAUSE); unlock: spin_unlock_irqrestore(&timer->lock, flags); return result; @@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop) list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : - SNDRV_TIMER_EVENT_CONTINUE); + SNDRV_TIMER_EVENT_PAUSE); spin_unlock(&timeri->timer->lock); } spin_unlock_irqrestore(&slave_active_lock, flags); diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c index 8632301489fa66e9973b40330ac28a75026c20af..b67de2bb06a2f3ac6953ca13b8c22e43511a8d78 100644 --- a/sound/core/vmaster.c +++ b/sound/core/vmaster.c @@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave) return -ENOMEM; uctl->id = slave->slave.id; err = slave->slave.get(&slave->slave, uctl); + if (err < 0) + goto error; for (ch = 0; ch < slave->info.count; ch++) slave->vals[ch] = uctl->value.integer.value[ch]; + error: kfree(uctl); - return 0; + return err < 0 ? 
err : 0; } /* get the slave ctl info and save the initial values */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 738e1fe903120791526ad2fcd3b4eaa18f017c44..62fbdbe74b938d6bd16560db23211dbbc6e08035 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2208,6 +2208,8 @@ static struct snd_pci_quirk power_save_blacklist[] = { SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), + /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */ + SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0), /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), {} diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 3abf82563408fde68c8d5398ef6cf1f7592c9b42..cf3b905b4eadfa793b9248cdaf7a667ecc6165c9 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -801,12 +801,7 @@ static int hdmi_codec_probe(struct platform_device *pdev) static int hdmi_codec_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct hdmi_codec_priv *hcp; - - hcp = dev_get_drvdata(dev); - kfree(hcp->chmap_info); - snd_soc_unregister_codec(dev); + snd_soc_unregister_codec(&pdev->dev); return 0; } diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c index a086c35f91bb94bfe879ebc743fc81177e787118..79a9fdf94d3840df55b207b2c839e2c8e9af47e5 100644 --- a/sound/soc/intel/common/sst-firmware.c +++ b/sound/soc/intel/common/sst-firmware.c @@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst) struct sst_pdata *sst_pdata = sst->pdata; struct sst_dma *dma; struct resource mem; - const char *dma_dev_name; int ret = 0; if (sst->pdata->resindex_dma_base == -1) @@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst) * is attached to the ADSP IP. 
*/ switch (sst->pdata->dma_engine) { case SST_DMA_TYPE_DW: - dma_dev_name = "dw_dmac"; break; default: dev_err(sst->dev, "error: invalid DMA engine %d\n", diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig index b0825370d262f43730a9e96afd8a6d8239579797..957046ac6c8ca1438dde183a0382489f5ccd8ef8 100644 --- a/sound/soc/rockchip/Kconfig +++ b/sound/soc/rockchip/Kconfig @@ -56,6 +56,9 @@ config SND_SOC_RK3288_HDMI_ANALOG depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP select SND_SOC_ROCKCHIP_I2S select SND_SOC_HDMI_CODEC + select SND_SOC_ES8328_I2C + select SND_SOC_ES8328_SPI if SPI_MASTER + select DRM_DW_HDMI_I2S_AUDIO if DRM_DW_HDMI help Say Y or M here if you want to add support for SoC audio on Rockchip RK3288 boards using an analog output and the built-in HDMI audio. diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index 10a4da06c0a1477388758f30b2aaf61ae8a5ef12..f058f2bdd519cda894faa89d56b320310182008f 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -653,8 +653,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, tmp |= mod_slave; break; case SND_SOC_DAIFMT_CBS_CFS: - /* Set default source clock in Master mode */ - if (i2s->rclk_srcrate == 0) + /* + * Set default source clock in Master mode, only when the + * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any + * clock configuration assigned in DT is not overwritten. 
+ */ + if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, 0, SND_SOC_CLOCK_IN); break; @@ -878,6 +882,11 @@ static int config_setup(struct i2s_dai *i2s) return 0; if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { + struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; + + if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc)) + i2s->rclk_srcrate = clk_get_rate(rclksrc); + psr = i2s->rclk_srcrate / i2s->frmclk / rfs; writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); dev_dbg(&i2s->pdev->dev, diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c index 44b6de5a331aeb0e9b21b4c67697baff3b0f31d7..06a31a9585a05afa8d605bde5afbfd7d5c86a384 100644 --- a/sound/soc/samsung/odroid.c +++ b/sound/soc/samsung/odroid.c @@ -36,23 +36,26 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream, { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card); - unsigned int pll_freq, rclk_freq; + unsigned int pll_freq, rclk_freq, rfs; int ret; switch (params_rate(params)) { - case 32000: case 64000: - pll_freq = 131072006U; + pll_freq = 196608001U; + rfs = 384; break; case 44100: case 88200: case 176400: pll_freq = 180633609U; + rfs = 512; break; + case 32000: case 48000: case 96000: case 192000: pll_freq = 196608001U; + rfs = 512; break; default: return -EINVAL; @@ -67,7 +70,7 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream, * frequency values due to the EPLL output frequency not being exact * multiple of the audio sampling rate. 
*/ - rclk_freq = params_rate(params) * 256 + 1; + rclk_freq = params_rate(params) * rfs + 1; ret = clk_set_rate(priv->sclk_i2s, rclk_freq); if (ret < 0) diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 782c580b7aa32e53eda7a7a46db832f4c6b1d6a6..e5049fbfc4f1770db65f01d2eb6495d55413557a 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1276,6 +1276,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( kfree(sm); continue; } + + /* create any TLV data */ + soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr); } return kc; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 00fc481081ff4d17a4709dfa9b8cd1cd2a1be877..ff8848c18835ccada618ccba0b395616b3084e97 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -981,6 +981,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, } break; + case USB_ID(0x0d8c, 0x0103): + if (!strcmp(kctl->id.name, "PCM Playback Volume")) { + usb_audio_info(chip, + "set volume quirk for CM102-A+/102S+\n"); + cval->min = -256; + } + break; + case USB_ID(0x0471, 0x0101): case USB_ID(0x0471, 0x0104): case USB_ID(0x0471, 0x0105): diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 58f94f399efb01f078c27ae80c14648035de0796..ad14d6b78bdcfc6f8ee19d3c4202d7787fd2d7b5 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1155,24 +1155,27 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) return false; } -/* Marantz/Denon USB DACs need a vendor cmd to switch +/* ITF-USB DSD based DACs need a vendor cmd to switch * between PCM and native DSD mode + * (2 altsets version) */ -static bool is_marantz_denon_dac(unsigned int id) +static bool is_itf_usb_dsd_2alts_dac(unsigned int id) { switch (id) { case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ + case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */ return true; } return false; } -/* TEAC 
UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch - * between PCM/DOP and native DSD mode +/* ITF-USB DSD based DACs need a vendor cmd to switch + * between PCM and native DSD mode + * (3 altsets version) */ -static bool is_teac_dsd_dac(unsigned int id) +static bool is_itf_usb_dsd_3alts_dac(unsigned int id) { switch (id) { case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */ @@ -1189,7 +1192,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs, struct usb_device *dev = subs->dev; int err; - if (is_marantz_denon_dac(subs->stream->chip->usb_id)) { + if (is_itf_usb_dsd_2alts_dac(subs->stream->chip->usb_id)) { /* First switch to alt set 0, otherwise the mode switch cmd * will not be accepted by the DAC */ @@ -1210,7 +1213,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs, break; } mdelay(20); - } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) { + } else if (is_itf_usb_dsd_3alts_dac(subs->stream->chip->usb_id)) { /* Vendor mode switch cmd is required. 
*/ switch (fmt->altsetting) { case 3: /* DSD mode (DSD_U32) requested */ @@ -1306,10 +1309,10 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); - /* Marantz/Denon devices with USB DAC functionality need a delay + /* ITF-USB DSD based DACs functionality need a delay * after each class compliant request */ - if (is_marantz_denon_dac(chip->usb_id) + if (is_itf_usb_dsd_2alts_dac(chip->usb_id) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); @@ -1396,14 +1399,14 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, break; } - /* Denon/Marantz devices with USB DAC functionality */ - if (is_marantz_denon_dac(chip->usb_id)) { + /* ITF-USB DSD based DACs (2 altsets version) */ + if (is_itf_usb_dsd_2alts_dac(chip->usb_id)) { if (fp->altsetting == 2) return SNDRV_PCM_FMTBIT_DSD_U32_BE; } - /* TEAC devices with USB DAC functionality */ - if (is_teac_dsd_dac(chip->usb_id)) { + /* ITF-USB DSD based DACs (3 altsets version) */ + if (is_itf_usb_dsd_3alts_dac(chip->usb_id)) { if (fp->altsetting == 3) return SNDRV_PCM_FMTBIT_DSD_U32_BE; } diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 1f57bbe82b6fb8582c2a3a1617345266c22e33e8..df24fc8da1bc22e1a437a64dcf36a91f842f9424 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -180,6 +180,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_VFP_FPINST 0x1009 #define KVM_REG_ARM_VFP_FPINST2 0x100A +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h 
b/tools/arch/arm64/include/uapi/asm/kvm.h index 51149ec75fe480b324fd74d2697579a936438fdc..9f74ce5899f007b1bda2b0f221bc6db73f4542aa 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -200,6 +200,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 61d6049f4c1eced70ad3fef32f99b3a438da3eab..8aaec831053af0bf4e6dcb6a8fda63737f65a93d 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -607,6 +607,8 @@ struct kvm_ppc_rmmu_info { #define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc) #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) +#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) + /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs */ diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 9ad172dcd912d5763b0bf954617c9e398ad31aa8..a3938db010f77a69b53924e7bf9252e8a12fb0a4 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h @@ -228,6 +228,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_RICCB (1UL << 7) #define KVM_SYNC_FPRS (1UL << 8) #define KVM_SYNC_GSCB (1UL << 9) +#define KVM_SYNC_BPBC (1UL << 10) /* length and alignment of the sdnx as a power of two */ #define SDNXC 8 #define SDNXL (1UL << SDNXC) @@ 
-251,7 +252,9 @@ struct kvm_sync_regs { }; __u8 reserved[512]; /* for future vector expansion */ __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ - __u8 padding1[52]; /* riccb needs to be 64byte aligned */ + __u8 bpbc : 1; /* bp mode */ + __u8 reserved2 : 7; + __u8 padding1[51]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ union { diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 793690fbda3625defd130262db1e94b57152c0bc..403e97d5e24322775dc01953ef32f8f4e3dd9276 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,173 +13,176 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 18 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NCAPINTS 19 /* N 32-bit words worth of info */ +#define NBUGINTS 1 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used * in /proc/cpuinfo instead of the macro name. If the string is "", * this feature bit is not displayed in /proc/cpuinfo at all. + * + * When adding new features here that depend on other features, + * please update the table in kernel/cpu/cpuid-deps.c as well. 
*/ -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ -#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ -#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ -#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ - /* (plus FCMOVcc, FCOMI with FPU) */ -#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ -#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ -#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ -#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ -#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ -#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ -#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ -#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ -#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ +/* Intel-defined CPU 
features, CPUID level 0x00000001 (EDX), word 0 */ +#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ +#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ +#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ +#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ +#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ +#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ +#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ +#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ +#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ +#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ +#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ +#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ +#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ +#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ +#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */ +#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ +#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ +#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ +#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ +#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ +#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ +#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ +#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ +#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ +#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ +#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ +#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ +#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ +#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ +#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ /* AMD-defined CPU features, CPUID level 0x80000001, 
word 1 */ /* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ -#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ -#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ +#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ +#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */ +#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ +#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ +#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ +#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ +#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ +#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */ +#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */ +#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */ /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ +#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ +#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ +#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ /* Other features, Linux-defined mapping, word 3 */ /* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ -#define 
X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ -#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ -#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ -#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ -#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ -#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ -#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ -#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ -#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ -#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ -#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ -#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ -#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ -#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ -#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ -#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ -#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ -#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ -#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ -#define X86_FEATURE_APERFMPERF ( 
3*32+28) /* APERFMPERF */ -#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ -#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ +#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ +#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ +#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ +#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ + +/* CPU types for specific tunings: */ +#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ +#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ +#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ +#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ +#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ +#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */ +#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */ +#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ +#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ +#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ +#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ +#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ +#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ +#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */ +#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ +#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ +#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ +#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ +#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */ +#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ +#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ +#define 
X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ +#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */ +#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */ +#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */ +#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ +#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ -#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ -#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ -#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ -#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ -#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ -#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ -#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ -#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ -#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ -#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ -#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ -#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ -#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ -#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ -#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ -#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ -#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ -#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ -#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT 
instruction */ -#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ -#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ -#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ -#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ -#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ -#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ -#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ -#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ +/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */ +#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ +#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ +#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ +#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */ +#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */ +#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ +#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */ +#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ +#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ +#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ +#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ +#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ +#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */ +#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ +#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */ +#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ +#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ +#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ +#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ +#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */ 
+#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ +#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ +#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */ +#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ +#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */ +#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */ +#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ +#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */ +#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */ +#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ -#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ -#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ -#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ -#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ +#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ +#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ +#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ +#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ +#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ +#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE 
enabled */ +#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ +#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ -#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ -#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ -#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ -#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ -#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ -#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ -#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ -#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ -#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ -#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ -#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ -#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ -#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ -#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ -#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ -#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ -#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ -#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ -#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ -#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ -#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ -#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ -#define 
X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ +/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ +#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */ +#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ +#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ +#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ +#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ +#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ +#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ +#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ +#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ +#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ +#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ +#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ +#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ +#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ +#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */ +#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ +#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */ +#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */ +#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */ +#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */ +#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */ +#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) 
*/ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -187,146 +190,185 @@ * * Reuse free bits when adding new feature flags! */ -#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ -#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ -#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ -#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ -#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ -#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ - -#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ -#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */ +#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ +#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ +#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ +#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ +#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ +#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ +#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ +#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ +#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ +#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ +#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +#define 
X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ -#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ -#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ -#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ +#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */ +#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ -#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ +#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ +#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ +#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. 
*/ +#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */ +#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */ +#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */ +#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */ /* Virtualization flags: Linux defined, word 8 */ -#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ +#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ +#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ +#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ -#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ +#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ -#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ -#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ -#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ -#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ -#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ -#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ -#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation 
extensions */ -#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ -#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ -#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ -#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ -#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ -#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ -#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ -#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ -#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ -#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ -#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ -#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ -#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ -#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ -#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ -#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ -#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ -#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ -#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ -#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ +#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ +#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */ +#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ +#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ +#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ +#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ +#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ +#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ +#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ +#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ +#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ +#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ +#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ +#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */ +#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ +#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ +#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ +#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ +#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */ +#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ +#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ +#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ -/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ -#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ -#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ -#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with 
ECX = 1 */ -#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ +/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */ +#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */ +#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */ +#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ +#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ -#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ +#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ -/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ -#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ -#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ -#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ +#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ +#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ -/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ -#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ -#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ +/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ +#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ +#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ +#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ +#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ +#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ 
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ -/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ -#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ -#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ -#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ -#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ -#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ -#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ -#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ +#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ +#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ -/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ -#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ -#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ -#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ -#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ -#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ -#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ -#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ +/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ +#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ +#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt 
Controller */ +#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ +#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ -/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ -#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ -#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ -#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ -#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ -#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ +#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ +#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ +#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ +#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ +#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ +#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ +#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ +#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */ +#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */ +#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */ +#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */ +#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ +#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ -/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ -#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ 
-#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ -#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ +/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ +#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ +#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */ +#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */ + +/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ +#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ +#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ +#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ +#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ +#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */ /* * BUG word(s) */ -#define X86_BUG(x) (NCAPINTS*32 + (x)) +#define X86_BUG(x) (NCAPINTS*32 + (x)) -#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ -#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ -#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ -#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ -#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ -#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ -#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ -#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ +#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ +#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ +#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ +#define 
X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ +#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ +#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ +#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ +#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ #ifdef CONFIG_X86_32 /* * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional * to avoid confusion. */ -#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ +#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ #endif -#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ -#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ -#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ -#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ +#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ +#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ +#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ +#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ +#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ +#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ + #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/disabled-features.h 
b/tools/arch/x86/include/asm/disabled-features.h index c10c9128f54e6b7296014a74e7a253a1eedaacd9..c6a3af198294e6128b623197107ac6e24cd30521 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -44,6 +44,12 @@ # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) #endif +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define DISABLE_PTI 0 +#else +# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -54,7 +60,7 @@ #define DISABLED_MASK4 (DISABLE_PCID) #define DISABLED_MASK5 0 #define DISABLED_MASK6 0 -#define DISABLED_MASK7 0 +#define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) #define DISABLED_MASK10 0 @@ -65,6 +71,7 @@ #define DISABLED_MASK15 0 #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) #define DISABLED_MASK17 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define DISABLED_MASK18 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index d91ba04dd00709b7e549ced791759c12cf70d5d0..fb3a6de7440bce69c794449ecf5740e8df924f87 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -106,6 +106,7 @@ #define REQUIRED_MASK15 0 #define REQUIRED_MASK16 (NEED_LA57) #define REQUIRED_MASK17 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) +#define REQUIRED_MASK18 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c index 457a1521f32fe6cc6cb3493cdde8105c187c0d44..785f4e95148cf13a48458c00147162ef503d6ef6 100644 --- a/tools/hv/hv_fcopy_daemon.c +++ b/tools/hv/hv_fcopy_daemon.c @@ -23,13 +23,14 @@ #include #include #include 
+#include #include #include #include #include static int target_fd; -static char target_fname[W_MAX_PATH]; +static char target_fname[PATH_MAX]; static unsigned long long filesize; static int hv_start_fcopy(struct hv_start_fcopy *smsg) diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c index b2b4ebffab8ca491cb31f47c52c2e8406a546328..34031a297f0246116d7cff92e6226a8900e4c4a8 100644 --- a/tools/hv/hv_vss_daemon.c +++ b/tools/hv/hv_vss_daemon.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 7e99999d6236fa2940fa2b565442e8b1b1331407..857bad91c454046200c81e260c9850933df0981b 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_SMT_POSSIBLE 147 #define KVM_CAP_HYPERV_SYNIC2 148 #define KVM_CAP_HYPERV_VP_INDEX 149 +#define KVM_CAP_S390_BPB 152 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/objtool/check.c b/tools/objtool/check.c index c8b8b7101c6f9db8ee914f9f8cfd3bf76a2df6ba..e128d1c71c3068056ed84e7aa5f2a7fdbdd34103 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file, return next; } +static struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *next = list_next_entry(insn, list); + struct symbol *func = insn->func; + + if (!func) + return NULL; + + if (&next->list != &file->insn_list && next->func == func) + return next; + + /* Check if we're already in the subfunction: */ + if (func == func->cfunc) + return NULL; + + /* Move to the subfunction: */ + return find_insn(file, func->cfunc->sec, func->cfunc->offset); +} + +#define func_for_each_insn_all(file, func, insn) \ + for (insn = find_insn(file, func->sec, func->offset); \ + insn; \ + insn = next_insn_same_func(file, insn)) + #define 
func_for_each_insn(file, func, insn) \ for (insn = find_insn(file, func->sec, func->offset); \ insn && &insn->list != &file->insn_list && \ @@ -148,10 +173,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, if (!strcmp(func->name, global_noreturns[i])) return 1; - if (!func->sec) + if (!func->len) return 0; - func_for_each_insn(file, func, insn) { + insn = find_insn(file, func->sec, func->offset); + if (!insn->func) + return 0; + + func_for_each_insn_all(file, func, insn) { empty = false; if (insn->type == INSN_RETURN) @@ -166,35 +195,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, * case, the function's dead-end status depends on whether the target * of the sibling call returns. */ - func_for_each_insn(file, func, insn) { - if (insn->sec != func->sec || - insn->offset >= func->offset + func->len) - break; - + func_for_each_insn_all(file, func, insn) { if (insn->type == INSN_JUMP_UNCONDITIONAL) { struct instruction *dest = insn->jump_dest; - struct symbol *dest_func; if (!dest) /* sibling call to another file */ return 0; - if (dest->sec != func->sec || - dest->offset < func->offset || - dest->offset >= func->offset + func->len) { - /* local sibling call */ - dest_func = find_symbol_by_offset(dest->sec, - dest->offset); - if (!dest_func) - continue; + if (dest->func && dest->func->pfunc != insn->func->pfunc) { + /* local sibling call */ if (recursion == 5) { - WARN_FUNC("infinite recursion (objtool bug!)", - dest->sec, dest->offset); - return -1; + /* + * Infinite recursion: two functions + * have sibling calls to each other. + * This is a very rare case. It means + * they aren't dead ends. 
+ */ + return 0; } - return __dead_end_function(file, dest_func, + return __dead_end_function(file, dest->func, recursion + 1); } } @@ -421,7 +443,7 @@ static void add_ignores(struct objtool_file *file) if (!ignore_func(file, func)) continue; - func_for_each_insn(file, func, insn) + func_for_each_insn_all(file, func, insn) insn->ignore = true; } } @@ -781,30 +803,35 @@ static int add_special_section_alts(struct objtool_file *file) return ret; } -static int add_switch_table(struct objtool_file *file, struct symbol *func, - struct instruction *insn, struct rela *table, - struct rela *next_table) +static int add_switch_table(struct objtool_file *file, struct instruction *insn, + struct rela *table, struct rela *next_table) { struct rela *rela = table; struct instruction *alt_insn; struct alternative *alt; + struct symbol *pfunc = insn->func->pfunc; + unsigned int prev_offset = 0; list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { if (rela == next_table) break; - if (rela->sym->sec != insn->sec || - rela->addend <= func->offset || - rela->addend >= func->offset + func->len) + /* Make sure the switch table entries are consecutive: */ + if (prev_offset && rela->offset != prev_offset + 8) break; - alt_insn = find_insn(file, insn->sec, rela->addend); - if (!alt_insn) { - WARN("%s: can't find instruction at %s+0x%x", - file->rodata->rela->name, insn->sec->name, - rela->addend); - return -1; - } + /* Detect function pointers from contiguous objects: */ + if (rela->sym->sec == pfunc->sec && + rela->addend == pfunc->offset) + break; + + alt_insn = find_insn(file, rela->sym->sec, rela->addend); + if (!alt_insn) + break; + + /* Make sure the jmp dest is in the function or subfunction: */ + if (alt_insn->func->pfunc != pfunc) + break; alt = malloc(sizeof(*alt)); if (!alt) { @@ -814,6 +841,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func, alt->insn = alt_insn; list_add_tail(&alt->list, &insn->alts); + prev_offset = rela->offset; 
+ } + + if (!prev_offset) { + WARN_FUNC("can't find switch jump table", + insn->sec, insn->offset); + return -1; } return 0; @@ -868,40 +902,21 @@ static struct rela *find_switch_table(struct objtool_file *file, { struct rela *text_rela, *rodata_rela; struct instruction *orig_insn = insn; + unsigned long table_offset; - text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (text_rela && text_rela->sym == file->rodata->sym) { - /* case 1 */ - rodata_rela = find_rela_by_dest(file->rodata, - text_rela->addend); - if (rodata_rela) - return rodata_rela; - - /* case 2 */ - rodata_rela = find_rela_by_dest(file->rodata, - text_rela->addend + 4); - if (!rodata_rela) - return NULL; - - file->ignore_unreachables = true; - return rodata_rela; - } - - /* case 3 */ /* * Backward search using the @first_jump_src links, these help avoid * much of the 'in between' code. Which avoids us getting confused by * it. */ - for (insn = list_prev_entry(insn, list); - + for (; &insn->list != &file->insn_list && insn->sec == func->sec && insn->offset >= func->offset; insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { - if (insn->type == INSN_JUMP_DYNAMIC) + if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) break; /* allow small jumps within the range */ @@ -917,18 +932,29 @@ static struct rela *find_switch_table(struct objtool_file *file, if (!text_rela || text_rela->sym != file->rodata->sym) continue; + table_offset = text_rela->addend; + if (text_rela->type == R_X86_64_PC32) + table_offset += 4; + /* * Make sure the .rodata address isn't associated with a * symbol. gcc jump tables are anonymous data. 
*/ - if (find_symbol_containing(file->rodata, text_rela->addend)) + if (find_symbol_containing(file->rodata, table_offset)) continue; - rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend); - if (!rodata_rela) - continue; + rodata_rela = find_rela_by_dest(file->rodata, table_offset); + if (rodata_rela) { + /* + * Use of RIP-relative switch jumps is quite rare, and + * indicates a rare GCC quirk/bug which can leave dead + * code behind. + */ + if (text_rela->type == R_X86_64_PC32) + file->ignore_unreachables = true; - return rodata_rela; + return rodata_rela; + } } return NULL; @@ -942,7 +968,7 @@ static int add_func_switch_tables(struct objtool_file *file, struct rela *rela, *prev_rela = NULL; int ret; - func_for_each_insn(file, func, insn) { + func_for_each_insn_all(file, func, insn) { if (!last) last = insn; @@ -973,8 +999,7 @@ static int add_func_switch_tables(struct objtool_file *file, * the beginning of another switch table in the same function. */ if (prev_jump) { - ret = add_switch_table(file, func, prev_jump, prev_rela, - rela); + ret = add_switch_table(file, prev_jump, prev_rela, rela); if (ret) return ret; } @@ -984,7 +1009,7 @@ static int add_func_switch_tables(struct objtool_file *file, } if (prev_jump) { - ret = add_switch_table(file, func, prev_jump, prev_rela, NULL); + ret = add_switch_table(file, prev_jump, prev_rela, NULL); if (ret) return ret; } @@ -1748,15 +1773,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, while (1) { next_insn = next_insn_same_sec(file, insn); - - if (file->c_file && func && insn->func && func != insn->func) { + if (file->c_file && func && insn->func && func != insn->func->pfunc) { WARN("%s() falls through to next function %s()", func->name, insn->func->name); return 1; } - if (insn->func) - func = insn->func; + func = insn->func ? 
insn->func->pfunc : NULL; if (func && insn->ignore) { WARN_FUNC("BUG: why am I validating an ignored function?", @@ -1777,7 +1800,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, i = insn; save_insn = NULL; - func_for_each_insn_continue_reverse(file, func, i) { + func_for_each_insn_continue_reverse(file, insn->func, i) { if (i->save) { save_insn = i; break; @@ -1864,7 +1887,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, case INSN_JUMP_UNCONDITIONAL: if (insn->jump_dest && (!func || !insn->jump_dest->func || - func == insn->jump_dest->func)) { + insn->jump_dest->func->pfunc == func)) { ret = validate_branch(file, insn->jump_dest, state); if (ret) @@ -2059,7 +2082,7 @@ static int validate_functions(struct objtool_file *file) for_each_sec(file, sec) { list_for_each_entry(func, &sec->symbol_list, list) { - if (func->type != STT_FUNC) + if (func->type != STT_FUNC || func->pfunc != func) continue; insn = find_insn(file, sec, func->offset); diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index c1c338661699788c8189becaab8465ed1bdcd775..4e60e105583ee803916589ca56df0e81e12b8fb3 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) return NULL; } +struct symbol *find_symbol_by_name(struct elf *elf, const char *name) +{ + struct section *sec; + struct symbol *sym; + + list_for_each_entry(sec, &elf->sections, list) + list_for_each_entry(sym, &sec->symbol_list, list) + if (!strcmp(sym->name, name)) + return sym; + + return NULL; +} + struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) { struct symbol *sym; @@ -203,10 +216,11 @@ static int read_sections(struct elf *elf) static int read_symbols(struct elf *elf) { - struct section *symtab; - struct symbol *sym; + struct section *symtab, *sec; + struct symbol *sym, *pfunc; struct list_head *entry, *tmp; int symbols_nr, 
i; + char *coldstr; symtab = find_section_by_name(elf, ".symtab"); if (!symtab) { @@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf) hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx); } + /* Create parent/child links for any cold subfunctions */ + list_for_each_entry(sec, &elf->sections, list) { + list_for_each_entry(sym, &sec->symbol_list, list) { + if (sym->type != STT_FUNC) + continue; + sym->pfunc = sym->cfunc = sym; + coldstr = strstr(sym->name, ".cold."); + if (coldstr) { + coldstr[0] = '\0'; + pfunc = find_symbol_by_name(elf, sym->name); + coldstr[0] = '.'; + + if (!pfunc) { + WARN("%s(): can't find parent function", + sym->name); + goto err; + } + + sym->pfunc = pfunc; + pfunc->cfunc = sym; + } + } + } + return 0; err: diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index d86e2ff14466148d3b8ae46065274956a5b3c4f8..de5cd2ddded987bf524be46e446bd1e814422761 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -61,6 +61,7 @@ struct symbol { unsigned char bind, type; unsigned long offset; unsigned int len; + struct symbol *pfunc, *cfunc; }; struct rela { @@ -86,6 +87,7 @@ struct elf { struct elf *elf_open(const char *name, int flags); struct section *find_section_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); +struct symbol *find_symbol_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index 643cc4ba6872511d7941fad057ac875f5382deb2..3e5135dded16bde491da1e711db172358f5e03c4 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -31,5 +31,6 @@ config.mak.autogen .config-detected util/intel-pt-decoder/inat-tables.c arch/*/include/generated/ 
+trace/beauty/generated/ pmu-events/pmu-events.c pmu-events/jevents diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 91ef44bfaf3e3891cfa22ec1b8f71ba5c2216b5a..2a858ea56a81ebd95a6c902bc131c9cbb115ec0d 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -368,7 +368,8 @@ LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive ifeq ($(USE_CLANG), 1) CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization - LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a)) + CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l)) + LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so)) LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group endif diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 9df0af17e9c27799ba0565e6260232c8e9d1c67d..52486c90ab936ba19400bcf542967e10a1753c06 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -2185,11 +2185,16 @@ static int add_default_attributes(void) return 0; if (transaction_run) { + struct parse_events_error errinfo; + if (pmu_have_event("cpu", "cycles-ct") && pmu_have_event("cpu", "el-start")) - err = parse_events(evsel_list, transaction_attrs, NULL); + err = parse_events(evsel_list, transaction_attrs, + &errinfo); else - err = parse_events(evsel_list, transaction_limited_attrs, NULL); + err = parse_events(evsel_list, + transaction_limited_attrs, + &errinfo); if (err) { fprintf(stderr, "Cannot set up transaction events\n"); return -1; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index dd57978b20962fd495fa8f74a067d066c24727ee..3103a33c13a841f72e38c790adb7bf111f26b7d7 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1080,8 +1080,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) static int perf_top_config(const 
char *var, const char *value, void *cb __maybe_unused) { - if (!strcmp(var, "top.call-graph")) - var = "call-graph.record-mode"; /* fall-through */ + if (!strcmp(var, "top.call-graph")) { + var = "call-graph.record-mode"; + return perf_default_config(var, value, cb); + } if (!strcmp(var, "top.children")) { symbol_conf.cumulate_callchain = perf_config_bool(var, value); return 0; diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c index 260418969120fd2c4d2eff6ab3742539589c8d9c..2f008067d989e0a895a00006aa78aa1ed0349f16 100644 --- a/tools/perf/tests/dwarf-unwind.c +++ b/tools/perf/tests/dwarf-unwind.c @@ -37,6 +37,19 @@ static int init_live_machine(struct machine *machine) mmap_handler, machine, true, 500); } +/* + * We need to keep these functions global, despite the + * fact that they are used only locally in this object, + * in order to keep them around even if the binary is + * stripped. If they are gone, the unwind check for + * symbol fails. + */ +int test_dwarf_unwind__thread(struct thread *thread); +int test_dwarf_unwind__compare(void *p1, void *p2); +int test_dwarf_unwind__krava_3(struct thread *thread); +int test_dwarf_unwind__krava_2(struct thread *thread); +int test_dwarf_unwind__krava_1(struct thread *thread); + #define MAX_STACK 8 static int unwind_entry(struct unwind_entry *entry, void *arg) @@ -45,12 +58,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) char *symbol = entry->sym ? 
entry->sym->name : NULL; static const char *funcs[MAX_STACK] = { "test__arch_unwind_sample", - "unwind_thread", - "compare", + "test_dwarf_unwind__thread", + "test_dwarf_unwind__compare", "bsearch", - "krava_3", - "krava_2", - "krava_1", + "test_dwarf_unwind__krava_3", + "test_dwarf_unwind__krava_2", + "test_dwarf_unwind__krava_1", "test__dwarf_unwind" }; /* @@ -77,7 +90,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) return strcmp((const char *) symbol, funcs[idx]); } -static noinline int unwind_thread(struct thread *thread) +noinline int test_dwarf_unwind__thread(struct thread *thread) { struct perf_sample sample; unsigned long cnt = 0; @@ -108,7 +121,7 @@ static noinline int unwind_thread(struct thread *thread) static int global_unwind_retval = -INT_MAX; -static noinline int compare(void *p1, void *p2) +noinline int test_dwarf_unwind__compare(void *p1, void *p2) { /* Any possible value should be 'thread' */ struct thread *thread = *(struct thread **)p1; @@ -117,17 +130,17 @@ static noinline int compare(void *p1, void *p2) /* Call unwinder twice for both callchain orders. 
*/ callchain_param.order = ORDER_CALLER; - global_unwind_retval = unwind_thread(thread); + global_unwind_retval = test_dwarf_unwind__thread(thread); if (!global_unwind_retval) { callchain_param.order = ORDER_CALLEE; - global_unwind_retval = unwind_thread(thread); + global_unwind_retval = test_dwarf_unwind__thread(thread); } } return p1 - p2; } -static noinline int krava_3(struct thread *thread) +noinline int test_dwarf_unwind__krava_3(struct thread *thread) { struct thread *array[2] = {thread, thread}; void *fp = &bsearch; @@ -141,18 +154,19 @@ static noinline int krava_3(struct thread *thread) size_t, int (*)(void *, void *)); _bsearch = fp; - _bsearch(array, &thread, 2, sizeof(struct thread **), compare); + _bsearch(array, &thread, 2, sizeof(struct thread **), + test_dwarf_unwind__compare); return global_unwind_retval; } -static noinline int krava_2(struct thread *thread) +noinline int test_dwarf_unwind__krava_2(struct thread *thread) { - return krava_3(thread); + return test_dwarf_unwind__krava_3(thread); } -static noinline int krava_1(struct thread *thread) +noinline int test_dwarf_unwind__krava_1(struct thread *thread) { - return krava_2(thread); + return test_dwarf_unwind__krava_2(thread); } int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused) @@ -189,7 +203,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu goto out; } - err = krava_1(thread); + err = test_dwarf_unwind__krava_1(thread); thread__put(thread); out: diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh index a2f757da49d9bf331307485f6c6747b72de7fe8b..73bea00f590f93ba8eac52395a42a1a87cc90082 100755 --- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh @@ -21,12 +21,12 @@ trace_libc_inet_pton_backtrace() { expected[3]=".*packets transmitted.*" expected[4]="rtt min.*" 
expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)" - expected[6]=".*inet_pton[[:space:]]\($libc\)$" + expected[6]=".*inet_pton[[:space:]]\($libc|inlined\)$" case "$(uname -m)" in s390x) eventattr='call-graph=dwarf' - expected[7]="gaih_inet[[:space:]]\(inlined\)$" - expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$" + expected[7]="gaih_inet.*[[:space:]]\($libc|inlined\)$" + expected[8]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$" expected[9]="main[[:space:]]\(.*/bin/ping.*\)$" expected[10]="__libc_start_main[[:space:]]\($libc\)$" expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$" diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index f6789fb029d688fa165744a0f9bf264d3791f4ac..884cad122acf43805f53c49ac4c75b1a1c14c9c1 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -125,7 +125,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest if (pair && UM(pair->start) == mem_start) { next_pair: - if (strcmp(sym->name, pair->name) == 0) { + if (arch__compare_symbol_names(sym->name, pair->name) == 0) { /* * kallsyms don't have the symbol end, so we * set that by using the next symbol start - 1, diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 8f7f59d1a2b5913c2a3027a89384011dab86c9e5..0c486d2683c4ea85927abc735e6156c8bb3fe28e 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -312,6 +312,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) struct map_symbol *ms = ab->b.priv; struct symbol *sym = ms->sym; u8 pcnt_width = annotate_browser__pcnt_width(ab); + int width = 0; /* PLT symbols contain external offsets */ if (strstr(sym->name, "@plt")) @@ -335,13 +336,17 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) to = (u64)btarget->idx; } + if (ab->have_cycles) + width = IPC_WIDTH + CYCLES_WIDTH; + 
ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS); - __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width, + __ui_browser__line_arrow(browser, + pcnt_width + 2 + ab->addr_width + width, from, to); if (is_fused(ab, cursor)) { ui_browser__mark_fused(browser, - pcnt_width + 3 + ab->addr_width, + pcnt_width + 3 + ab->addr_width + width, from - 1, to > from ? true : false); } diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index 1bfc946e37dcdaf4f3c92c361fca029df6893938..bf31ceab33bd487d0021ccd9818384dde51fd371 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp @@ -9,6 +9,7 @@ * Copyright (C) 2016 Huawei Inc. */ +#include "clang/Basic/Version.h" #include "clang/CodeGen/CodeGenAction.h" #include "clang/Frontend/CompilerInvocation.h" #include "clang/Frontend/CompilerInstance.h" @@ -58,7 +59,8 @@ createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path, FrontendOptions& Opts = CI->getFrontendOpts(); Opts.Inputs.clear(); - Opts.Inputs.emplace_back(Path, IK_C); + Opts.Inputs.emplace_back(Path, + FrontendOptions::getInputKindForExtension("c")); return CI; } @@ -71,10 +73,17 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags, Clang.setVirtualFileSystem(&*VFS); +#if CLANG_VERSION_MAJOR < 4 IntrusiveRefCntPtr CI = createCompilerInvocation(std::move(CFlags), Path, Clang.getDiagnostics()); Clang.setInvocation(&*CI); +#else + std::shared_ptr CI( + createCompilerInvocation(std::move(CFlags), Path, + Clang.getDiagnostics())); + Clang.setInvocation(CI); +#endif std::unique_ptr Act(new EmitLLVMOnlyAction(&*LLVMCtx)); if (!Clang.ExecuteAction(*Act)) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 097473600d943fc558140e3fde742636a2894d1d..5d420209505eb03aa6ab17d189f4fcb6cf60e237 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -878,7 +878,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter, * cumulated only one time to prevent entries more than 
100% * overhead. */ - he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1)); + he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1)); if (he_cache == NULL) return -ENOMEM; @@ -1043,8 +1043,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, if (err) return err; - iter->max_stack = max_stack_depth; - err = iter->ops->prepare_entry(iter, al); if (err) goto out; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index f6630cb95effc353deba329a77dc6a87e0f6279f..b99d68943f252808acc827551f3d92aed373ab53 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -107,7 +107,6 @@ struct hist_entry_iter { int curr; bool hide_unresolved; - int max_stack; struct perf_evsel *evsel; struct perf_sample *sample; diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 30cd0b296f1a76847122f009c2cd0f2d5b9109f4..8e61aad0ca3f7ff65ca7ef640d6f9c9c0e7b75a9 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -202,6 +202,13 @@ void idr_checks(void) idr_remove(&idr, 3); idr_remove(&idr, 0); + assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0); + idr_remove(&idr, 1); + for (i = 1; i < RADIX_TREE_MAP_SIZE; i++) + assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i); + idr_remove(&idr, 1 << 30); + idr_destroy(&idr); + for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { struct item *item = item_create(i, 0); assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 3c9c0bbe7dbb669786f6700c8418f35edc74b6b6..ea300e7818a70cddef099096fcfe92d54b470e84 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -122,6 +122,7 @@ ifdef INSTALL_PATH BUILD_TARGET=$$BUILD/$$TARGET; \ echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \ echo "echo ========================================" >> $(ALL_SCRIPT); \ + echo "[ -w 
/dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \ echo "cd $$TARGET" >> $(ALL_SCRIPT); \ make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \ echo "cd \$$ROOT" >> $(ALL_SCRIPT); \ diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 8b9470b5af6d593b736574cc935bd26b4d07a057..96c6238a4a1f4e2bd6dfcbf2cfde2181c5b77e96 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data) fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, 2, map_flags); if (fd < 0) { + if (errno == ENOMEM) + return; printf("Failed to create hashmap key=%d value=%d '%s'\n", i, j, strerror(errno)); exit(1); diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc new file mode 100644 index 0000000000000000000000000000000000000000..5ba73035e1d95ad45788610668950084844d1861 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -0,0 +1,46 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# description: Kprobe event string type argument + +[ -f kprobe_events ] || exit_unsupported # this is configurable + +echo 0 > events/enable +echo > kprobe_events + +case `uname -m` in +x86_64) + ARG2=%si + OFFS=8 +;; +i[3456]86) + ARG2=%cx + OFFS=4 +;; +aarch64) + ARG2=%x1 + OFFS=8 +;; +arm*) + ARG2=%r1 + OFFS=4 +;; +*) + echo "Please implement other architecture here" + exit_untested +esac + +: "Test get argument (1)" +echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string" > kprobe_events +echo 1 > events/kprobes/testprobe/enable +! 
echo test >> kprobe_events +tail -n 1 trace | grep -qe "testprobe.* arg1=\"test\"" + +echo 0 > events/kprobes/testprobe/enable +: "Test get argument (2)" +echo "p:testprobe create_trace_kprobe arg1=+0(+0(${ARG2})):string arg2=+0(+${OFFS}(${ARG2})):string" > kprobe_events +echo 1 > events/kprobes/testprobe/enable +! echo test1 test2 >> kprobe_events +tail -n 1 trace | grep -qe "testprobe.* arg1=\"test1\" arg2=\"test2\"" + +echo 0 > events/enable +echo > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc new file mode 100644 index 0000000000000000000000000000000000000000..231bcd2c4eb59e9d3528be1e181650424fe13130 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc @@ -0,0 +1,97 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# description: Kprobe event argument syntax + +[ -f kprobe_events ] || exit_unsupported # this is configurable + +grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue + +echo 0 > events/enable +echo > kprobe_events + +PROBEFUNC="vfs_read" +GOODREG= +BADREG= +GOODSYM="_sdata" +if ! grep -qw ${GOODSYM} /proc/kallsyms ; then + GOODSYM=$PROBEFUNC +fi +BADSYM="deaqswdefr" +SYMADDR=0x`grep -w ${GOODSYM} /proc/kallsyms | cut -f 1 -d " "` +GOODTYPE="x16" +BADTYPE="y16" + +case `uname -m` in +x86_64|i[3456]86) + GOODREG=%ax + BADREG=%ex +;; +aarch64) + GOODREG=%x0 + BADREG=%ax +;; +arm*) + GOODREG=%r0 + BADREG=%ax +;; +esac + +test_goodarg() # Good-args +{ + while [ "$1" ]; do + echo "p ${PROBEFUNC} $1" > kprobe_events + shift 1 + done; +} + +test_badarg() # Bad-args +{ + while [ "$1" ]; do + ! 
echo "p ${PROBEFUNC} $1" > kprobe_events + shift 1 + done; +} + +echo > kprobe_events + +: "Register access" +test_goodarg ${GOODREG} +test_badarg ${BADREG} + +: "Symbol access" +test_goodarg "@${GOODSYM}" "@${SYMADDR}" "@${GOODSYM}+10" "@${GOODSYM}-10" +test_badarg "@" "@${BADSYM}" "@${GOODSYM}*10" "@${GOODSYM}/10" \ + "@${GOODSYM}%10" "@${GOODSYM}&10" "@${GOODSYM}|10" + +: "Stack access" +test_goodarg "\$stack" "\$stack0" "\$stack1" +test_badarg "\$stackp" "\$stack0+10" "\$stack1-10" + +: "Retval access" +echo "r ${PROBEFUNC} \$retval" > kprobe_events +! echo "p ${PROBEFUNC} \$retval" > kprobe_events + +: "Comm access" +test_goodarg "\$comm" + +: "Indirect memory access" +test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \ + "+0(\$stack1)" "+10(@${GOODSYM}-10)" "+0(+10(+20(\$stack)))" +test_badarg "+(${GOODREG})" "(${GOODREG}+10)" "-(${GOODREG})" "(${GOODREG})" \ + "+10(\$comm)" "+0(${GOODREG})+10" + +: "Name assignment" +test_goodarg "varname=${GOODREG}" +test_badarg "varname=varname2=${GOODREG}" + +: "Type syntax" +test_goodarg "${GOODREG}:${GOODTYPE}" +test_badarg "${GOODREG}::${GOODTYPE}" "${GOODREG}:${BADTYPE}" \ + "${GOODTYPE}:${GOODREG}" + +: "Combination check" + +test_goodarg "\$comm:string" "+0(\$stack):string" +test_badarg "\$comm:x64" "\$stack:string" "${GOODREG}:string" + +echo > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc new file mode 100644 index 0000000000000000000000000000000000000000..4fda01a08da463d540c2e5fefcf9ad2d21deb0e0 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc @@ -0,0 +1,43 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# description: Kprobe events - probe points + +[ -f kprobe_events ] || exit_unsupported # this is configurable + +TARGET_FUNC=create_trace_kprobe + +dec_addr() { # hexaddr + printf "%d" "0x"`echo $1 | tail -c 8` +} + +set_offs() { # prev target next + A1=`dec_addr $1` + 
A2=`dec_addr $2` + A3=`dec_addr $3` + TARGET="0x$2" # an address + PREV=`expr $A1 - $A2` # offset to previous symbol + NEXT=+`expr $A3 - $A2` # offset to next symbol + OVERFLOW=+`printf "0x%x" ${PREV}` # overflow offset to previous symbol +} + +# We have to decode symbol addresses to get correct offsets. +# If the offset is not an instruction boundary, it cause -EILSEQ. +set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs` + +UINT_TEST=no +# printf "%x" -1 returns (unsigned long)-1. +if [ `printf "%x" -1 | wc -c` != 9 ]; then + UINT_TEST=yes +fi + +echo 0 > events/enable +echo > kprobe_events +echo "p:testprobe ${TARGET_FUNC}" > kprobe_events +echo "p:testprobe ${TARGET}" > kprobe_events +echo "p:testprobe ${TARGET_FUNC}${NEXT}" > kprobe_events +! echo "p:testprobe ${TARGET_FUNC}${PREV}" > kprobe_events +if [ "${UINT_TEST}" = yes ]; then +! echo "p:testprobe ${TARGET_FUNC}${OVERFLOW}" > kprobe_events +fi +echo > kprobe_events +clear_trace diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index cea4adcd42b8877f7e5a05d57a837bcc61c1d97d..a63e8453984d2793eb38b706236f31dc49527e9b 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile @@ -12,9 +12,9 @@ all: BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ - if [ -e $$DIR/$(TEST_PROGS) ]; then - rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; - fi + if [ -e $$DIR/$(TEST_PROGS) ]; then \ + rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \ + fi \ done override define RUN_TESTS diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile index 3926a0409dda3e09bc5c83c25cec31c44e622a16..36409cb7288c1eccffdaf5072a597ffa3868c001 100644 --- a/tools/testing/selftests/memfd/Makefile +++ b/tools/testing/selftests/memfd/Makefile @@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/ CFLAGS += -I../../../../usr/include/ TEST_PROGS := run_tests.sh 
+TEST_FILES := run_fuse_test.sh TEST_GEN_FILES := memfd_test fuse_mnt fuse_test fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags) diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config new file mode 100644 index 0000000000000000000000000000000000000000..835c7f4dadcd131b1687ed99329d22e4b42bfdeb --- /dev/null +++ b/tools/testing/selftests/memfd/config @@ -0,0 +1 @@ +CONFIG_FUSE_FS=m diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 989f917068d1ccfa1dd67ed8d2353b4b13904d2b..d4346b16b2c1ee88280307745b76f184d5ddff33 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -128,6 +128,8 @@ static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id) static void sock_fanout_set_ebpf(int fd) { + static char log_buf[65536]; + const int len_off = __builtin_offsetof(struct __sk_buff, len); struct bpf_insn prog[] = { { BPF_ALU64 | BPF_MOV | BPF_X, 6, 1, 0, 0 }, @@ -140,7 +142,6 @@ static void sock_fanout_set_ebpf(int fd) { BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, 0 }, { BPF_JMP | BPF_EXIT, 0, 0, 0, 0 } }; - char log_buf[512]; union bpf_attr attr; int pfd; diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c index 35ade7406dcdbbc778dbf7f39d46fafee0b0148f..3ae77ba93208f15f0d720325c269c7a95af2c6fa 100644 --- a/tools/testing/selftests/powerpc/mm/subpage_prot.c +++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c @@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size) return 0; } +static int syscall_available(void) +{ + int rc; + + errno = 0; + rc = syscall(__NR_subpage_prot, 0, 0, 0); + + return rc == 0 || (errno != ENOENT && errno != ENOSYS); +} + int test_anon(void) { unsigned long align; @@ -145,6 +155,8 @@ int test_anon(void) void *mallocblock; unsigned long mallocsize; + SKIP_IF(!syscall_available()); + if (getpagesize() != 0x10000) { 
fprintf(stderr, "Kernel page size must be 64K!\n"); return 1; @@ -180,6 +192,8 @@ int test_file(void) off_t filesize; int fd; + SKIP_IF(!syscall_available()); + fd = open(file_name, O_RDWR); if (fd == -1) { perror("failed to open file"); diff --git a/tools/testing/selftests/pstore/config b/tools/testing/selftests/pstore/config index 6a8e5a9bfc1065a3e860cd5b31e7aea2d508bea8..d148f9f89fb64cf325a546a1855c1e1cc5fc8a21 100644 --- a/tools/testing/selftests/pstore/config +++ b/tools/testing/selftests/pstore/config @@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y CONFIG_PSTORE=y CONFIG_PSTORE_PMSG=y CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=m diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 0b457e8e0f0c103281b5b55006729ffac77ad8f0..e350cf3d4f901ab52c9c65446b2fa02139191e8b 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -134,11 +134,24 @@ struct seccomp_data { #endif #ifndef SECCOMP_FILTER_FLAG_TSYNC -#define SECCOMP_FILTER_FLAG_TSYNC 1 +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) #endif #ifndef SECCOMP_FILTER_FLAG_LOG -#define SECCOMP_FILTER_FLAG_LOG 2 +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1) +#endif + +#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) +#endif + +#ifndef PTRACE_SECCOMP_GET_METADATA +#define PTRACE_SECCOMP_GET_METADATA 0x420d + +struct seccomp_metadata { + __u64 filter_off; /* Input: which filter */ + __u64 flags; /* Output: filter's flags */ +}; #endif #ifndef seccomp @@ -2063,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock) TEST(detect_seccomp_filter_flags) { unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, - SECCOMP_FILTER_FLAG_LOG }; + SECCOMP_FILTER_FLAG_LOG, + SECCOMP_FILTER_FLAG_SPEC_ALLOW }; unsigned int flag, all_flags; int i; long ret; /* Test detection of known-good filter flags */ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { + int bits = 0; + flag = flags[i]; + /* 
Make sure the flag is a single bit! */ + while (flag) { + if (flag & 0x1) + bits ++; + flag >>= 1; + } + ASSERT_EQ(1, bits); + flag = flags[i]; + ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); @@ -2845,6 +2870,58 @@ TEST(get_action_avail) EXPECT_EQ(errno, EOPNOTSUPP); } +TEST(get_metadata) +{ + pid_t pid; + int pipefd[2]; + char buf; + struct seccomp_metadata md; + + ASSERT_EQ(0, pipe(pipefd)); + + pid = fork(); + ASSERT_GE(pid, 0); + if (pid == 0) { + struct sock_filter filter[] = { + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), + }; + struct sock_fprog prog = { + .len = (unsigned short)ARRAY_SIZE(filter), + .filter = filter, + }; + + /* one with log, one without */ + ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, + SECCOMP_FILTER_FLAG_LOG, &prog)); + ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog)); + + ASSERT_EQ(0, close(pipefd[0])); + ASSERT_EQ(1, write(pipefd[1], "1", 1)); + ASSERT_EQ(0, close(pipefd[1])); + + while (1) + sleep(100); + } + + ASSERT_EQ(0, close(pipefd[1])); + ASSERT_EQ(1, read(pipefd[0], &buf, 1)); + + ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid)); + ASSERT_EQ(pid, waitpid(pid, NULL, 0)); + + md.filter_off = 0; + ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md)); + EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG); + EXPECT_EQ(md.filter_off, 0); + + md.filter_off = 1; + ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md)); + EXPECT_EQ(md.flags, 0); + EXPECT_EQ(md.filter_off, 1); + + ASSERT_EQ(0, kill(pid, SIGKILL)); +} + /* * TODO: * - add microbenchmarks diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile index b3c8ba3cb66855ff93d6581f7428982be41fca60..d0121a8a3523a948af699e52b6adbf1cde37b030 100644 --- a/tools/testing/selftests/sync/Makefile +++ b/tools/testing/selftests/sync/Makefile @@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS) $(CC) -o 
$(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS) $(OBJS): $(OUTPUT)/%.o: %.c - $(CC) -c $^ -o $@ + $(CC) -c $^ -o $@ $(CFLAGS) $(TESTS): $(OUTPUT)/%.o: %.c $(CC) -c $^ -o $@ diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile index 3d5a62ff7d31ed437fc5457a501ec9b606839f92..f5d7a7851e2177b315111f4e8ae3f0b0a716487d 100644 --- a/tools/testing/selftests/vDSO/Makefile +++ b/tools/testing/selftests/vDSO/Makefile @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 +include ../lib.mk + ifndef CROSS_COMPILE CFLAGS := -std=gnu99 CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector @@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y) LDLIBS += -lgcc_s endif -TEST_PROGS := vdso_test vdso_standalone_test_x86 +TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86 all: $(TEST_PROGS) -vdso_test: parse_vdso.c vdso_test.c -vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c +$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c +$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ vdso_standalone_test_x86.c parse_vdso.c \ - -o vdso_standalone_test_x86 + -o $@ -include ../lib.mk -clean: - rm -fr $(TEST_PROGS) +EXTRA_CLEAN := $(TEST_PROGS) endif diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests index cc826326de87accb0853c80ad498edd8fb2e5de1..45708aa3ce470bab45f81c2cc432342cdafb421d 100755 --- a/tools/testing/selftests/vm/run_vmtests +++ b/tools/testing/selftests/vm/run_vmtests @@ -2,25 +2,33 @@ # SPDX-License-Identifier: GPL-2.0 #please run as root -#we need 256M, below is the size in kB -needmem=262144 mnt=./huge exitcode=0 -#get pagesize and freepages from /proc/meminfo +#get huge pagesize and freepages from /proc/meminfo while read name size unit; do if [ "$name" = "HugePages_Free:" ]; then freepgs=$size fi if [ "$name" = "Hugepagesize:" ]; then - pgsize=$size + 
hpgsize_KB=$size fi done < /proc/meminfo +# Simple hugetlbfs tests have a hardcoded minimum requirement of +# huge pages totaling 256MB (262144KB) in size. The userfaultfd +# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take +# both of these requirements into account and attempt to increase +# number of huge pages available. +nr_cpus=$(nproc) +hpgsize_MB=$((hpgsize_KB / 1024)) +half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128)) +needmem_KB=$((half_ufd_size_MB * 2 * 1024)) + #set proper nr_hugepages -if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then +if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` - needpgs=`expr $needmem / $pgsize` + needpgs=$((needmem_KB / hpgsize_KB)) tries=2 while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do lackpgs=$(( $needpgs - $freepgs )) @@ -107,8 +115,9 @@ fi echo "---------------------------" echo "running userfaultfd_hugetlb" echo "---------------------------" -# 256MB total huge pages == 128MB src and 128MB dst -./userfaultfd hugetlb 128 32 $mnt/ufd_test_file +# Test requires source and destination huge pages. Size of source +# (half_ufd_size_MB) is passed as argument to test. +./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file if [ $? 
-ne 0 ]; then echo "[FAIL]" exitcode=1 diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c index 1c12536f2081e89a5f112c39cf3aa4716bd7dc94..18f523557983b22608d73a70c830fd463ea33e26 100644 --- a/tools/thermal/tmon/sysfs.c +++ b/tools/thermal/tmon/sysfs.c @@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst) int update_thermal_data() { int i; + int next_thermal_record = cur_thermal_record + 1; char tz_name[256]; static unsigned long samples; @@ -495,9 +496,9 @@ int update_thermal_data() } /* circular buffer for keeping historic data */ - if (cur_thermal_record >= NR_THERMAL_RECORDS) - cur_thermal_record = 0; - gettimeofday(&trec[cur_thermal_record].tv, NULL); + if (next_thermal_record >= NR_THERMAL_RECORDS) + next_thermal_record = 0; + gettimeofday(&trec[next_thermal_record].tv, NULL); if (tmon_log) { fprintf(tmon_log, "%lu ", ++samples); fprintf(tmon_log, "%3.1f ", p_param.t_target); @@ -507,11 +508,12 @@ int update_thermal_data() snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE, ptdata.tzi[i].instance); sysfs_get_ulong(tz_name, "temp", - &trec[cur_thermal_record].temp[i]); + &trec[next_thermal_record].temp[i]); if (tmon_log) fprintf(tmon_log, "%lu ", - trec[cur_thermal_record].temp[i]/1000); + trec[next_thermal_record].temp[i] / 1000); } + cur_thermal_record = next_thermal_record; for (i = 0; i < ptdata.nr_cooling_dev; i++) { char cdev_name[256]; unsigned long val; diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c index 9aa19652e8e859a34a5db1edbae0c02170499967..b43138f8b8628825ead8bb938da374cdd00d253a 100644 --- a/tools/thermal/tmon/tmon.c +++ b/tools/thermal/tmon/tmon.c @@ -336,7 +336,6 @@ int main(int argc, char **argv) show_data_w(); show_cooling_device(); } - cur_thermal_record++; time_elapsed += ticktime; controller_handler(trec[0].temp[target_tz_index] / 1000, &yk); diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 
5a11f4d3972cdb992250672f09c8061ccd9c589a..d72b8481f2500ca470a8e3eb935589ff29dc4926 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -279,8 +279,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, u8 prop; int ret; - ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, - &prop, 1); + ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET, + &prop, 1); if (ret) return ret; @@ -413,8 +413,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) * this very same byte in the last iteration. Reuse that. */ if (byte_offset != last_byte_offset) { - ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset, - &pendmask, 1); + ret = kvm_read_guest_lock(vcpu->kvm, + pendbase + byte_offset, + &pendmask, 1); if (ret) { kfree(intids); return ret; @@ -740,7 +741,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id, return false; /* Each 1st level entry is represented by a 64-bit value. */ - if (kvm_read_guest(its->dev->kvm, + if (kvm_read_guest_lock(its->dev->kvm, BASER_ADDRESS(baser) + index * sizeof(indirect_ptr), &indirect_ptr, sizeof(indirect_ptr))) return false; @@ -1297,8 +1298,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its) cbaser = CBASER_ADDRESS(its->cbaser); while (its->cwriter != its->creadr) { - int ret = kvm_read_guest(kvm, cbaser + its->creadr, - cmd_buf, ITS_CMD_SIZE); + int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr, + cmd_buf, ITS_CMD_SIZE); /* * If kvm_read_guest() fails, this could be due to the guest * programming a bogus value in CBASER or something else going @@ -1830,7 +1831,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz, int next_offset; size_t byte_offset; - ret = kvm_read_guest(kvm, gpa, entry, esz); + ret = kvm_read_guest_lock(kvm, gpa, entry, esz); if (ret) return ret; @@ -2191,7 +2192,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) int 
ret; BUG_ON(esz > sizeof(val)); - ret = kvm_read_guest(kvm, gpa, &val, esz); + ret = kvm_read_guest_lock(kvm, gpa, &val, esz); if (ret) return ret; val = le64_to_cpu(val); diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index c1e4bdd66131e578eb55fe5d8ebcc8a52a6c2acb..b4c5baf4af45d93f7530de5e5247e02abe3647d4 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -110,9 +110,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, /* Loop over all IRQs affected by this read */ for (i = 0; i < len * 8; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + unsigned long flags; + spin_lock_irqsave(&irq->irq_lock, flags); if (irq_is_pending(irq)) value |= (1U << i); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 9dcc31600a8bd21f50709edf4d7e628c6f303e4d..6b4fcd52f14c86f84e414092a6985e8510a11b0a 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -297,7 +297,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) bit_nr = irq->intid % BITS_PER_BYTE; ptr = pendbase + byte_offset; - ret = kvm_read_guest(kvm, ptr, &val, 1); + ret = kvm_read_guest_lock(kvm, ptr, &val, 1); if (ret) return ret; @@ -350,7 +350,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) ptr = pendbase + byte_offset; if (byte_offset != last_byte_offset) { - ret = kvm_read_guest(kvm, ptr, &val, 1); + ret = kvm_read_guest_lock(kvm, ptr, &val, 1); if (ret) return ret; last_byte_offset = byte_offset; diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index f7450dc41ab3e021ff86f7a538fe06eecf691d84..21a2240164f330b81e7f5c62f9becfeec1f98859 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -96,6 +96,7 @@ /* we only support 64 kB translation table page size */ #define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16) +/* Requires the irq_lock to 
be held by the caller. */ static inline bool irq_is_pending(struct vgic_irq *irq) { if (irq->config == VGIC_CONFIG_EDGE)