diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl index 640f65e79ef1c00c94508b6b9f9fe8b63a1305a6..267920a1874b9b17e8bb48992748665533865b2c 100644 --- a/Documentation/ABI/testing/sysfs-class-cxl +++ b/Documentation/ABI/testing/sysfs-class-cxl @@ -69,7 +69,9 @@ Date: September 2014 Contact: linuxppc-dev@lists.ozlabs.org Description: read/write Set the mode for prefaulting in segments into the segment table - when performing the START_WORK ioctl. Possible values: + when performing the START_WORK ioctl. Only applicable when + running under hashed page table mmu. + Possible values: none: No prefaulting (default) work_element_descriptor: Treat the work element descriptor as an effective address and diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 51ec539c8ddbf56d14bdee77e88cc9f6b71d7d2b..0613573ed177d8eb7c7dc35c4c09fd89f6338462 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4006,6 +4006,23 @@ expediting. Set to zero to disable automatic expediting. + ssbd= [ARM64,HW] + Speculative Store Bypass Disable control + + On CPUs that are vulnerable to the Speculative + Store Bypass vulnerability and offer a + firmware based mitigation, this parameter + indicates how the mitigation should be used: + + force-on: Unconditionally enable mitigation for + for both kernel and userspace + force-off: Unconditionally disable mitigation for + for both kernel and userspace + kernel: Always enable mitigation in the + kernel, and offer a prctl interface + to allow userspace to register its + interest in being mitigated too. + stack_guard_gap= [MM] override the default stack gap protection. The value is in page units and it defines how many pages prior diff --git a/Documentation/arm/msm/remote_debug_drv.txt b/Documentation/arm/msm/remote_debug_drv.txt new file mode 100644 index 0000000000000000000000000000000000000000..13a35f43e86befa1ea75d4133d44f883c3a95ea9 --- /dev/null +++ b/Documentation/arm/msm/remote_debug_drv.txt @@ -0,0 +1,468 @@ +Introduction +============ + +The goal of this debug feature is to provide a reliable, responsive, +accurate and secure debug capability to developers interested in +debugging MSM subsystem processor images without the use of a hardware +debugger. + +The Debug Agent along with the Remote Debug Driver implements a shared +memory based transport mechanism that allows for a debugger (ex. GDB) +running on a host PC to communicate with a remote stub running on +peripheral subsystems such as the ADSP, MODEM etc. + +The diagram below depicts end to end the components involved to +support remote debugging: + + +: : +: HOST (PC) : MSM +: ,--------, : ,-------, +: | | : | Debug | ,--------, +: |Debugger|<--:-->| Agent | | Remote | +: | | : | App | +----->| Debug | +: `--------` : |-------| ,--------, | | Stub | +: : | Remote| | |<---+ `--------` +: : | Debug |<-->|--------| +: : | Driver| | |<---+ ,--------, +: : `-------` `--------` | | Remote | +: : LA Shared +----->| Debug | +: : Memory | Stub | +: : `--------` +: : Peripheral Subsystems +: : (ADSP, MODEM, ...) + + +Debugger: Debugger application running on the host PC that + communicates with the remote stub. + Examples: GDB, LLDB + +Debug Agent: Software that runs on the Linux Android platform + that provides connectivity from the MSM to the + host PC. 
This involves two portions: + 1) User mode Debug Agent application that discovers + processes running on the subsystems and creates + TCP/IP sockets for the host to connect to. In addition + to this, it creates an info (or meta) port that + users can connect to discover the various + processes and their corresponding debug ports. + +Remote Debug A character based driver that the Debug +Driver: Agent uses to transport the payload received from the + host to the debug stub running on the subsystem + processor over shared memory and vice versa. + +Shared Memory: Shared memory from the SMEM pool that is accessible + from the Applications Processor (AP) and the + subsystem processors. + +Remote Debug Privileged code that runs in the kernels of the +Stub: subsystem processors that receives debug commands + from the debugger running on the host and + acts on these commands. These commands include reading + and writing to registers and memory belonging to the + subsystem's address space, setting breakpoints, + single stepping etc. + +Hardware description +==================== + +The Remote Debug Driver interfaces with the Remote Debug stubs +running on the subsystem processors and does not drive or +manage any hardware resources. + +Software description +==================== + +The debugger and the remote stubs use Remote Serial Protocol (RSP) +to communicate with each other. This is widely used protocol by both +software and hardware debuggers. RSP is an ASCII based protocol +and used when it is not possible to run GDB server on the target under +debug. + +The Debug Agent application along with the Remote Debug Driver +is responsible for establishing a bi-directional connection from +the debugger application running on the host to the remote debug +stub running on a subsystem. The Debug Agent establishes connectivity +to the host PC via TCP/IP sockets. + +This feature uses ADB port forwarding to establish connectivity +between the debugger running on the host and the target under debug. + +Please note the Debug Agent does not expose HLOS memory to the +remote subsystem processors. + +Design +====== + +Here is the overall flow: + +1) When the Debug Agent application starts up, it opens up a shared memory +based transport channel to the various subsystem processor images. + +2) The Debug Agent application sends messages across to the remote stubs +to discover the various processes that are running on the subsystem and +creates debug sockets for each of them. + +3) Whenever a process running on a subsystem exits, the Debug Agent +is notified by the stub so that the debug port and other resources +can be reclaimed. + +4) The Debug Agent uses the services of the Remote Debug Driver to +transport payload from the host debugger to the remote stub and vice versa. + +5) Communication between the Remote Debug Driver and the Remote Debug stub +running on the subsystem processor is done over shared memory (see figure). +SMEM services are used to allocate the shared memory that will +be readable and writeable by the AP and the subsystem image under debug. + +A separate SMEM allocation takes place for each subsystem processor +involved in remote debugging. The remote stub running on each of the +subsystems allocates a SMEM buffer using a unique identifier so that both +the AP and subsystem get the same physical block of memory. It should be +noted that subsystem images can be restarted at any time. 
+However, when a subsystem comes back up, its stub uses the same unique +SMEM identifier to allocate the SMEM block. This would not result in a +new allocation rather the same block of memory in the first bootup instance +is provided back to the stub running on the subsystem. + +An 8KB chunk of shared memory is allocated and used for communication +per subsystem. For multi-process capable subsystems, 16KB chunk of shared +memory is allocated to allow for simultaneous debugging of more than one +process running on a single subsystem. + +The shared memory is used as a circular ring buffer in each direction. +Thus we have a bi-directional shared memory channel between the AP +and a subsystem. We call this SMQ. Each memory channel contains a header, +data and a control mechanism that is used to synchronize read and write +of data between the AP and the remote subsystem. + +Overall SMQ memory view: +: +: +------------------------------------------------+ +: | SMEM buffer | +: |-----------------------+------------------------| +: |Producer: LA | Producer: Remote | +: |Consumer: Remote | subsystem | +: | subsystem | Consumer: LA | +: | | | +: | Producer| Consumer| +: +-----------------------+------------------------+ +: | | +: | | +: | +--------------------------------------+ +: | | +: | | +: v v +: +--------------------------------------------------------------+ +: | Header | Data | Control | +: +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ +: | | b | b | b | | S |n |n | | S |n |n | | +: | Producer | l | l | l | | M |o |o | | M |o |o | | +: | Ver | o | o | o | | Q |d |d | | Q |d |d | | +: |-----------| c | c | c | ... | |e |e | ... | |e |e | ... | +: | | k | k | k | | O | | | | I | | | | +: | Consumer | | | | | u |0 |1 | | n |0 |1 | | +: | Ver | 0 | 1 | 2 | | t | | | | | | | | +: +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ +: | | +: + | +: | +: +------------------------+ +: | +: v +: +----+----+----+----+ +: | SMQ Nodes | +: |----|----|----|----| +: Node # | 0 | 1 | 2 | ...| +: |----|----|----|----| +: Starting Block Index # | 0 | 3 | 8 | ...| +: |----|----|----|----| +: # of blocks | 3 | 5 | 1 | ...| +: +----+----+----+----+ +: + +Header: Contains version numbers for software compatibility to ensure +that both producers and consumers on the AP and subsystems know how to +read from and write to the queue. +Both the producer and consumer versions are 1. +: +---------+-------------------+ +: | Size | Field | +: +---------+-------------------+ +: | 1 byte | Producer Version | +: +---------+-------------------+ +: | 1 byte | Consumer Version | +: +---------+-------------------+ + + +Data: The data portion contains multiple blocks [0..N] of a fixed size. +The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1. +Payload sent from the debug agent app is split (if necessary) and placed +in these blocks. The first data block is placed at the next 8 byte aligned +address after the header. + +The number of blocks for a given SMEM allocation is derived as follows: + Number of Blocks = ((Total Size - Alignment - Size of Header + - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE)) + +The producer maintains a private block map of each of these blocks to +determine which of these blocks in the queue is available and which are free. + +Control: +The control portion contains a list of nodes [0..N] where N is number +of available data blocks. 
Each node identifies the data
+block indexes that contain a particular debug message to be transferred,
+and the number of blocks it took to hold the contents of the message.
+
+Each node has the following structure:
+: +---------+--------------------+
+: |  Size   |       Field        |
+: +---------+--------------------+
+: | 2 bytes |Starting Block Index|
+: +---------+--------------------+
+: | 2 bytes |Number of Blocks    |
+: +---------+--------------------+
+
+The producer and the consumer update different parts of the control channel
+(SMQOut / SMQIn) respectively. Each of these control data structures contains
+information about the last node that was written / read, and the actual nodes
+that were written/read.
+
+SMQOut Structure (R/W by producer, R by consumer):
+: +---------+-------------------+
+: |  Size   |       Field       |
+: +---------+-------------------+
+: | 4 bytes | Magic Init Number |
+: +---------+-------------------+
+: | 4 bytes | Reset             |
+: +---------+-------------------+
+: | 4 bytes | Last Sent Index   |
+: +---------+-------------------+
+: | 4 bytes | Index Free Read   |
+: +---------+-------------------+
+
+SMQIn Structure (R/W by consumer, R by producer):
+: +---------+-------------------+
+: |  Size   |       Field       |
+: +---------+-------------------+
+: | 4 bytes | Magic Init Number |
+: +---------+-------------------+
+: | 4 bytes | Reset ACK         |
+: +---------+-------------------+
+: | 4 bytes | Last Read Index   |
+: +---------+-------------------+
+: | 4 bytes | Index Free Write  |
+: +---------+-------------------+
+
+Magic Init Number:
+Both SMQ Out and SMQ In initialize this field with a predefined magic
+number so as to make sure that both the consumer and producer blocks
+have fully initialized and have valid data in the shared memory control area.
+ Producer Magic #: 0xFF00FF01
+ Consumer Magic #: 0xFF00FF02
+
+SMQ Out's Last Sent Index and Index Free Read:
+ Only a producer can write to these indexes and they are updated whenever
+ there is new payload to be inserted into the SMQ in order to be sent to a
+ consumer.
+
+ The number of blocks required for the SMQ allocation is determined as:
+ (payload size + SM_BLOCKSIZE - 1) / SM_BLOCKSIZE
+
+ The private block map is searched for a large enough continuous set of blocks
+ and the user data is copied into the data blocks.
+
+ The starting index of the free block(s) is updated in the SMQOut's Last Sent
+ Index. This update keeps track of which index was last written to and the
+ producer uses it to determine where the next allocation could be done.
+
+ On every allocation, a producer updates the Index Free Read from its
+ collaborating consumer's Index Free Write field (if they are unequal).
+ This index value indicates that the consumer has read all blocks associated
+ with the allocation on the SMQ and that the producer can reuse these blocks
+ for subsequent allocations since this is a circular queue.
+
+ At cold boot and restart, these indexes are initialized to zero and all
+ blocks are marked as available for allocation.
+
+SMQ In's Last Read Index and Index Free Write:
+ These indexes are written to only by a consumer and are updated whenever
+ there is new payload to be read from the SMQ. The Last Read Index keeps
+ track of which index was last read by the consumer and using this, it
+ determines where the next read should be done.
+ After completing a read, Last Read Index is incremented to the
+ next block index. A consumer updates Index Free Write to the starting
+ index of an allocation whenever it has completed processing the blocks.
+ This is an optimization that can be used to prevent an additional copy
+ of data from the queue into a client's data buffer; the data in the queue
+ itself can be used directly.
+ Once Index Free Write is updated, the collaborating producer (on the next
+ data allocation) reads the updated Index Free Write value and it then
+ updates its corresponding SMQ Out's Index Free Read and marks the blocks
+ associated with that index as available for allocation. At cold boot and
+ restart, these indexes are initialized to zero.
+
+SMQ Out Reset# and SMQ In Reset ACK #:
+ Since subsystems can restart at any time, the data blocks and control channel
+ can be in an inconsistent state when a producer or consumer comes up.
+ We use Reset and Reset ACK to manage this. At cold boot, the producer
+ initializes the Reset# to a known number, e.g. 1. On every subsequent reset
+ that the producer undergoes, the Reset# is simply incremented by 1. All the
+ producer indexes are reset.
+ When the producer notifies the consumer of data availability, the consumer
+ reads the producer's Reset # and copies that into its SMQ In Reset ACK#
+ field when they differ. When that occurs, the consumer resets its
+ indexes to 0.
+
+6) Asynchronous notifications between a producer and consumer are
+done using the SMP2P service which is interrupt based.
+
+Power Management
+================
+
+None
+
+SMP/multi-core
+==============
+
+The driver uses completions to wake up the Debug Agent client threads.
+
+Security
+========
+
+From the perspective of the subsystem, the AP is untrusted. The remote
+stubs consult the secure debug fuses to determine whether or not
+remote debugging will be enabled at the subsystem.
+
+If the hardware debug fuses indicate that debugging is disabled, the
+remote stubs will not be functional on the subsystem. Writes to the
+queue will only be done if the driver sees that the remote stub has been
+initialized on the subsystem.
+
+Therefore, even if untrusted software running on the AP requests
+the services of the Remote Debug Driver and injects RSP messages
+into the shared memory buffer, these RSP messages will be discarded and
+an appropriate error code will be sent up to the invoking application.
+
+Performance
+===========
+
+During operation, the Remote Debug Driver copies RSP messages
+asynchronously sent from the host debugger to the remote stub and vice
+versa. The debug messages are ASCII based and relatively short
+(<25 bytes) and may once in a while go up to a maximum of 700 bytes
+depending on the command the user requested. Thus we do not
+anticipate any major performance impact. Moreover, in a typical
+functional debug scenario performance should not be a concern.
+
+Interface
+=========
+
+The Remote Debug Driver is a character device driver that manages
+a piece of shared memory that is used as a bi-directional
+single producer/consumer circular queue using a next fit allocator.
+Every subsystem has its own shared memory buffer that is managed
+like a separate device.
+
+The driver distinguishes each subsystem processor's buffer by
+registering a node with a different minor number.
+
+For each subsystem that is supported, the driver exposes a user space
+interface through the following node:
+ - /dev/rdbg-<subsystem>
+ Ex. /dev/rdbg-adsp (for the ADSP subsystem)
+
+The standard open(), close(), read() and write() API set is
+implemented.
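+
+For illustration only, a minimal user-space sequence against the ADSP node
+might look like the sketch below. The node name and the RSP payload (a GDB
+"halt reason" query) are just examples, and error handling is trimmed:
+
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int rdbg_example(void)
+	{
+		char reply[768];
+		ssize_t n;
+		int fd;
+
+		fd = open("/dev/rdbg-adsp", O_RDWR);
+		if (fd < 0)
+			return -1;	/* stub not up, SMEM failure, ... */
+
+		/* forward an RSP packet to the remote stub */
+		if (write(fd, "$?#3f", 5) < 0) {
+			close(fd);
+			return -1;
+		}
+
+		/* blocks until the subsystem writes its reply into the SMQ */
+		n = read(fd, reply, sizeof(reply));
+
+		close(fd);	/* resets the SMQ control state */
+		return n < 0 ? -1 : 0;
+	}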
+
+The open() syscall will fail if a subsystem is not present or supported
+by the driver or a shared memory buffer cannot be allocated for the
+AP - subsystem communication. It will also fail if the subsystem has
+not initialized the queue on its side. Here are the error codes returned
+in case a call to open() fails:
+ENODEV - memory was not yet allocated for the device
+EEXIST - device is already opened
+ENOMEM - SMEM allocation failed
+ECOMM - Subsystem queue is not yet set up
+ENOMEM - Failure to initialize SMQ
+
+read() is a blocking call that will return with the number of bytes written
+by the subsystem whenever the subsystem sends it some payload. Here are the
+error codes returned in case a call to read() fails:
+EINVAL - Invalid input
+ENODEV - Device has not been opened yet
+ERESTARTSYS - call to wait_for_completion_interruptible is interrupted
+ENODATA - call to smq_receive failed
+
+write() attempts to send user mode payload out to the subsystem. It can fail
+if the SMQ is full. The number of bytes written is returned to the user.
+Here are the error codes returned in case a call to write() fails:
+EINVAL - Invalid input
+ECOMM - SMQ send failed
+
+In the close() syscall, the control information state of the SMQ is
+initialized to zero, thereby preventing any further communication between
+the AP and the subsystem. Here is the error code returned in case
+a call to close() fails:
+ENODEV - device wasn't opened/initialized
+
+The Remote Debug driver uses SMP2P for bi-directional AP to subsystem
+notification. Notifications are sent to indicate that there are new
+debug messages available for processing. Each subsystem that is
+supported will need to add a device tree entry per the usage
+specification of the SMP2P driver.
+
+In case the remote stub becomes non-operational or the security configuration
+on the subsystem does not permit debugging, any messages put in the SMQ will
+not be responded to. It is the responsibility of the Debug Agent app and the
+host debugger application such as GDB to time out and notify the user of the
+non-availability of remote debugging.
+
+Driver parameters
+=================
+
+None
+
+Config options
+==============
+
+The driver is configured with a device tree entry to map an SMP2P entry
+to the device. The SMP2P entry name used is "rdbg". Please see
+Documentation/arm/msm/msm_smp2p.txt for information about the
+device tree entry required to configure SMP2P.
+
+The driver uses the SMEM allocation type SMEM_LC_DEBUGGER to allocate memory
+for the queue that is used to share data with the subsystems.
+
+Dependencies
+============
+
+The Debug Agent driver requires the services of SMEM to
+allocate shared memory buffers.
+
+SMP2P is used as a bi-directional notification
+mechanism between the AP and a subsystem processor.
+
+User space utilities
+====================
+
+This driver is meant to be used in conjunction with the user mode
+Remote Debug Agent application.
+
+Other
+=====
+
+None
+
+Known issues
+============
+For targets with an external subsystem, we cannot use
+shared memory for communication and would have to use the prevailing
+transport mechanisms that exist between the AP and the external subsystem.
+
+This driver cannot be leveraged for such targets.
+ +To do +===== + +None diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index 58c94abd503df94b68e0bdec4691e9e421fc2de2..1741ef5653cc0507474c6ac5fec9ee06b458562f 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -53,6 +53,9 @@ SoCs: - QCS405 compatible = "qcom,qcs405" +- QCS403 + compatible = "qcom,qcs403" + - SDXPRAIRIE compatible = "qcom,sdxprairie" @@ -100,6 +103,8 @@ Generic board variants: - RUMI device: compatible = "qcom,rumi" +- IOT device: + compatible = "qcom,iot" Boards (SoC type + board variant): @@ -156,10 +161,11 @@ compatible = "qcom,sm6150-cdp" compatible = "qcom,sm6150-qrd" compatible = "qcom,sm6150-idp" compatible = "qcom,qcs405-rumi" -compatible = "qcom,qcs405-mtp" -compatible = "qcom,qcs405-cdp" -compatible = "qcom,sm8150-auto-adp-star" -compatible = "qcom,auto-adp-star" +compatible = "qcom,qcs405-iot" +compatible = "qcom,qcs403-iot" +compatible = "qcom,sa8155-adp-star" +compatible = "qcom,sa8155p-adp-star" +compatible = "qcom,adp-star" compatible = "qcom,sdxprairie-rumi" compatible = "qcom,sdxprairie-mtp" compatible = "qcom,sdxprairie-cdp" diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt index 3b6cf9c034639c2496c073a79a7be596661cd17d..5ef7cc1fc58396d522c9e9baf32e2e15474faa7a 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt @@ -19,6 +19,7 @@ Required properties for Ion heaps the following: - "SYSTEM" - "CARVEOUT" + - "SECURE_CARVEOUT" - "DMA" - "HYP_CMA" - "SYSTEM_SECURE" @@ -28,6 +29,7 @@ Optional properties for Ion heaps - memory-region: phandle to memory region associated with heap. + Example: qcom,ion { compatible = "qcom,msm-ion"; @@ -57,3 +59,32 @@ Example: }; }; + +"SECURE_CARVEOUT" + +This heap type is expected to contain multiple child nodes. Each child node +shall contain the following required properties: + +- memory-region: +Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt + +- token: +A u32 containing the set of secure domains which will be able to access the +memory-region. + +Example: +qcom,ion { + compatible = "qcom,msm-ion"; + #address-cells = <1>; + #size-cells = <0>; + + qcom,ion-heap@14 { + reg = <14>; + qcom,ion-heap-type = "SECURE_CARVEOUT"; + + node1 { + memory-region = <&cp_region>; + token = ; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt index 53ad68e2e1b29baff430616a832082a0f4fc1507..82d86ab39553235b12d29d7ecba58d73f0b9cce6 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt @@ -33,6 +33,9 @@ Optional properties: - qcom,ipi-ping : (boolean) send keep alive ping to other cpus if present - qcom,wakeup-enable : (boolean) enable non secure watchdog to freeze / unfreeze automatically across suspend / resume path. +- qcom,scandump-sizes : an array of 32-bit values that contains the size of the + scandump memory region for each CPU, such that the nth + 32 bit value maps to the scandump size for CPU n. 
Example: @@ -45,4 +48,5 @@ Example: qcom,pet-time = <10000>; qcom,ipi-ping; qcom,wakeup-enable; + qcom,scandump-size = <0x10100 0x10100 0x10100 0x10100>; }; diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt new file mode 100644 index 0000000000000000000000000000000000000000..3965ec54dacf01950d57bd0b35e2b1cb2fe489a7 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt @@ -0,0 +1,15 @@ +Qualcomm Technologies, Inc. Remote Debugger (RDBG) driver + +Required properties: +-compatible : Should be one of + To communicate with adsp + qcom,smp2p-interrupt-rdbg-2-in (inbound) + qcom,smp2p-interrupt-rdbg-2-out (outbound) + To communicate with cdsp + qcom,smp2p-interrupt-rdbg-5-in (inbound) + qcom,smp2p-interrupt-rdbg-5-out (outbound) + +Example: + qcom,smp2p_interrupt_rdbg_2_in { + compatible = "qcom,smp2p-interrupt-rdbg-2-in"; + }; diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt index 4cba3ecaeb90bf244975b460dc1610dcf8b947fc..5c5aa5143013f9b63db6a42e9ae72fed2d47a81f 100644 --- a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt +++ b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt @@ -10,18 +10,18 @@ the application processor, the modem processor, as well as hardware accelerators. The RPM driver communicates with the hardware engine using SMD. -The devicetree representation of the SPM block should be: +The devicetree representation of the RPM block should be: Required properties -- compatible: "qcom,rpm-smd" or "qcom,rpm-glink" +- compatible: "qcom,rpm-smd" - rpm-channel-name: The string corresponding to the channel name of the peripheral subsystem. Required for both smd and glink transports. - rpm-channel-type: The interal SMD edge for this subsystem found in -- qcom,glink-edge: Logical name of the remote subsystem. This is a required - property when rpm-smd driver using glink as trasport. +- interrupts: The IRQ used by remote processor to inform APSS about + reception of response message packet. Optional properties - rpm-standalone: Allow RPM driver to run in standalone mode irrespective of RPM @@ -32,10 +32,10 @@ Optional properties Example: qcom,rpm-smd@68150 { - compatible = "qcom,rpm-smd", "qcom,rpm-glink"; + compatible = "qcom,rpm-smd"; reg = <0x68150 0x3200>; + interrupts = ; qcom,rpm-channel-name = "rpm_requests"; qcom,rpm-channel-type = 15; /* SMD_APPS_RPM */ - qcom,glink-edge = "rpm"; } } diff --git a/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt b/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt index 02dab4c8eb8d166ec025dbd9538038e6c6a7bee2..7fe9e5ddc06a6ea7aacef58059cdcb259ffa4395 100644 --- a/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt +++ b/Documentation/devicetree/bindings/arm/msm/rpm_stats.txt @@ -24,10 +24,16 @@ PROPERTIES Value type: Definition: Provides labels for the reg property. +- qcom,num-records: + Usage: optional + Value type: + Definition: Specifies number of records to read from RPM RAM. 
+ EXAMPLE: qcom,rpm-stats@c000000 { compatible = "qcom,rpm-stats"; reg = <0xC000000 0x1000>, <0x3F0000 0x4>; reg-names = "phys_addr_base", "offset_addr"; + qcom,num-records = <3>; }; diff --git a/Documentation/devicetree/bindings/arm/msm/spm-v2.txt b/Documentation/devicetree/bindings/arm/msm/spm-v2.txt index 194059c39c683db0bbd91c88ed5cc956511831a9..d44ab56b9f48776ba0aa3469aa5b45af5bbdba68 100644 --- a/Documentation/devicetree/bindings/arm/msm/spm-v2.txt +++ b/Documentation/devicetree/bindings/arm/msm/spm-v2.txt @@ -45,6 +45,8 @@ Optional properties for only Non-PSCI targets index to send the PMIC data to - qcom,vctl-port: The PVC (PMIC Virtual Channel) port used for changing voltage +- qcom,vctl-port-ub: The PVC (PMIC Virtual Channel) port used for changing + voltage - qcom,phase-port: The PVC port used for changing the number of phases - qcom,pfm-port: The PVC port used for enabling PWM/PFM modes - qcom,cpu-vctl-mask: Mask of cpus, whose voltage the spm device can control. @@ -105,6 +107,8 @@ Optional properties for only PSCI targets: between AVS controller requests - qcom,vctl-port: The PVC (PMIC Virtual Channel) port used for changing voltage +- qcom,vctl-port-ub: The PVC (PMIC Virtual Channel) port used for changing + voltage - qcom,phase-port: The PVC port used for changing the number of phases - qcom,pfm-port: The PVC port used for enabling PWM/PFM modes - qcom,cpu-vctl-list: List of cpu node phandles, whose voltage the spm device diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt index 9d80eb9d1859f8e5ec2ebbd8e0de6d96ebacc6b7..d00fd94a4812aa761f23e67dce074aaecf41437d 100644 --- a/Documentation/devicetree/bindings/batterydata/batterydata.txt +++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt @@ -69,6 +69,14 @@ Profile data node optional properties: size 5. - qcom,therm-center-offset: Specifies the resistor divide ratio between pull-up resistor and the thermistor for GEN4 FG. +- qcom,therm-pull-up: Specifies the thermistor pull-up resistor value in + KOhms. +- qcom,rslow-normal-coeffs: Array of Rslow coefficients that will be applied + when the battery temperature is greater than 0 degree + Celsius for GEN4 FG. This should be exactly of size 4. +- qcom,rslow-low-coeffs: Array of Rslow coefficients that will be applied + when the battery temperature is lower than 0 degree + Celsius for GEN4 FG. This should be exactly of size 4. - qcom,soc-based-step-chg: A bool property to indicate if the battery will perform SoC (State of Charge) based step charging. 
If yes, the low and high thresholds defined in diff --git a/Documentation/devicetree/bindings/bus/mhi.txt b/Documentation/devicetree/bindings/bus/mhi.txt index a204510a2cf5fdbed003fe27aac13d16fc5dfc35..efb9510595e53d61dda9ba397955008618098587 100644 --- a/Documentation/devicetree/bindings/bus/mhi.txt +++ b/Documentation/devicetree/bindings/bus/mhi.txt @@ -19,31 +19,6 @@ Main node properties: Value type: Definition: Maximum timeout in ms wait for state and cmd completion -- mhi,fw-name - Usage: optional - Value type: - Definition: Firmware image name to upload - -- mhi,edl-name - Usage: optional - Value type: - Definition: Firmware image name for emergency download - -- mhi,fbc-download - Usage: optional - Value type: - Definition: If set true, image specified by fw-name is for full image - -- mhi,sbl-size - Usage: optional - Value type: - Definition: Size of SBL image in bytes - -- mhi,seg-len - Usage: optional - Value type: - Definition: Size of each segment to allocate for BHIe vector table - - mhi,time-sync Usage: optional Value type: @@ -281,7 +256,6 @@ mhi_controller { mhi,bstmode = <2>; }; - mhi,fw-name = "sbl1.mbn"; mhi,timeout = <500>; children_node { @@ -316,11 +290,21 @@ MHI netdev properties Value type: Definition: Set true if interface support recycling buffers. +- aliases + Usage: required + Value type: + Definition: mhi net_device should have numbered alias in the alias node, + in the form of mhi_netdevN, N = 0, 1..n for each network interface. + ======== Example: ======== -mhi_rmnet@0 { +aliases { + mhi_netdev0 = &mhi_netdev_0; +}; + +mhi_netdev_0: mhi_rmnet@0 { mhi,chan = "IP_HW0"; mhi,interface-name = "rmnet_mhi"; mhi,mru = <0x4000>; diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt index d238475136be7bc94191a9d4ca36543264259df2..23e542e7b3a2cae75b6786b6e83038964e0ad04e 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt @@ -23,6 +23,7 @@ Required properties : "qcom,gcc-sdmshrike" "qcom,gcc-qcs405" "qcom,gcc-mdss-qcs405" + "qcom,gcc-sm6150" - reg : shall contain base register location and length - #clock-cells : shall contain 1 diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt index 129e3a79fbb90dcaf351a9a317529dd3504bd705..c0532804724107d0013d25f064e393e956d5ab95 100644 --- a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt @@ -4,21 +4,22 @@ Qualcomm Technologies, Inc. Graphics Clock & Reset Controller Binding Required properties : - compatible : shall contain one of the following: "qcom,gpucc-sm8150", - "qcom,gpucc-sdmshrike". + "qcom,gpucc-sdmshrike", + "qcom,gpucc-sm6150". - reg : shall contain base register offset and size. - reg-names: names of registers listed in the same order as in the reg property. Must contain "cc_base". -- #clock-cells : shall contain 1. -- #reset-cells : shall contain 1. +- #clock-cells : from common clock binding, shall contain 1. +- #reset-cells : from common reset binding, shall contain 1. - vdd_cx-supply : The vdd_cx logic rail supply. - vdd_mx-supply : The vdd_mx logic rail supply. Optional properties : -- #power-domain-cells : shall contain 1. +- #power-domain-cells : from generic power domain binding, shall contain 1. 
Example: - clock_gpucc: qcom,gpucc { + clock_gpucc: clock-controller@0x2c90000 { compatible = "qcom,gpucc-sm8150"; reg = <0x2c90000 0x9000>; reg-names = "cc_base"; diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt index 967eeb062b69968fb7df2d8ff076fe1a3012bec9..3776ae9d1f5c6fecbcfd3972889230442498a58e 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmh.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmh.txt @@ -2,7 +2,7 @@ Qualcomm Technologies, Inc. RPMh Clocks Required properties: - compatible: Must contain "qcom,rpmh-clk-sm8150" or - "qcom,rpmh-clk-sdmshrike". + "qcom,rpmh-clk-sdmshrike" or "qcom,rpmh-clk-sm6150". - #clock-cells: Must contain 1. - mboxes: List of RPMh mailbox phandle and channel identifier tuples. - mbox-names: List of names to identify the RPMh mailboxes used. diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt index b758de540298cacfe0bc00d35c8a86ce8f391ef9..1c7dfcd9027041ed2e9c7f2281e48b688e9a3c03 100644 --- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt @@ -1,7 +1,8 @@ Qualcomm Technologies, Inc. Video Clock & Reset Controller Bindings Required properties: -- compatible: shall contain "qcom,videocc-sm8150" or "qcom,videocc-sm8150-v2". +- compatible: shall contain "qcom,videocc-sm8150" or "qcom,videocc-sm8150-v2" or + "qcom,videocc-sm6150". - reg: shall contain base register location and length. - reg-names: names of registers listed in the same order as in the reg property. - vdd_mm-supply: the logic rail supply. diff --git a/Documentation/devicetree/bindings/display/bridge/anx7625.txt b/Documentation/devicetree/bindings/display/bridge/anx7625.txt new file mode 100644 index 0000000000000000000000000000000000000000..0bebd441f08263ce69d8d8d1491a71e348f7dd55 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/anx7625.txt @@ -0,0 +1,40 @@ +Analogix ANX7625 SlimPort (Full-HD Transmitter) +----------------------------------------------- + +The ANX7625 is DSI to DisplayPort bridge. 
+ +Required properties: + + - compatible : "analogix,anx7625" + - reg : I2C address of the device + - interrupt-parent : Should be the phandle of the interrupt controller + that services interrupts for this device + - interrupts : Should contain the INTP interrupt + - cbl_det-gpios : Which GPIO to use for cable detection + - power_en-gpios : Which GPIO to use for power enable + - reset_n-gpios : Which GPIO to use for reset + - ports : Port 0 for DSI input, should be a reference to a + valid mipi_dsi_host device + +Example: + + anx7625: anx7625@ee { + compatible = "analogix,anx7625"; + reg = <0xee>; + interrupt-parent = <&qup15>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; /* INTP */ + cbl_det-gpio = <&qup15 1 GPIO_ACTIVE_HIGH>; + power_en-gpio = <&pio 27 GPIO_ACTIVE_HIGH>; + reset_n-gpio = <&pio 49 GPIO_ACTIVE_HIGH>; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + anx7625_in: endpoint { + remote-endpoint = <&dsi_out>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index 17f9d7516ffbd8426281da228be32a1aaf21f1a4..8951557d42d308b9ea1a1928b886e5055a1e7ecd 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -127,6 +127,7 @@ Optional properties: turns off PHY pmic power supply, phy ldo and DSI Lane ldo during idle screen (footswitch control off) when this property is enabled. - qcom,dsi-phy-regulator-min-datarate-bps: Minimum per lane data rate (bps) to turn on PHY regulator. +- qcom,panel-force-clock-lane-hs: A boolean property indicates that panel needs clock lanes in HS mode only [1] Documentation/devicetree/bindings/clock/clock-bindings.txt [2] Documentation/devicetree/bindings/graph.txt @@ -237,4 +238,5 @@ Example: qcom,dsi-phy-regulator-ldo-mode; qcom,panel-allow-phy-poweroff; qcom,dsi-phy-regulator-min-datarate-bps = <1200000000>; + qcom,panel-force-clock-lane-hs; }; diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt index ec52c472c8459b920643af0592a30c385dd27a8e..0603af877155949803d3a2265e8159689de798e5 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-common.txt +++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt @@ -38,7 +38,7 @@ Display Timings require specific display timings. The panel-timing subnode expresses those timings as specified in the timing subnode section of the display timing bindings defined in - Documentation/devicetree/bindings/display/display-timing.txt. + Documentation/devicetree/bindings/display/panel/display-timing.txt. 
Connectivity diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index 891db41e94201ce69944133854eaedc97b9e672a..98d7898fcd78058ed29b0af2b61d4d6fd8b0f64e 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt @@ -25,6 +25,7 @@ Required Properties: - "renesas,dmac-r8a7794" (R-Car E2) - "renesas,dmac-r8a7795" (R-Car H3) - "renesas,dmac-r8a7796" (R-Car M3-W) + - "renesas,dmac-r8a77965" (R-Car M3-N) - "renesas,dmac-r8a77970" (R-Car V3M) - reg: base address and length of the registers block for the DMAC diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt index 1d20423775be66c09c865d91eefc51903b448a92..43c9d0bbe06d1fa2417d81a593716b78fd7529dc 100644 --- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt +++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt @@ -528,6 +528,7 @@ Optional properties: to identify the default topology for the display. The first set is indexed by the value 0. +- qcom,mdss-dsi-ext-bridge-mode: External bridge chip is connected instead of panel. Required properties for sub-nodes: None Optional properties: diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt index 0b660ce672d1f0833e9120caba33735d8a01b397..b706967565a7db7dbd381fc384fbbb8ef6a04d87 100644 --- a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt +++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt @@ -105,3 +105,8 @@ Optional properties: only with simulator panel. It should not be enabled for normal DSI panels. - - qcom,null-insertion-enabled: A boolean to enable NULL packet insertion feature for DSI controller. +- ports: This video port is used when external bridge is present. + The connection is modeled using the OF graph bindings + specified in Documentation/devicetree/bindings/graph.txt. + Video port 0 reg 0 is for the bridge output. The remote + endpoint phandle should be mipi_dsi_device device node. diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt index 6b9238c4bf302d0dc9824b79b23a5596741d02b9..4583f4e8d364070e876011fa1e805deb8c8ced49 100644 --- a/Documentation/devicetree/bindings/fb/mdss-pll.txt +++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt @@ -1,20 +1,24 @@ Qualcomm Technologies, Inc. MDSS pll for DSI/EDP/HDMI -mdss-pll is a pll controller device which supports pll devices that are -compatiable with MIPI display serial interface specification, HDMI and edp. +mdss-pll is a pll controller device which supports pll devices that +are compatible with MIPI display serial interface specification, +HDMI and edp. Required properties: - compatible: Compatible name used in the driver. 
Should be one of: - "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939", - "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994", - "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909", - "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994", - "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992", - "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996", - "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2", - "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_dsi_pll_8952", - "qcom,mdss_dsi_pll_8937", "qcom,mdss_hdmi_pll_8996_v3_1p8", - "qcom,mdss_dsi_pll_8953" + "qcom,mdss_dsi_pll_8916", "qcom,mdss_dsi_pll_8939", + "qcom,mdss_dsi_pll_8974", "qcom,mdss_dsi_pll_8994", + "qcom,mdss_dsi_pll_8994", "qcom,mdss_dsi_pll_8909", + "qcom,mdss_hdmi_pll", "qcom,mdss_hdmi_pll_8994", + "qcom,mdss_dsi_pll_8992", "qcom,mdss_hdmi_pll_8992", + "qcom,mdss_dsi_pll_8996", "qcom,mdss_hdmi_pll_8996", + "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2", + "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8", + "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8", + "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998", + "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm", + "qcom,mdss_dsi_pll_7nm", "qcom,mdss_dp_pll_7nm", + "qcom,mdss_dsi_pll_28lpm" - cell-index: Specifies the controller used - reg: offset and length of the register set for the device. - reg-names : names to refer to register sets related to this device diff --git a/Documentation/devicetree/bindings/input/qti-haptics.txt b/Documentation/devicetree/bindings/input/qti-haptics.txt index 8982fc733154b388fb557a9ea2b66a921b79dec5..2c4f4f02ae349af256a6a23dd4f5c60b260503ab 100644 --- a/Documentation/devicetree/bindings/input/qti-haptics.txt +++ b/Documentation/devicetree/bindings/input/qti-haptics.txt @@ -66,19 +66,6 @@ Properties: specified in the LRA actuator datasheet. Allowed values are: 0 to 20475. If this is not specified, 5715us play rate is used. -- qcom,brake-pattern - Usage: optional - Value type: - Definition: Specifies the brake pattern with 4 elements used to enable the - internal reverse braking. Allowed values for each element are: - 0: no brake; - 1: brake with (Vmax / 2) strength; - 2: brake with Vmax strength; - 3: brake with (2 * Vmax) strength; - If this property is specified with an array of non-zero values, - then the brake pattern is applied at the end of the playing - waveform. - - qcom,external-waveform-source Usage: optional Value type: @@ -108,11 +95,12 @@ Following properties are specific only when LRA actuator is used: values: "sine", "square". If this is not specified, sinusoid resonance driver signal is used. -- qcom,lra-auto-resonance-en +- qcom,lra-allow-variable-play-rate Usage: optional Value type: - Definition: If specified, the hardware feature of LRA auto resonance detection - is enabled for correcting the resonance frequency variation. + Definition: If specified, "qcom,wf-play-rate-us" for LRA defined in each + effect could be different with the resonance period of the + LRA actuator. - qcom,lra-auto-resonance-mode Usage: optional @@ -136,16 +124,20 @@ waveforms/effects: Usage: required Value type: Definition: Specifies the waveform pattern in a byte array that will be - played for the effect-id. Allowed values for each element - are: 0x00 to 0x1F. + played for the effect-id. The bit fields of each byte are: + [7]: drive direction, 0 - forward; 1 - reverse + [6]: overdrive, 0 -- 1x drive; 1 -- 2x drive + [5:1]: waveform amplitude + [0]: reserved. 
- qcom,wf-play-rate-us Usage: optional Value type: Definition: Specifies the play period in microseconds for each byte pattern. - For LRA actuator, For LRA, it should be set equal to the resonance - period specified in the LRA actuator datasheet. Allowed values - are: 0 to 20475. + Allowed values are: 0 to 20475. For LRA actuator, if + "qcom,lra-allow-variable-play-rate" is defined, it could be + set to other values not equal to the resonance period of the + LRA actuator. - qcom,wf-repeat-count Usage: optional @@ -159,6 +151,25 @@ waveforms/effects: Definition: Specifies the repeat times for each sample defined in qcom,wf-pattern. Allowed values are: 1, 2, 4, 8. +- qcom,wf-brake-pattern + Usage: optional + Value type: + Definition: Specifies the brake pattern with 4 elements used to enable the + internal reverse braking. Allowed values for each element are: + 0: no brake + 1: brake with (Vmax / 2) strength + 2: brake with Vmax strength + 3: brake with (2 * Vmax) strength + If this property is specified with an array of non-zero values, + then the brake pattern is applied at the end of the playing + waveform. + +- qcom,lra-auto-resonance-disable + Usage: optional + Value type: + Definition: If specified, the hardware feature of LRA auto resonance detection + is disabled. + Example: qcom,haptics@c000 { compatible = "qcom,haptics"; @@ -169,20 +180,23 @@ Example: qcom,actuator-type = "lra"; qcom,vmax-mv = <1800>; qcom,ilim-ma = <400>; - qcom,play-rate-us = <4255>; - qcom,brake-pattern = [03 02 01 00]; + qcom,play-rate-us = <8000>; qcom,lra-resonance-sig-shape = "sine"; - qcom,lra-auto-resonance-mode; + qcom,lra-auto-resonance-mode = "qwd"; + qcom,lra-allow-variable-play-rate; wf_0 { /* CLICK effect */ qcom,effect-id = <0>; - qcom,wf-pattern = [0a 14 1f 1f 1f 1f 14 0a]; + qcom,wf-play-rate-us = <6250>; + qcom,wf-pattern = [3e 3e 3e]; + qcom,lra-auto-resonance-disable; }; wf_5 { /* HEAVY_CLICK effect */ qcom,effect-id = <5>; - qcom,wf-pattern = [08 0a 1a 1f 1f 1a 0a 08]; + qcom,wf-play-rate-us = <6250>; + qcom,wf-pattern = [7e 7e 7e]; }; }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt index 717b8901fb6833c3a641190cd0ba455e4f9b45cb..969bf6531e31f1bdb1a4f957c4aa40cafc3368be 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qti,mpm.txt @@ -17,7 +17,11 @@ Properties: - compatible: Usage: required Value type: - Definition: Should contain "qcom,mpm-gic" and the respective target compatible flag. + Definition: Should contain "qcom,mpm-gic" and the respective target compatible flag + from below ones. + "qcom,mpm-gic-msm8953", + "qcom,mpm-gic-msm8937", + "qcom,mpm-gic-qcs405" - interrupts: Usage: required @@ -52,7 +56,7 @@ Properties: Example: wakegic: wake-gic@7781b8 { - compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8953", "qcom,mpm-gic-msm8937"; + compatible = "qcom,mpm-gic", "qcom,mpm-gic-msm8953"; interrupts = ; reg = <0x601d4 0x1000>, <0xb011008 0x4>; /* MSM_APCS_GCC_BASE 4K */ @@ -70,7 +74,11 @@ properties: - compatible: Usage: required Value type: - Definition: Should contain "qcom,mpm-gpio" and the respective target compatible flag. + Definition: Should contain "qcom,mpm-gpio" and the respective target compatible flag + from below ones. 
+ "qcom,mpm-gpio-msm8953", + "qcom,mpm-gpio-msm8937", + "qcom,mpm-gpio-qcs405" - interrupt-parent: Usage: required @@ -85,7 +93,7 @@ properties: Example: wakegpio: wake-gpio { - compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8953", "qcom,mpm-gpio-msm8937"; + compatible = "qcom,mpm-gpio", "qcom,mpm-gpio-msm8953"; interrupt-controller; interrupt-parent = <&tlmm>; #interrupt-cells = <2>; diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc-sdmmagpie.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc-sdmmagpie.txt new file mode 100644 index 0000000000000000000000000000000000000000..968019c5ad99ac6bbc8cb62d5f4a3edde690ff9e --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc-sdmmagpie.txt @@ -0,0 +1,63 @@ +QTI PDC interrupt controller + +PDC is QTI's platform parent interrupt controller that serves as wakeup source. + +Newer QTI SOCs are replacing MPM (MSM sleep Power Manager) with PDC (Power +Domain Controller) to manage subsystem wakeups and resources during sleep. +This driver marks the wakeup interrupts in APSS PDC such that it monitors the +interrupts when the system is asleep, wakes up the APSS when one of these +interrupts occur and replays it to the subsystem interrupt controller after it +becomes operational. + +Earlier MPM architecture used arch-extension of GIC interrupt +controller to mark enabled wake-up interrupts and monitor these when the +system goes to sleep. Since the arch-extensions are no-longer available +on newer kernel versions, this driver is implemented as hierarchical irq +domain. GIC is parent interrupt controller at the highest level. +Platform interrupt controller PDC is next in hierarchy, followed by others. +This driver only configures the interrupts, does not handle them. + +PDC interrupt configuration involves programming of 2 set of registers: +IRQ_ENABLE_BANK - Enable the irq +IRQ_i_CFG - Configure the interrupt i + +Properties: + +- compatible: + Usage: required + Value type: + Definition: Should contain "qcom,pdc-" + +- reg: + Usage: required + Value type: + Definition: Specifies the base physical address for PDC hardware + block for DRV2. + +- interrupt-cells: + Usage: required + Value type: + Definition: Specifies the number of cells needed to encode an interrupt source. + Value must be 3. + The encoding of these cells are same as described in + Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt + +- interrupt-parent: + Usage: required + Value type: + Definition: Specifies the interrupt parent necessary for hierarchical domain to operate. + +- interrupt-controller: + Usage: required + Value type: + Definition: Identifies the node as an interrupt controller. + +Example: + +pdcgic: interrupt-controller@0xb220000{ + compatible = "qcom,pdc-sdmmagpie"; + reg = <0xb220000 0x30000>; + #interrupt-cells = <3>; + interrupt-parent = <&intc>; + interrupt-controller; +}; diff --git a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt index 5e7eca0fad17d10e932ff2a395e3edda541df412..ab81329df08ed7601fa9bdb42db67bc28dcccd4a 100644 --- a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt +++ b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt @@ -65,6 +65,11 @@ First Level Node - CAM FLASH device Value type: Definition: Max duration in ms flash can glow. +- wled-flash-support + Usage: optional + Value type: + Definition: To identity wled flash hardware support. 
+ - gpios Usage: optional Value type: @@ -109,7 +114,8 @@ led_flash_rear: qcom,camera-flash@0 { flash-source = <&pmi8998_flash0 &pmi8998_flash1>; torch-source = <&pmi8998_torch0 &pmi8998_torch1>; switch-source = <&pmi8998_switch0>; - qcom,slave-id = <0x00 0x00 0x0011>; + wled-flash-support; + qcom,slave-id = <0x00 0x00 0x0011>; qcom,cci-master = <0>; gpios = <&msmgpio 23 0>, <&msmgpio 24 0>; diff --git a/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt new file mode 100644 index 0000000000000000000000000000000000000000..5dc958dab32fc8a5eff516e313336eaedbfb8d5d --- /dev/null +++ b/Documentation/devicetree/bindings/net/qcom,emac-dwc-eqos.txt @@ -0,0 +1,86 @@ +Qualcomm Technologies Inc. EMAC Gigabit Ethernet controller + +This network controller consists of the MAC and +RGMII IO Macro for interfacing with PHY. + +Required properties: + +emac_hw node: +- compatible: Should be "qcom,emac-dwc-eqos" +- reg: Offset and length of the register regions for the mac and io-macro +- interrupts: Interrupt number used by this controller +- io-macro-info: Internal io-macro-info +- emac_emb_smmu: Internal emac smmu node + +Optional: +- qcom,msm-bus,name: String representing the client-name +- qcom,msm-bus,num-cases: Total number of usecases +- qcom,msm-bus,num-paths: Total number of master-slave pairs +- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing: + master-id, slave-id, arbitrated bandwidth + in KBps, instantaneous bandwidth in KBps +qcom,bus-vector-names: specifies string IDs for the corresponding bus vectors + in the same order as qcom,msm-bus,vectors-KBps property. +- qcom,arm-smmu: Boolean, if present enables EMAC SMMU support in sdxpoorwills. + +Internal io-macro-info: +- io-macro-bypass-mode: <0 or 1> internal or external delay configuration +- io-interface: PHY interface used + +Internal emac_emb_smmu: +- compatible: Should be "qcom,emac-smmu-embedded". +- qcom,smmu-s1-bypass: Boolean, if present S1 bypass is enabled. +- iommus: Includes the <&smmu_phandle stream_id size> pair for each context + bank. +- qcom,iova-mapping: of the smmu context bank. 
+ +Example: + +soc { + emac_hw: qcom,emac@00020000 { + compatible = "qcom,emac-dwc-eqos"; + qcom,arm-smmu; + reg = <0x20000 0x10000>, + <0x36000 0x100>; + <0x3D00000 0x300000>; + reg-names = "emac-base", "rgmii-base"; + interrupts = <0 62 4>, <0 60 4>, + <0 49 4>, <0 50 4>, + <0 51 4>, <0 52 4>, + <0 53 4>, <0 54 4>, + <0 55 4>, <0 56 4>, + <0 57 4>; + interrupt-names = "sbd-intr", "lpi-intr", + "phy-intr", "tx-ch0-intr", + "tx-ch1-intr", "tx-ch2-intr", + "tx-ch3-intr", "tx-ch4-intr", + "rx-ch0-intr", "rx-ch1-intr", + "rx-ch2-intr", "rx-ch3-intr"; + qcom,msm-bus,name = "emac"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <98 512 0 0>, <1 781 0 0>, /* No vote */ + <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */ + <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */ + <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */ + qcom,bus-vector-names = "10", "100", "1000"; + clock-names = "emac_axi_clk", "emac_ptp_clk", + "emac_rgmii_clk", "emac_slv_ahb_clk"; + qcom,phy-reset = <&tlmm 79 GPIO_ACTIVE_HIGH>; + qcom,phy-intr-redirect = <&tlmm 124 GPIO_ACTIVE_LOW>; + gdsc_emac-supply = <&emac_gdsc>; + + io-macro-info { + io-macro-bypass-mode = <0>; + io-interface = "rgmii"; + }; + + emac_emb_smmu: emac_emb_smmu { + compatible = "qcom,emac-smmu-embedded"; + qcom,smmu-s1-bypass; + iommus = <&apps_smmu 0x3C0 0x0>; + qcom,iova-mapping = <0x80000000 0x40000000>; + }; + }; +} diff --git a/Documentation/devicetree/bindings/nfc/nq-ntag.txt b/Documentation/devicetree/bindings/nfc/nq-ntag.txt new file mode 100644 index 0000000000000000000000000000000000000000..0009fb98c7aebad5cdceb8d2b98f9d5163e147bc --- /dev/null +++ b/Documentation/devicetree/bindings/nfc/nq-ntag.txt @@ -0,0 +1,25 @@ +Qualcomm Technologies, Inc NTx NTAG device + +Near Field Communication wireless tag integrated circuit device(NTAG) is compliant +with NFC Forum Type 2 Tag specification. + +Required properties: + +- compatible: "qcom,nq-ntag" +- reg: i2c slave address. +- pinctrl-names, pinctrl-0, pinctrl-1: references to our pinctrl settings +- qcom,nq-ntagfd : specific gpio for field detect interrupt. + +Example: + + nq-ntag@55 { + compatible = "qcom,nq-ntag"; + reg = <0x55>; + qcom,nq-ntagfd = <&tlmm 22 0x00>; + interrupt-parent = <&tlmm>; + interrupts = <22 0>; + interrupt-names = "ntag_fd"; + pinctrl-names = "ntag_active","ntag_suspend"; + pinctrl-0 = <&ntag_int_active>; + pinctrl-1 = <&ntag_int_suspend>; + }; diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-28nm-hs-usb.txt b/Documentation/devicetree/bindings/phy/qcom,snps-28nm-hs-usb.txt new file mode 100644 index 0000000000000000000000000000000000000000..3505c7fb96ef0d0ae1120b960efbd2854bea5a48 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/qcom,snps-28nm-hs-usb.txt @@ -0,0 +1,76 @@ +Qualcomm Synopsys 28nm Femto phy controller +=========================================== + +Synopsys 28nm femto phy controller supports LS/FS/HS usb connectivity on +Qualcomm chipsets. + +Required properties: +- compatible: + Value type: + Definition: Should contain "qcom,usb-snps-hsphy". + +- reg: + Value type: + Definition: USB PHY base address and length of the register map + +- reg-names: + Value type: + Definition: Names of the reg bases in 1-1 correspondence with the + "reg" property. "phy_csr" is a mandatory reg base. + +- clocks: + Value type: + Definition: See clock-bindings.txt section "consumers". List of + two clock specifiers for interface and core controller + clocks. 
+ +- clock-names: + Value type: + Definition: Names of the clocks in 1-1 correspondence with the "clocks" + property. Must contain "phy_csr_clk" and "ref_clk" clocks. + +- vdd-supply: + Value type: + Definition: phandle to the regulator VDD supply node. + +- vdda18-supply: + Value type: + Definition: phandle to the regulator 1.8V supply node. + +- vdda33-supply: + Value type: + Definition: phandle to the regulator 3.3V supply node. + +- qcom,vdd-voltage-level: + Value type: + Definition: This is a list of three integer values (no, min, max) where + each value corresponding to voltage corner. + +- resets: + Value type: + Definition: See reset.txt section "consumers". PHY reset specifier. + +- reset-names: + Value type: + Definition: Must contain "phy_por_reset" and "phy_reset" strings. + +Example: + usb_phy: phy@78d9000 { + compatible = "qcom,usb-snps-hsphy"; + reg = <0x78d9000 0x400>; + reg-names = "phy_csr"; + + vdd-supply = <&pm8916_s1_corner>; + vdda18-supply = <&pm8916_l7>; + vdda33-supply = <&pm8916_l13>; + + clocks = <&gcc GCC_USB_HS_AHB_CLK>, + <&gcc GCC_USB_HS_SYSTEM_CLK>; + clock-names = "phy_csr_clk", "ref_clk"; + + resets = <&gcc GCC_QUSB2_PHY_BCR>, + <&gcc GCC_USB2_HS_PHY_ONLY_BCR>; + reset-names = "phy_por_reset", "phy_reset"; + + }; + diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt index 36808c888b1877fec6925d083f871d3690c08daf..3606df280e7b2d82441532b0d5d73c5cba42bf99 100644 --- a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt +++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt @@ -71,6 +71,7 @@ Optional properties: - qcom,signal-aop: Boolean. If set, when subsystem is brought up, pil will send a notification to AOP through qmp mailbox driver. +- qcom,mas-crypto: phandle to the bus master of crypto core. Example: qcom,venus@fdce0000 { diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt index 6f2ec9af0de288af23bd42d5138d56d990efce41..dee9520224a939acd4a56200b795a2a12e1ef6ce 100644 --- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt @@ -55,9 +55,9 @@ pins it needs, and how they should be configured, with regard to muxer configuration, drive strength and pullups. If one of these options is not set, its actual value will be unspecified. -This driver supports the generic pin multiplexing and configuration -bindings. For details on each properties, you can refer to -./pinctrl-bindings.txt. +Allwinner A1X Pin Controller supports the generic pin multiplexing and +configuration bindings. For details on each properties, you can refer to + ./pinctrl-bindings.txt. Required sub-node properties: - pins diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d09a6a01457a8b81e970b5329d9ff6594eb5723 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/qcom,lpi-pinctrl.txt @@ -0,0 +1,172 @@ +Qualcomm Technologies, Inc. LPI GPIO controller driver + +This DT bindings describes the GPIO controller driver +being added for supporting LPI (Low Power Island) TLMM +from QTI chipsets. + +Following properties are for LPI GPIO controller device main node. 
+- compatible: + Usage: required + Value type: + Definition: must be "qcom,lpi-pinctrl" + +- reg: + Usage: required + Value type: + Definition: Register base of the GPIO controller and length. + +- qcom,num-gpios: + Usage: required + Value type: + Definition: Number of GPIOs supported by the controller. + +- qcom,lpi-offset-tbl + Usage: required + Value type: + Definition: Offset table of GPIOs supported by the controller. + +- gpio-controller: + Usage: required + Value type: + Definition: Used to mark the device node as a GPIO controller. + +- #gpio-cells: + Usage: required + Value type: + Definition: Must be 2; + The first cell will be used to define gpio number and the + second denotes the flags for this gpio. + +Please refer to ../gpio/gpio.txt for general description of GPIO bindings. + +Please refer to pinctrl-bindings.txt in this directory for details of the +common pinctrl bindings used by client devices, including the meaning of the +phrase "pin configuration node". + +The pin configuration nodes act as a container for an arbitrary number of +subnodes. Each of these subnodes represents some desired configuration for a +pin or a list of pins. This configuration can include the +mux function to select on those pin(s), and various pin configuration +parameters, as listed below. + +SUBNODES: + +The name of each subnode is not important; all subnodes should be enumerated +and processed purely based on their content. + +Each subnode only affects those parameters that are explicitly listed. In +other words, a subnode that lists a mux function but no pin configuration +parameters implies no information about any pin configuration parameters. +Similarly, a pin subnode that describes a pullup parameter implies no +information about e.g. the mux function. + +The following generic properties as defined in pinctrl-bindings.txt are valid +to specify in a pin configuration subnode: + +- pins: + Usage: required + Value type: + Definition: List of gpio pins affected by the properties specified in + this subnode. Valid pins are: gpio0-gpio31 for LPI. + +- function: + Usage: required + Value type: + Definition: Specify the alternative function to be configured for the + specified pins. Valid values are: + "gpio", + "func1", + "func2", + "func3", + "func4", + "func5" + +- bias-disable: + Usage: optional + Value type: + Definition: The specified pins should be configured as no pull. + +- bias-pull-down: + Usage: optional + Value type: + Definition: The specified pins should be configured as pull down. + +- bias-bus-hold: + Usage: optional + Value type: + Definition: The specified pins should be configured as bus-keeper mode. + +- bias-pull-up: + Usage: optional + Value type: + Definition: The specified pins should be configured as pull up. + +- input-enable: + Usage: optional + Value type: + Definition: The specified pins are put in input mode. + +- output-high: + Usage: optional + Value type: + Definition: The specified pins are configured in output mode, driven + high. + +- output-low: + Usage: optional + Value type: + Definition: The specified pins are configured in output mode, driven + low. + +- qcom,drive-strength: + Usage: optional + Value type: + Definition: Selects the drive strength for the specified pins. 
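+
+Note: consumers reference these GPIOs with the two-cell specifier described
+under #gpio-cells above. The client node below is hypothetical and is shown
+only to illustrate the encoding (first cell = GPIO number, second cell =
+flags); it is not part of this binding:
+
+	/* hypothetical consumer of the lpi_tlmm controller shown below */
+	codec_dmic_clk: dmic-clk-consumer {
+		dmic-clk-gpios = <&lpi_tlmm 18 0>;
+	};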
+ +Example: + + lpi_tlmm: lpi_pinctrl@152c000 { + compatible = "qcom,lpi-pinctrl"; + qcom,num-gpios = <32>; + reg = <0x152c000 0>; + gpio-controller; + #gpio-cells = <2>; + qcom,lpi-offset-tbl = <0x00000010>, <0x00000020>, + <0x00000030>, <0x00000040>, + <0x00000050>, <0x00000060>, + <0x00000070>, <0x00000080>, + <0x00000090>, <0x00000100>, + <0x00000110>, <0x00000120>, + <0x00000130>, <0x00000140>, + <0x00000150>, <0x00000160>, + <0x00000170>, <0x00000180>, + <0x00000190>, <0x00000200>, + <0x00000210>; + + + hph_comp_active: hph_comp_active { + mux { + pins = "gpio22"; + function = "func1"; + }; + + config { + pins = "gpio22"; + output-high; + qcom,drive-strength = <8>; + }; + }; + + hph_comp_sleep: hph_comp_sleep { + mux { + pins = "gpio22"; + function = "func1"; + }; + + config { + pins = "gpio22"; + qcom,drive-strength = <2>; + }; + }; + }; + diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt index 6983ad330f03d295733310c639e4e610827b4f5c..ff106b318c91bc60b88d095db12fbac58964adc6 100644 --- a/Documentation/devicetree/bindings/platform/msm/ipa.txt +++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt @@ -89,6 +89,8 @@ memory allocation over a PCIe bridge monitoring of holb via IPA uc is required. -qcom,ipa-fltrt-not-hashable: Boolean context flag to indicate filter/route rules hashing not supported. +- qcom,wlan-ce-db-over-pcie: Boolean context flag to represent WLAN CE DB + over pcie bus or not. IPA pipe sub nodes (A2 static pipes configurations): @@ -120,6 +122,10 @@ Optional properties: controller phandle and "clk_ipa_clk" as macro for "iface_clk" - clock-names: This property shall contain the clock input names used by driver in same order as the clocks property.This should be "iface_clk" +- emulator-bar0-offset: Specifies the offset, within PCIe BAR0, where + IPA/GSI programmable registers reside. This property is used only + with the IPA/GSI emulation system, which is connected to and + communicated with via PCIe. IPA SMMU sub nodes diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt index ceb97b552e3e24c9ed0111b24945380f3296c1db..e6158732517a62aa1b742db634711c83f9347761 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt @@ -111,6 +111,46 @@ First Level Node - FG Gen4 device Element 0 - Retry value for timer Element 1 - Maximum value for timer +- qcom,fg-esr-cal-soc-thresh + Usage: optional + Value type: + Definition: SOC thresholds applied when ESR fast calibration is done. + Array of 2 elements if specified. This should be specified + if ESR fast calibration algorithm is needed. + Element 0 - Minimum SOC threshold in percentage + Element 1 - Maximum SOC threshold in percentage + +- qcom,fg-esr-cal-temp-thresh + Usage: optional + Value type: + Definition: Battery temperature thresholds applied when ESR fast + calibration is done. Array of 2 elements if specified. + This should be specified if ESR fast calibration algorithm + is needed. + Element 0 - Minimum temperature threshold in Celsius + Element 1 - Maximum temperature threshold in Celsius + +- qcom,fg-delta-esr-disable-count + Usage: optional + Value type: + Definition: Value after which delta ESR interrupt will be disabled. + This is applicable only when ESR fast calibration is + enabled. Default value is 10. 
+ +- qcom,fg-delta-esr-thr + Usage: optional + Value type: + Definition: Threshold for delta ESR interrupt in uOhms. Default value + is 1832. If ESR fast calibration algorithm is enabled, this + will be overridden with a maximum value. + +- qcom,fg-esr-filter-factor + Usage: optional + Value type: + Definition: ESR filter factor used in ESR fast calibration algorithm. + This factor will be used when ESR correction delta is + applied after the calculation. Default value is 2. + - qcom,fg-esr-pulse-thresh-ma Usage: optional Value type: @@ -229,6 +269,17 @@ First Level Node - FG Gen4 device This should be defined in the ascending order and in the range of 0-100. Array limit is set to 3. +- qcom,ki-coeff-low-dischg + Usage: optional + Value type: + Definition: Array of ki coefficient values for low discharge current + during discharge. These values will be applied when the + monotonic SOC goes below the SOC threshold specified under + qcom,ki-coeff-soc-dischg. Array limit is set to 3. This + property should be specified if qcom,ki-coeff-soc-dischg + is specified to make it fully functional. Value has no + unit. Allowed range is 62 to 15564 in micro units. + - qcom,ki-coeff-med-dischg Usage: optional Value type: @@ -251,6 +302,27 @@ First Level Node - FG Gen4 device is specified to make it fully functional. Value has no unit. Allowed range is 62 to 15564 in micro units. +- qcom,ki-coeff-low-chg + Usage: optional + Value type: + Definition: ki coefficient value for low charge current during + charging. Value has no unit. Allowed range is 62 to 15564 + in micro units. + +- qcom,ki-coeff-med-chg + Usage: optional + Value type: + Definition: ki coefficient value for medium charge current during + charging. Value has no unit. Allowed range is 62 to 15564 + in micro units. + +- qcom,ki-coeff-hi-chg + Usage: optional + Value type: + Definition: ki coefficient value for high charge current during + charging. Value has no unit. Allowed range is 62 to 15564 + in micro units. + - qcom,fg-rconn-uohms Usage: optional Value type: diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt index afeb65dc86d0b585f9d0d43dce87cc590b5b2ac6..0da71a3e163ff3651d213ae27941c26a6c1af727 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qg.txt @@ -262,6 +262,45 @@ First Level Node - QGAUGE device capacity learning cycle. If this is not specified, then the default value is 0. Unit is in decipercentage. +- qcom,esr-disable + Usage: optional + Value type: + Definition: Boolean property to disable ESR estimation. If not defined + ESR estimation stays enabled for charge-cycles. + +- qcom,esr-discharge-enable + Usage: optional + Value type: + Definition: Boolean property to enable ESR estimation during discharge. + Only valid if 'qcom,esr-disable' is not defined. + +- qcom,esr-qual-current-ua + Usage: optional + Value type: + Definition: Minimum current differential in uA to qualify an ESR + reading as valid. If not defined the value defaults + to 130mA. + +- qcom,esr-qual-vbatt-uv + Usage: optional + Value type: + Definition: Minimum vbatt differential in uV to qualify an ESR + reading as valid. If not defined the value defaults + to 7mV. + +- qcom,esr-disable-soc + Usage: optional + Value type: + Definition: Minimum battery SOC below which ESR will not be + attempted by QG. If not defined the value defaults + to 10%. 
+ +- qcom,qg-ext-sns + Usage: optional + Value type: + Definition: Boolean property to support external-rsense based + configuration. + ========================================================== Second Level Nodes - Peripherals managed by QGAUGE driver ========================================================== diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt new file mode 100644 index 0000000000000000000000000000000000000000..f27cb33a7335583cf315adaf3a9b4f856eab069d --- /dev/null +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo5.txt @@ -0,0 +1,40 @@ +QPNP Qnovo5 pulse engine + +QPNP Qnovo5 is a PBS based pulse charging engine which works in tandem with the +QPNP SMB5 Charger device. It configures the QPNP SMB5 charger to +charge/discharge as per pulse characteristics. + +The QPNP Qnovo5 pulse engine has a single peripheral assigned to it. + +Properties: +- compatible: + Usage: required + Value type: + Definition: It must be "qcom,qpnp-qnovo5". + +- reg: + Usage: required + Value type: + Definition: Specifies the base address of the module. Qnovo5 is using a + SDAM peripheral so this is the address of the SDAM module + being used. + +- interrupts: + Usage: required + Value type: + Definition: Specifies the interrupt associated with Qnovo5. + +- interrupt-names: + Usage: required + Value type: + Definition: Specifies the interrupt name for Qnovo5. There is only one + interrupt named as "ptrain-done". + +Example: + +qcom,qpnp-qnovo@b000 { + compatible = "qcom,qpnp-qnovo5"; + reg = <0xb000 0x100>; + interrupts = <0x2 0xb0 0x0 IRQ_TYPE_NONE>; + interrupt-names = "ptrain-done"; +}; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt index f50211139a317804ccb1bc608da3555b60b0b023..ed38a7c15dd2cef4f4875b0d8cb14e1876cce3d0 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt @@ -52,6 +52,18 @@ Charger specific properties: addition battery properties will be faked such that the device assumes normal operation. +- qcom,charger-temp-max + Usage: optional + Value type: + Definition: Specifies the charger temp REG_H_THRESHOLD for PM8150B in deciDegC. + If the value is not present, use the setting read from the device. + +- qcom,smb-temp-max + Usage: optional + Value type: + Definition: Specifies the charger temp REG_H_THRESHOLD for SMB1355 in deciDegC. + If the value is not present, use the setting read from the device. + - qcom,fcc-max-ua Usage: optional Value type: diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt index ff5bb515b4c05beb43f083607ae44662319f2e91..4076f3d3fff3a2cc46596fc5302bd116ef28bf6c 100644 --- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt +++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt @@ -14,6 +14,9 @@ Optional properties: - qcom,rpc-latency-us: FastRPC QoS latency vote - qcom,adsp-remoteheap-vmid: FastRPC remote heap VMID list - qcom,secure-context-bank : Bool indicating secure FastRPC context bank. +- qcom,fastrpc-legacy-remote-heap : Bool indicating hypervisor is not supported. 
+- qcom,fastrpc-adsp-audio-pdr: Flag to enable ADSP Audio PDR +- qcom,secure-domains: FastRPC secure domain configuration Optional subnodes: - qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt index 8ff65fa632fdedd739b005027e198051f62c8c34..c06c045126fc9070ca8f4faefc18b17a8de85ee6 100644 --- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt +++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt @@ -21,7 +21,7 @@ Required properties: - interrupts : identifier to the device interrupt - clocks : a list of phandle + clock-specifier pairs, one for each entry in clock names. -- clocks-names : +- clock-names : * "xtal" for external xtal clock identifier * "pclk" for the bus core clock, either the clk81 clock or the gate clock * "baud" for the source of the baudrate generator, can be either the xtal diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt index cf504d0380aeb9d0749cc2ab1f90e2f35a781146..88f947c47adc280f3b4e8a1d3babe47ac6776933 100644 --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt @@ -41,6 +41,8 @@ Required properties: - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. + - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART. + - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART. - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART. - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART. - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART. diff --git a/Documentation/devicetree/bindings/smcinvoke/smcinvoke.txt b/Documentation/devicetree/bindings/smcinvoke/smcinvoke.txt new file mode 100644 index 0000000000000000000000000000000000000000..a0e201c83bf4a671a47d205d40fb1cd2ba1700fb --- /dev/null +++ b/Documentation/devicetree/bindings/smcinvoke/smcinvoke.txt @@ -0,0 +1,11 @@ +* SMCInvoke driver to provide transport between TZ and Linux + +Required properties: +- compatible : Should be "qcom,smcinvoke" +- reg : should contain memory region address reserved for loading secure apps. + +Example: + qcom_smcinvoke: smcinvoke@87900000 { + compatible = "qcom,smcinvoke"; + reg = <0x87900000 0x2200000>; + }; diff --git a/Documentation/devicetree/bindings/soc/qcom/cdsprm.txt b/Documentation/devicetree/bindings/soc/qcom/cdsprm.txt new file mode 100644 index 0000000000000000000000000000000000000000..92304dc56a4282758162180dc7984df16175640b --- /dev/null +++ b/Documentation/devicetree/bindings/soc/qcom/cdsprm.txt @@ -0,0 +1,43 @@ +Qualcomm Technologies, Inc. CDSP Request Manager driver + +CDSP Request Manager driver implements an rpmsg interface with +CDSP subsystem to serve L3 frequency and CPU QoS requests from CDSP. 
+ +Required properties: +- compatible: Must be "qcom,msm-cdsprm-rpmsg" +- qcom,glink-channels: Glink channel for communication with CDSP +- qcom,intents: A list of + +- qcom,msm-cdsp-rm: A sub-device node to define CDSPM RM configuration + parameters + Required properties: + - compatible: Must be "qcom,msm-cdsp-rm" + - qcom,qos-latency-us: pm_qos latency vote to be applied on CDSP request in + micro seconds + - qcom,qos-maxhold-ms: Maximum hold time for pm_qos latency vote from CDSP + in milli seconds + +- qcom,cdsp-l3: A sub-device node to define CDSP L3 target device for L3 + clock voting + Required properties: + - compatible: Must be "qcom,cdsp-l3" + - qcom,target-dev: The DT device that corresponds to the CDSP L3 + devfreq-simple-dev + +Example: + qcom,msm_cdsprm_rpmsg { + compatible = "qcom,msm-cdsprm-rpmsg"; + qcom,glink-channels = "cdsprmglink-apps-dsp"; + qcom,intents = <0x14 64>; + + qcom,cdsp-l3 { + compatible = "qcom,cdsp-l3"; + qcom,target-dev = <&cdsp-cdsp-l3-lat>; + }; + + qcom,msm_cdsp_rm { + compatible = "qcom,msm-cdsp-rm"; + qcom,qos-latency-us = <100>; + qcom,qos-maxhold-ms = <20>; + }; + }; diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt index f1273d30b081a3176375af5ef0e7494d778c21c8..628033f8247e5fc4f16f398cf2a2b17fba11ecc9 100644 --- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt +++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt @@ -220,6 +220,30 @@ Required properties: inCall Music Delivery port ID is 32773. incall Music 2 Delivery port ID is 32770. +* msm_dai_cdc_dma + +[First Level Nodes] + +Required properties: + + - compatible : "qcom,msm-dai-cdc-dma" + +[Second Level Nodes] + +Required properties: + + - compatible : "qcom,msm-dai-cdc-dma-dev" + - qcom,msm-dai-cdc-dma-dev-id : WSA codec dma port ID + Value is from 45056 to 45061. + VA codec dma port ID Value is from 45089 to 45091. + +Optional properties: + +- qcom,msm-dai-is-island-supported: Defines whether this dai supported in + island mode or not. + 0 - Unsupported + 1 - Supported + * msm-auxpcm Required properties: @@ -286,6 +310,10 @@ Optional properties: interface to be used for enabling PCM clock. If not defined, selects default AFE clock interface. +- qcom,msm-dai-is-island-supported: Defines whether this dai supported in + island mode or not. + 0 - Unsupported + 1 - Supported * msm-pcm-hostless @@ -367,6 +395,9 @@ Optional properties: - qcom,mclk-clk-reg: Indicate the register address for mclk. + - qcom,use-pinctrl: Indicates pinctrl required or not for this + clock node. + * audio_slimslave Required properties: @@ -656,6 +687,12 @@ Example: audio_apr: qcom,msm-audio-apr { compatible = "qcom,msm-audio-apr"; qcom,subsys-name = "apr_adsp"; + q6core { + compatible = "qcom,q6core-audio"; + bolero: bolero-cdc { + compatible = "qcom,bolero-codec"; + }; + }; }; qcom,msm-ocmem-audio { @@ -768,9 +805,13 @@ Required properties: Optional properties: -- pinctrl-names: Pinctrl state names for each pin group - configuration. -- pinctrl-x: Defines pinctrl state for each pin group +- pinctrl-names: Pinctrl state names for each pin group + configuration. +- pinctrl-x: Defines pinctrl state for each pin group +- qcom,msm-dai-is-island-supported: Defines whether this dai supported in + island mode or not. + 0 - Unsupported + 1 - Supported Example: @@ -954,6 +995,11 @@ Optional properties: - pinctrl-x: Defines pinctrl state for each pin group. 
+ - qcom,msm-dai-is-island-supported: Defines whether this dai supported in + island mode or not. + 0 - Unsupported + 1 - Supported + Example: qcom,msm-dai-tdm-quat-rx { @@ -1191,3 +1237,149 @@ Example: qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", "SpkrLeft", "SpkrRight"; }; + + +* QCS405 ASoC Machine driver + +Required properties: +- compatible : "qcom,qcs405-asoc-snd". +- qcom,model : The user-visible name of this sound card. +- qcom,audio-routing : A list of the connections between audio components. +- asoc-platform: This is phandle list containing the references to platform device + nodes that are used as part of the sound card dai-links. +- asoc-platform-names: This property contains list of platform names. The order of + the platform names should match to that of the phandle order + given in "asoc-platform". +- asoc-cpu: This is phandle list containing the references to cpu dai device nodes + that are used as part of the sound card dai-links. +- asoc-cpu-names: This property contains list of cpu dai names. The order of the + cpu dai names should match to that of the phandle order given + in "asoc-cpu". The cpu names are in the form of "%s.%d" form, + where the id (%d) field represents the back-end AFE port id that + this CPU dai is associated with. +- asoc-codec: This is phandle list containing the references to codec dai device + nodes that are used as part of the sound card dai-links. +- asoc-codec-names: This property contains list of codec dai names. The order of the + codec dai names should match to that of the phandle order given + in "asoc-codec". +Optional properties: +- clock-names : clock name defined for external clock. +- clocks : external clock defined for codec clock. +- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target +- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target +- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device +- qcom,wcn-btfm : Property to specify if WCN BT/FM chip is used for the target +- qcom,wsa_bolero_codec : Property to specify if WSA macro in Bolero codec is used for this target +- qcom,va_bolero_codec : Property to specify if VA macro in Bolero codec is used for this target +- qcom,tasha_codec : Property to specify if Tasha codec is used for this target +- qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios. 
+Example: + + qcs405_snd { + compatible = "qcom,qcs405-asoc-snd"; + qcom,wsa_bolero_codec = <1>; + qcom,va_bolero_codec = <1>; + qcom,tasha_codec = <1>; + qcom,ext-disp-audio-rx = <1>; + qcom,wcn-btfm = <1>; + qcom,mi2s-audio-intf = <1>; + qcom,auxpcm-audio-intf = <1>; + qcom,msm-mi2s-master = <1>, <1>, <1>, <1>; + + qcom,audio-routing = + "MADINPUT", "MCLK", + "AMIC2", "MIC BIAS2", + "AMIC3", "MIC BIAS2", + "AMIC4", "MIC BIAS2", + "AMIC5", "MIC BIAS3", + "MIC BIAS3", "Handset Mic", + "DMIC0", "MIC BIAS1", + "MIC BIAS1", "Digital Mic0", + "DMIC1", "MIC BIAS1", + "MIC BIAS1", "Digital Mic1", + "DMIC2", "MIC BIAS3", + "MIC BIAS3", "Digital Mic2", + "DMIC3", "MIC BIAS3", + "MIC BIAS3", "Digital Mic3", + "DMIC4", "MIC BIAS4", + "MIC BIAS4", "Digital Mic4", + "DMIC5", "MIC BIAS4", + "MIC BIAS4", "Digital Mic5", + "SpkrLeft IN", "SPK1 OUT", + "SpkrRight IN", "SPK2 OUT"; + + asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>, + <&loopback>, <&compress>, <&hostless>, + <&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>, + <&pcm_noirq>; + asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1", + "msm-pcm-dsp.2", "msm-voip-dsp", + "msm-pcm-voice", "msm-pcm-loopback", + "msm-compress-dsp", "msm-pcm-hostless", + "msm-pcm-afe", "msm-lsm-client", + "msm-pcm-routing", "msm-cpe-lsm", + "msm-compr-dsp", "msm-pcm-dsp-noirq"; + asoc-cpu = <&dai_hdmi>, <&dai_dp>, + <&dai_mi2s0>, <&dai_mi2s1>, + <&dai_mi2s2>, <&dai_mi2s3>, + <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, + <&dai_tert_auxpcm>, <&dai_quat_auxpcm>, + <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>, + <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>, + <&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>, + <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, + <&afe_proxy_tx>, <&incall_record_rx>, + <&incall_record_tx>, <&incall_music_rx>, + <&incall_music_2_rx>, <&sb_5_rx>, <&sb_6_rx>, + <&sb_7_rx>, <&sb_7_tx>, <&sb_8_tx>, + <&usb_audio_rx>, <&usb_audio_tx>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>, + <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>, + <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>, + <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>, + <&wsa_cdc_dma_0_rx>, <&wsa_cdc_dma_0_tx>, + <&wsa_cdc_dma_1_rx>, <&wsa_cdc_dma_1_tx>, + <&wsa_cdc_dma_2_tx>, <&va_cdc_dma_0_tx>, + <&va_cdc_dma_1_tx>; + asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608", + "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", + "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", + "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2", + "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4", + "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385", + "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387", + "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389", + "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391", + "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393", + "msm-dai-q6-dev.16395", "msm-dai-q6-dev.224", + "msm-dai-q6-dev.225", "msm-dai-q6-dev.241", + "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", + "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", + "msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394", + "msm-dai-q6-dev.16396", "msm-dai-q6-dev.16398", + "msm-dai-q6-dev.16399", "msm-dai-q6-dev.16401", + "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865", + "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881", + "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", + "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913", + "msm-dai-q6-cdc-dma-dev.45056", + "msm-dai-q6-cdc-dma-dev.45057", + "msm-dai-q6-cdc-dma-dev.45058", + "msm-dai-q6-cdc-dma-dev.45059", + "msm-dai-q6-cdc-dma-dev.45061", + "msm-dai-q6-cdc-dma-dev.45089", + 
"msm-dai-q6-cdc-dma-dev.45091"; + asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>, + <&bolero>;; + asoc-codec-names = "msm-stub-codec.1", + "msm-ext-disp-audio-codec-rx", + "bolero_codec"; + qcom,wsa-max-devs = <2>; + qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>, + <&wsa881x_0213>, <&wsa881x_0214>; + qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", + "SpkrLeft", "SpkrRight"; + qcom,cdc-dmic-gpios = <&cdc_dmic12_gpios>, <&cdc_dmic34_gpios>, + <&cdc_dmic56_gpios>, <&cdc_dmic78_gpios>; + }; diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt index 4cfe918dd17b57b6cd56310eff959491cf93a28f..28556a1fb2cf42f649db97e7b1c9b1e28612b630 100644 --- a/Documentation/devicetree/bindings/sound/wcd_codec.txt +++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt @@ -4,6 +4,7 @@ Required properties: - compatible : "qcom,pahu-slim-pgd" or "qcom,pahu-i2c" for pahu Codec "qcom,tavil-slim-pgd" or "qcom,tavil-i2c-pgd" for Tavil codec + "qcom,tasha-slim-pgd" or "qcom,tasha-i2c-pgd" for Tasha Codec - elemental-addr: codec slimbus slave PGD enumeration address.(48 bits) - qcom,cdc-reset-gpio: gpio used for codec SOC reset. @@ -439,3 +440,45 @@ msm_sdw_codec: qcom,msm-sdw-codec@152c1000 { }; }; }; + +WSA macro in Bolero codec + +Required properties: + - compatible = "qcom,wsa-macro"; + - reg: Specifies the WSA macro base address for Bolero + soundwire core registers. + - clock-names : clock names defined for WSA macro + - clocks : clock handles defined for WSA macro + +Example: + +&bolero { + wsa_macro: wsa-macro { + compatible = "qcom,wsa-macro"; + reg = <0x0C2C0000 0x0>; + clock-names = "wsa_core_clk", "wsa_npl_clk"; + clocks = <&clock_audio_wsa_1 AUDIO_LPASS_MCLK>, + <&clock_audio_wsa_2 AUDIO_LPASS_MCLK>; + qcom,wsa-swr-gpios = &wsa_swr_gpios; + }; +}; + +VA macro in bolero codec + +Required properties: + - compatible = "qcom,va-macro"; + - reg: Specifies the VA macro base address for Bolero + soundwire core registers. + - clock-names : clock names defined for WSA macro + - clocks : clock handles defined for WSA macro + +Example: + +&bolero { + va_macro: va-macro { + compatible = "qcom,va-macro"; + reg = <0x0C490000 0x0>; + clock-names = "va_core_clk"; + clocks = <&clock_audio_va AUDIO_LPASS_MCLK>; + }; +}; diff --git a/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt b/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba92e093b1343650f037e3ab64c7e70a1b6a6fe1 --- /dev/null +++ b/Documentation/devicetree/bindings/soundwire/swr-mstr-ctrl.txt @@ -0,0 +1,49 @@ +Qualcomm Technologies, Inc. SoundWire Master controller + +* swr_master + +Required properties: + +- compatible : should be "qcom,swr-mstr" +- #address-cells: must be 2 +- #size-cells: must be 0 +- qcom,swr-num-ports: number of ports in the master. +- qcom,swr-port-mapping: contains codec port_type and + corresponding ch-mask entries for possible port types + of the master port. + +* wsa881x + +Required properties: + +- compatible : should be "qcom,wsa881x" +- reg : Unique device ID(48 bits) of Soundwire slave device node. + In the below example, wsa881x is soundwire slave device for + which the swr-devid is <0x0 0x032000> where 0x03 represents + device Unique_ID, 0x20 represents Part_Id1 and 0x00 + represents part_Id2. 
+ + Example: + +swr0: swr_master { + compatible = "qcom,swr-mstr"; + #address-cells = <2>; + #size-cells = <0>; + + qcom,swr-num-ports = <8>; + qcom,swr-port-mapping = <1 SPKR_L 0x1>, + <2 SPKR_L_COMP 0xF>, <3 SPKR_L_BOOST 0x3>, + <4 SPKR_R 0x1>, <5 SPKR_R_COMP 0xF>, + <6 SPKR_R_BOOST 0x3>, <7 SPKR_L_VI 0x3>, + <8 SPKR_R_VI 0x3>; + + wsa881x@32000 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x032000>; + }; + + wsa881x@42000 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x042000>; + }; + }; diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt index 17c3c40a3146d74ff8f73fe46a1ba6b508f54cdf..6ba6e0cf7777e8bf67b71f94b8101bdea8826388 100644 --- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt +++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt @@ -36,6 +36,8 @@ Optional properties: - vdda-pll-max-microamp : specifies max. load that can be drawn from pll supply - vddp-ref-clk-supply : phandle to UFS device ref_clk pad power supply - vddp-ref-clk-max-microamp : specifies max. load that can be drawn from this supply +- vddp-ref-clk-min-uV : specifies min voltage that can be set for reference clock supply +- vddp-ref-clk-max-uV : specifies max voltage that can be set for reference clock supply - qcom,disable-lpm : disable various LPM mechanisms in UFS for platform compatibility (limit link to PWM Gear-1, 1-lane slow mode; disable hibernate, and avoid suspend/resume) diff --git a/Documentation/devicetree/bindings/usb/onsemi_redriver.txt b/Documentation/devicetree/bindings/usb/onsemi_redriver.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b3f1c0e130c71112dd8e23f81d430de24b05270 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/onsemi_redriver.txt @@ -0,0 +1,29 @@ +ON Semiconductor USB Type-C and display port 10Gbps Linear Re-Driver + +Required properties: +- compatible: Must be "onnn,redriver". +- reg: I2C address on the selected bus. +- extcon: phandles to external connector devices. The first phandle + should point to the external connector which provides + both "USB" cable events and "USB-HOST" cable events. + An optional second phandle may be specified for DP + lane events. + +Optional properties: +- eq: Equalization value of re-driver channel A/B/C/D, 8 bit. +- flat-gain: Flat gain control value of re-driver channel A/B/C/D, 8 bit. +- output-comp: Output compression value of re-driver channel A/B/C/D, + 8 bit. +- loss-match: Loss profile matching control value of re-driver channel + A/B/C/D, 8 bit. + +Example: + redriver@19 { + compatible = "onnn,redriver"; + reg = <0x19>; + extcon = <&pm8150b_pdphy>, <&pm8150b_pdphy>; + eq = /bits/ 8 <0x5 0x4 0x4 0x5>; + flat-gain = /bits/ 8 <0x3 0x1 0x1 0x3>; + output-comp = /bits/ 8 <0x2 0x2 0x2 0x2>; + loss-match = /bits/ 8 <0x0 0x3 0x3 0x0>; + }; diff --git a/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt b/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt index 95a5977fe66b3b6c1618ed49ddc3e103c49467ee..3aeb49eda1e618596cd12f2168be15181ecba084 100644 --- a/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt +++ b/Documentation/devicetree/bindings/usb/qcom,msm-phy.txt @@ -138,3 +138,93 @@ Example: "phy_phy_reset"; }; + +QUSB2 High-Speed PHY + +Required properties: + - compatible: Should be "qcom,qusb2phy" or "qcom,qusb2phy-v2" + - reg: Address and length of the QUSB2 PHY register set + - reg-names: Should be "qusb_phy_base". 
+ - -supply: phandle to the regulator device tree node + Required supplies are: + "vdd" : vdd supply for digital circuit operation + "vdda18" : 1.8v high-voltage analog supply + "vdda33" : 3.3v high-voltage analog supply + - clocks: a list of phandles to the PHY clocks. Use as per + Documentation/devicetree/bindings/clock/clock-bindings.txt + - clock-names: Names of the clocks in 1-1 correspondence with the "clocks" + property. "ref_clk_src" is a mandatory clock. + - qcom,vdd-voltage-level: This property must be a list of three integer + values (no, min, max) where each value represents either a voltage in + microvolts or a value corresponding to voltage corner + - phy_type: Should be one of "ulpi" or "utmi". ChipIdea core uses "ulpi" mode. + - resets: reset specifier pair consists of phandle for the reset controller + and reset lines used by this controller. + - reset-names: reset signal name strings sorted in the same order as the resets + property. + - qcom,qusb-phy-reg-offset: Provides important phy register offsets in an order defined in phy driver. + +Optional properties: + - reg-names: Additional registers corresponding with the following: + "efuse_addr": EFUSE address to read and update analog tune parameter. + "emu_phy_base" : phy base address used for programming emulation target phy. + "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset. + "tcsr_clamp_dig_n" : To enable/disable digital clamp to the phy. When + de-asserted, it will prevent random leakage from qusb2 phy resulting from + out of sequence turn on/off of 1p8, 3p3 and DVDD regulators. + "refgen_north_bg_reg" : address used to read REFGEN status for overriding QUSB PHY register. + - clocks: a list of phandles to the PHY clocks. Use as per + Documentation/devicetree/bindings/clock/clock-bindings.txt + - clock-names: Names of the clocks in 1-1 correspondence with the "clocks" + property. "cfg_ahb_clk" and "ref_clk" are optional clocks. + - qcom,qusb-phy-init-seq: QUSB PHY initialization sequence with value,reg pair. + - qcom,qusb-phy-host-init-seq: QUSB PHY initialization sequence for host mode + with value,reg pair. + - qcom,emu-init-seq : emulation initialization sequence with value,reg pair. + - qcom,phy-pll-reset-seq : emulation PLL reset sequence with value,reg pair. + - qcom,emu-dcm-reset-seq : emulation DCM reset sequence with value,reg pair. + - qcom,efuse-bit-pos: start bit position within EFUSE register + - qcom,efuse-num-bits: Number of bits to read from EFUSE register + - qcom,emulation: Indicates that we are running on emulation platform. + - qcom,hold-reset: Indicates that hold QUSB PHY into reset state. + - qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided. + - qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0 + - pinctrl-names/pinctrl-0/1: The GPIOs configured as output function. Names represents "active" + state when attached in host mode and "suspend" state when detached. + - qcom,tune2-efuse-correction: The value to be adjusted from fused value for + improved rise/fall times. + - qcom,host-chirp-erratum: Indicates host chirp fix is required. + - qcom,override-bias-ctrl2: Indicates override is done from driver for + BIAS_CTRL2 register. + - nvmem-cells: specifies the handle to represent the SoC revision. + usually it is defined by qfprom device node. + - nvmem-cell-names: specifies the given nvmem cell name as defined in + qfprom node. 
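+
+For illustration only, a qcom,qusb-phy-init-seq property is written as
+value,register-offset pairs; the numbers below are placeholders rather than a
+validated tuning sequence for any particular target:
+
+	/* placeholder values: <value reg-offset> pairs */
+	qcom,qusb-phy-init-seq = <0xf8 0x80>, <0xb3 0x84>, <0x83 0x88>;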
+ +Example: + qusb_phy: qusb@f9b39000 { + compatible = "qcom,qusb2phy"; + reg = <0x00079000 0x7000>; + reg-names = "qusb_phy_base"; + vdd-supply = <&pm8994_s2_corner>; + vdda18-supply = <&pm8994_l6>; + vdda33-supply = <&pm8994_l24>; + qcom,vdd-voltage-level = <1 5 7>; + qcom,qusb-phy-reg-offset = + <0x240 /* QUSB2PHY_PORT_TUNE1 */ + 0x1a0 /* QUSB2PHY_PLL_COMMON_STATUS_ONE */ + 0x210 /* QUSB2PHY_PWR_CTRL1 */ + 0x230 /* QUSB2PHY_INTR_CTRL */ + 0x0a8 /* QUSB2PHY_PLL_CORE_INPUT_OVERRIDE */ + 0x254 /* QUSB2PHY_TEST1 */ + 0x198>; /* QUSB2PHY_PLL_BIAS_CONTROL_2 */ + qcom,efuse-bit-pos = <21>; + qcom,efuse-num-bits = <3>; + + clocks = <&clock_rpm clk_ln_bb_clk>, + <&clock_gcc clk_gcc_rx2_usb1_clkref_clk>, + <&clock_gcc clk_gcc_usb_phy_cfg_ahb2phy_clk>; + clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk"; + resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>; + reset-names = "phy_reset"; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index b23fd7e585ab1be86de31f4ae366ca287ef690c6..629d03627d9ef606f7c88c0d7967672d8b31ee7e 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -173,6 +173,7 @@ karo Ka-Ro electronics GmbH keithkoep Keith & Koep GmbH keymile Keymile GmbH khadas Khadas +kiebackpeter Kieback & Peter GmbH kinetic Kinetic Technologies kingnovel Kingnovel Technology Co., Ltd. kosagi Sutajio Ko-Usagi PTE Ltd. diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 99ca8e30a4cad4bd93a641e0154f7dccb63247e0..112e96ee1ab33113f6d79ba863167b521de0e33f 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -139,6 +139,7 @@ Table 1-1: Process specific entries in /proc maps Memory maps to executables and library files (2.4) mem Memory held by this process root Link to the root directory of this process + reclaim Reclaim pages in this process stat Process status statm Process memory status information status Process status in human readable form @@ -538,6 +539,25 @@ current value: Any other value written to /proc/PID/clear_refs will have no effect. +The file /proc/PID/reclaim is used to reclaim pages in this process. +To reclaim file-backed pages, + > echo file > /proc/PID/reclaim + +To reclaim anonymous pages, + > echo anon > /proc/PID/reclaim + +To reclaim all pages, + > echo all > /proc/PID/reclaim + +Also, you can specify address range of process so part of address space +will be reclaimed. The format is following as + > echo addr size-byte > /proc/PID/reclaim + +NOTE: addr should be page-aligned. + +Below is example which try to reclaim 2M from 0x100000. + > echo 0x100000 2M > /proc/PID/reclaim + The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags using /proc/kpageflags and number of times a page is mapped using /proc/kpagecount. For detailed explanation, see Documentation/vm/pagemap.txt. diff --git a/Documentation/input/touchscreen/himax.txt b/Documentation/input/touchscreen/himax.txt new file mode 100644 index 0000000000000000000000000000000000000000..37ddc6749b2499d30de12460055e9cb1f74fe2ea --- /dev/null +++ b/Documentation/input/touchscreen/himax.txt @@ -0,0 +1,28 @@ +Himax touch controller + +The Himax touch controller is connected to host processor +via i2c. The controller generates interrupts when the +user touches the panel. 
The host controller is expected +to read the touch coordinates over i2c and pass the coordinates +to the rest of the system. + +Required properties: + + - compatible : Should be "himax,hxcommon" + - reg : i2c slave address of the device. + - interrupt-parent : parent of interrupt. + - himax,irq-gpio : irq gpio. + - himax,reset-gpio : reset gpio. + - vdd-supply : Power supply needed to power up the device. + - avdd-supply : Power source required to power up i2c bus. + - himax,panel-coords : panel coordinates for the chip in pixels. + It is a four tuple consisting of min x, + min y, max x and max y values. + - himax,display-coords : display coordinates in pixels. It is a four + tuple consisting of min x, min y, max x and + max y values + +Optional properties: + - himax,3v3-gpio : gpio acting as 3.3 v supply. + - himax,report_type : Multi-touch protocol type. Default 0. + 0 for protocol A, 1 for protocol B. diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt index cfc66ea72329ba4d953da900bec39e3c0c19d70a..a365656e48737fca354656299db4c03fb2284aa0 100644 --- a/Documentation/networking/netdev-FAQ.txt +++ b/Documentation/networking/netdev-FAQ.txt @@ -176,6 +176,15 @@ A: No. See above answer. In short, if you think it really belongs in dash marker line as described in Documentation/process/submitting-patches.rst to temporarily embed that information into the patch that you send. +Q: Are all networking bug fixes backported to all stable releases? + +A: Due to capacity, Dave could only take care of the backports for the last + 2 stable releases. For earlier stable releases, each stable branch maintainer + is supposed to take care of them. If you find any patch is missing from an + earlier stable branch, please notify stable@vger.kernel.org with either a + commit ID or a formal patch backported, and CC Dave and other relevant + networking developers. + Q: Someone said that the comment style and coding convention is different for the networking content. Is this true? diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt index 361789df51ecf58d5083436792588d2f12faa7f2..d1aecf53badb6aac0dc99b424149fc0793131bf1 100644 --- a/Documentation/printk-formats.txt +++ b/Documentation/printk-formats.txt @@ -397,11 +397,10 @@ struct clk %pC pll1 %pCn pll1 - %pCr 1560000000 For printing struct clk structures. ``%pC`` and ``%pCn`` print the name (Common Clock Framework) or address (legacy clock framework) of the -structure; ``%pCr`` prints the current clock rate. +structure. Passed by reference. diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index b60e950d3a6a07509fe6015c9235cb3270bd5bb0..3213305f746a329d01e42683ad0eb6789d5124c4 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -65,6 +65,7 @@ Currently, these files are in /proc/sys/vm: - vfs_cache_pressure - watermark_scale_factor - zone_reclaim_mode +- want_old_faultaround_pte ============================================================== @@ -932,4 +933,25 @@ Allowing regular swap effectively restricts allocations to the local node unless explicitly overridden by memory policies or cpuset configurations. +============================================================= + +want_old_faultaround_pte: + +By default faultaround code produces young pte. When want_old_faultaround_pte is +set to 1, faultaround produces old ptes. 
+ +During sparse file access faultaround gets more pages mapped and when all of +them are young (default), under memory pressure, this makes vmscan swap out anon +pages instead, or to drop other page cache pages which otherwise stay resident. +Setting want_old_faultaround_pte to 1 avoids this. + +Making the faultaround ptes old can result in performance regression on some +architectures. This is due to cycles spent in micro-faults which would take page +walk to set young bit in the pte. One such known test that shows a regression on +x86 is unixbench shell8. Set want_old_faultaround_pte to 1 on architectures +which does not show this regression or if the workload shows overall performance +benefit with old faultaround ptes. + +The default value is 0. + ============ End of Document ================================= diff --git a/Makefile b/Makefile index d946b1ed4ed738d45a825b50a79c304aa80e2ff1..459e099cbfd00772add3d9c7ee1f956cef800533 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 48 +SUBLEVEL = 53 EXTRAVERSION = NAME = Petit Gorille diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a4a62a217ed3613edecda83f32ff3d5df17307cb..9756f8ab7867f9ed0e1fb3cc41247eb0264448fc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1902,6 +1902,26 @@ config BUILD_ARM_APPENDED_DTB_IMAGE Enabling this option will cause a concatenated zImage and list of DTBs to be built by default (instead of a standalone zImage.) The image will built in arch/arm/boot/zImage-dtb +choice + prompt "Appended DTB Kernel Image name" + depends on BUILD_ARM_APPENDED_DTB_IMAGE + default ZIMG_DTB + help + Enabling this option will cause a specific kernel image Image or + Image.gz to be used for final image creation. 
+ The image will built in arch/arm/boot/IMAGE-NAME-dtb + + config ZIMG_DTB + bool "zImage-dtb" + config IMG_DTB + bool "Image-dtb" +endchoice + +config BUILD_ARM_APPENDED_KERNEL_IMAGE_NAME + string + depends on BUILD_ARM_APPENDED_DTB_IMAGE + default "zImage-dtb" if ZIMG_DTB + default "Image-dtb" if IMG_DTB config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES string "Default dtb names" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 45baf1b38f2937e152df4ecc34d332d47b19f244..3d806f9026100a8f76c98a56d57fdec872c7411e 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -307,7 +307,7 @@ boot := arch/arm/boot ifeq ($(CONFIG_XIP_KERNEL),y) KBUILD_IMAGE := $(boot)/xipImage else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y) -KBUILD_IMAGE := $(boot)/zImage-dtb +KBUILD_IMAGE := $(boot)/$(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_KERNEL_IMAGE_NAME)) else KBUILD_IMAGE := $(boot)/zImage endif @@ -361,6 +361,10 @@ endif zImage-dtb: vmlinux scripts dtbs $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@ +Image-dtb: vmlinux scripts dtbs + $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) DTSSUBDIR=$(DTSSUBDIR) $(boot)/$@ + + # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore index ce1c5ff746e7d2930fc0ec2005b6e65b35bd9a91..c8ed363b6a4424cb7b8cbd08be06ecbab7b3e99c 100644 --- a/arch/arm/boot/.gitignore +++ b/arch/arm/boot/.gitignore @@ -3,3 +3,5 @@ zImage xipImage bootpImage uImage +Image-dtb-hdr +Image-dtb diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index 4175dfe4322531831877fdfaf57908a38776de34..2fa123e315cc481b00f65327b37be81c33d4db5e 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -59,6 +59,14 @@ $(obj)/xipImage: FORCE $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) +$(obj)/Image-dtb-hdr: $(obj)/Image FORCE + echo -n 'UNCOMPRESSED_IMG' > $@ && \ + $(call size_append, $(filter-out FORCE,$^)) >> $@ + +$(obj)/Image-dtb: $(obj)/Image-dtb-hdr $(obj)/Image $(DTB_OBJS) FORCE + $(call if_changed,cat) + @echo ' Kernel: $@ is ready' + $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 5b9e2d4bc1b3fb06c85a40132170b0c2dad4fe30..03c122779783d36ee9c86936933b20ffee954362 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -29,19 +29,19 @@ #if defined(CONFIG_DEBUG_ICEDCC) #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c0, c5, 0 .endm #elif defined(CONFIG_CPU_XSCALE) - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c8, c0, 0 .endm #else - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 .endm .macro writeb, ch, rb mcr p14, 0, \ch, c1, c0, 0 @@ -57,7 +57,7 @@ .endm #if defined(CONFIG_ARCH_SA1100) - .macro loadsp, rb, tmp + .macro loadsp, rb, tmp1, tmp2 mov \rb, #0x80000000 @ physical base address #ifdef CONFIG_DEBUG_LL_SER3 add \rb, \rb, #0x00050000 @ Ser3 @@ -66,8 +66,8 @@ #endif .endm #else - .macro loadsp, rb, tmp - addruart \rb, \tmp + .macro loadsp, rb, tmp1, tmp2 + addruart \rb, \tmp1, \tmp2 .endm #endif #endif @@ -559,8 +559,6 @@ not_relocated: mov r0, #0 bl decompress_kernel bl cache_clean_flush bl cache_off - mov r1, r7 @ restore architecture number - mov r2, r8 @ restore atags pointer #ifdef 
CONFIG_ARM_VIRT_EXT mrs r0, spsr @ Get saved CPU boot mode @@ -1297,7 +1295,7 @@ phex: adr r3, phexbuf b 1b @ puts corrupts {r0, r1, r2, r3} -puts: loadsp r3, r1 +puts: loadsp r3, r2, r1 1: ldrb r2, [r0], #1 teq r2, #0 moveq pc, lr @@ -1314,8 +1312,8 @@ puts: loadsp r3, r1 @ putc corrupts {r0, r1, r2, r3} putc: mov r2, r0 + loadsp r3, r1, r0 mov r0, #0 - loadsp r3, r1 b 2b @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} @@ -1365,6 +1363,8 @@ __hyp_reentry_vectors: __enter_kernel: mov r0, #0 @ must be 0 + mov r1, r7 @ restore architecture number + mov r2, r8 @ restore atags pointer ARM( mov pc, r4 ) @ call kernel M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class THUMB( bx r4 ) @ entry point is always ARM for A/R classes diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 7c957ea06c66ce6b603277e452584dc8df05e5ef..9a9902974b1b86d16c0a9e8280c0fa6da0de8222 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -69,7 +69,7 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index af68ef7b0caadee75e8d8fc9bffee1153ed355b9..8a15f7193c829bcea8cda3553d0622a4c9222406 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -34,8 +34,6 @@ pmx_core: pinmux@14120 { compatible = "pinctrl-single"; reg = <0x14120 0x50>; - #address-cells = <1>; - #size-cells = <0>; #pinctrl-cells = <2>; pinctrl-single,bit-per-mux; pinctrl-single,register-width = <32>; diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index e262fa9ef3346207940f62af3aa626738a637491..c335b923753a3a6ebb9220c06c106409566e6164 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -26,7 +26,7 @@ gpio = <&gpio1 3 0>; /* gpio_3 */ startup-delay-us = <70000>; enable-active-high; - vin-supply = <&vmmc2>; + vin-supply = <&vaux3>; }; /* HS USB Host PHY on PORT 1 */ @@ -108,6 +108,7 @@ twl_audio: audio { compatible = "ti,twl4030-audio"; codec { + ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>; }; }; }; @@ -221,6 +222,7 @@ pinctrl-single,pins = < OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */ >; }; }; @@ -235,7 +237,7 @@ }; wl127x_gpio: pinmux_wl127x_gpio_pin { pinctrl-single,pins = < - OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */ + OMAP3_WKUP_IOPAD(0x2a0a, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */ OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */ >; }; @@ -270,6 +272,11 @@ #include "twl4030.dtsi" #include "twl4030_omap3.dtsi" +&vaux3 { + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; +}; + &twl { twl_power: power { compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle"; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index 36983a7d7cfd4915a5f22b2e5d0c6e28f77125c8..1853573235724419002fd7617ddb9bf0e9a20fc3 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -22,11 +22,12 @@ #include #include #include -#include "skeleton64.dtsi" / { compatible = "mediatek,mt7623"; interrupt-parent = <&sysirq>; + #address-cells = <2>; + #size-cells = <2>; cpu_opp_table: opp_table { compatible = 
"operating-points-v2"; diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts index 7de704575aeeb80bc912cf7e0760e5cb1479a965..e96c0ca97972098615d065f9155c1f9e0c3da685 100644 --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts @@ -100,6 +100,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/mt7623n-rfb.dtsi b/arch/arm/boot/dts/mt7623n-rfb.dtsi index 256c5fd947bf4344bc6fbd42156ca523a52c3489..43c9d7ca23a0d86a4db1ba55d8e963eb9a256d7b 100644 --- a/arch/arm/boot/dts/mt7623n-rfb.dtsi +++ b/arch/arm/boot/dts/mt7623n-rfb.dtsi @@ -47,6 +47,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 8d9f42a422cbe3c630bc4bca70ebb9a155bcb286..10d2fa183a9ff417f5165a2d8808a991cf96fd22 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -744,13 +744,13 @@ nand0: nand@ff900000 { #address-cells = <0x1>; #size-cells = <0x1>; - compatible = "denali,denali-nand-dt"; + compatible = "altr,socfpga-denali-nand"; reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; interrupts = <0x0 0x90 0x4>; dma-mask = <0xffffffff>; - clocks = <&nand_clk>; + clocks = <&nand_x_clk>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index bead79e4b2aa2b624b8f7d21cef4751d6536b724..791ca15c799eba98850cbc3d4b96be7a509c422f 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -593,8 +593,7 @@ #size-cells = <0>; reg = <0xffda5000 0x100>; interrupts = <0 102 4>; - num-chipselect = <4>; - bus-num = <0>; + num-cs = <4>; /*32bit_access;*/ tx-dma-channel = <&pdma 16>; rx-dma-channel = <&pdma 17>; @@ -633,7 +632,7 @@ nand: nand@ffb90000 { #address-cells = <1>; #size-cells = <1>; - compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand"; + compatible = "altr,socfpga-denali-nand"; reg = <0xffb90000 0x72000>, <0xffb80000 0x10000>; reg-names = "nand_data", "denali_reg"; diff --git a/arch/arm/configs/qcs405_defconfig b/arch/arm/configs/qcs405_defconfig deleted file mode 100644 index b6ee98ca78ef9be69f8e98c3e72485fb46704f02..0000000000000000000000000000000000000000 --- a/arch/arm/configs/qcs405_defconfig +++ /dev/null @@ -1,488 +0,0 @@ -CONFIG_AUDIT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_DEBUG=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_QCS405=y -# CONFIG_VDSO is not set -CONFIG_SMP=y -CONFIG_ARM_PSCI=y -CONFIG_PREEMPT=y -CONFIG_CLEANCACHE=y -CONFIG_CMA=y -CONFIG_CMA_DEBUGFS=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y -CONFIG_CPU_IDLE=y -CONFIG_VFP=y -CONFIG_NEON=y -CONFIG_KERNEL_MODE_NEON=y -# 
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_PM_DEBUG=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_DEBUGFS=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y 
-CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_BT=y -CONFIG_BT_RFCOMM=y -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=y -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=y -CONFIG_CFG80211=y -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_RFKILL=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_CMA=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_MSM_QPIC_NAND=y -CONFIG_MTD_NAND=y -CONFIG_MTD_UBI=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_UID_SYS_STATS=y -CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_KS8851=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y -CONFIG_USB_NET_SMSC75XX=y -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_INPUT_EVDEV=y -CONFIG_INPUT_EVBUG=m -CONFIG_INPUT_KEYRESET=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -CONFIG_JOYSTICK_XPAD=y -CONFIG_INPUT_TABLET=y -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ATMEL_MXT=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_KEYCHORD=y -CONFIG_INPUT_UINPUT=y -CONFIG_INPUT_GPIO=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVMEM is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_I2C_CHARDEV=y -CONFIG_SPI=y -CONFIG_SPI_DEBUG=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_PINCTRL_QCS405=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_GPIOLIB=y -CONFIG_POWER_SUPPLY=y -CONFIG_SMB1351_USB_CHARGER=y -CONFIG_THERMAL=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_CPR=y -CONFIG_REGULATOR_MEM_ACC=y -CONFIG_REGULATOR_RPM_SMD=y -CONFIG_REGULATOR_SPM=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SOC_CAMERA=y -CONFIG_SOC_CAMERA_PLATFORM=y -CONFIG_FB=y -CONFIG_FB_MSM=y -CONFIG_FB_MSM_MDSS=y -CONFIG_FB_MSM_MDSS_WRITEBACK=y -CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y -CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_SOC=y -CONFIG_HIDRAW=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_USB_HIDDEV=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 
-CONFIG_USB_MON=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_ACM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_STORAGE_DATAFAB=y -CONFIG_USB_STORAGE_FREECOM=y -CONFIG_USB_STORAGE_ISD200=y -CONFIG_USB_STORAGE_USBAT=y -CONFIG_USB_STORAGE_SDDR09=y -CONFIG_USB_STORAGE_SDDR55=y -CONFIG_USB_STORAGE_JUMPSHOT=y -CONFIG_USB_STORAGE_ALAUDA=y -CONFIG_USB_STORAGE_KARMA=y -CONFIG_USB_STORAGE_CYPRESS_ATACB=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_MSM=y -CONFIG_USB_SERIAL=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_DEBUG_FILES=y -CONFIG_USB_GADGET_DEBUG_FS=y -CONFIG_USB_GADGET_VBUS_DRAW=500 -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=m -CONFIG_MMC_RING_BUFFER=y -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_UIO=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_COMMON_CLK_QCOM=y -CONFIG_QCOM_CLK_SMD_RPM=y -CONFIG_MDM_GCC_QCS405=y -CONFIG_MDM_DEBUGCC_QCS405=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_MAILBOX=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_ARM_SMMU=y -CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_IOMMU_DEBUG=y -CONFIG_IOMMU_DEBUG_TRACKING=y -CONFIG_IOMMU_TESTS=y -CONFIG_QCOM_IOMMU=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_RPM=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_SMD=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD_RPM=y -CONFIG_MSM_SPM=y -CONFIG_MSM_L2_SPM=y -CONFIG_QCOM_SCM=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SERVICE_LOCATOR=y -CONFIG_MSM_SERVICE_NOTIFIER=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_SYSMON_QMI_COMM=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_QCOM_DCC_V2=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -# CONFIG_MSM_JTAGV8 is not set -CONFIG_IIO=y -CONFIG_QCOM_SPMI_ADC5=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_QTI_MPM=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y -CONFIG_PAGE_OWNER=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_OBJECTS=y -CONFIG_DEBUG_OBJECTS_FREE=y -CONFIG_DEBUG_OBJECTS_TIMERS=y -CONFIG_DEBUG_OBJECTS_WORK=y -CONFIG_DEBUG_OBJECTS_RCU_HEAD=y -CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_ATOMIC_SLEEP=y -CONFIG_DEBUG_LIST=y -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y -CONFIG_UFS_FAULT_INJECTION=y -CONFIG_FAULT_INJECTION_DEBUG_FS=y -CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y 
-CONFIG_IPC_LOGGING=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_LKDTM=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -CONFIG_CORESIGHT_SOURCE_ETM4X=y -CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_DUMMY=y -CONFIG_CORESIGHT_REMOTE_ETM=y -CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_LSM_MMAP_MIN_ADDR=4096 -CONFIG_HARDENED_USERCOPY=y -CONFIG_SECURITY_SELINUX=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y diff --git a/arch/arm/configs/qcs405_defconfig b/arch/arm/configs/qcs405_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..372fd3e54d888c039678b077724b27f28e68fa29 --- /dev/null +++ b/arch/arm/configs/qcs405_defconfig @@ -0,0 +1 @@ +vendor/qcs405_defconfig \ No newline at end of file diff --git a/arch/arm/configs/sdxprairie-perf_defconfig b/arch/arm/configs/sdxprairie-perf_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..771ec5304d7df0e57cb9309b2b6f64c0deebd528 --- /dev/null +++ b/arch/arm/configs/sdxprairie-perf_defconfig @@ -0,0 +1 @@ +vendor/sdxprairie-perf_defconfig \ No newline at end of file diff --git a/arch/arm/configs/sdxprairie_defconfig b/arch/arm/configs/sdxprairie_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..744d827b789a1fc949ecdefc472eac9a8cf802e6 --- /dev/null +++ b/arch/arm/configs/sdxprairie_defconfig @@ -0,0 +1 @@ +vendor/sdxprairie_defconfig \ No newline at end of file diff --git a/arch/arm/configs/vendor/qcs405_defconfig b/arch/arm/configs/vendor/qcs405_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..27ab2f3f794e8cb0b4baa5c932d13582b9b7556e --- /dev/null +++ b/arch/arm/configs/vendor/qcs405_defconfig @@ -0,0 +1,519 @@ +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_QCS405=y +# CONFIG_VDSO is not set +CONFIG_SMP=y +CONFIG_ARM_PSCI=y +CONFIG_PREEMPT=y +CONFIG_HIGHMEM=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_SECCOMP=y +CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_IDLE=y +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_KERNEL_MODE_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y 
+CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=y +CONFIG_CFG80211=y 
+CONFIG_CFG80211_INTERNAL_REGDB=y +CONFIG_RFKILL=y +CONFIG_NTAG_NQ=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_MSM_QPIC_NAND=y +CONFIG_MTD_NAND=y +CONFIG_MTD_UBI=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_UID_SYS_STATS=y +CONFIG_QPNP_MISC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_KS8851=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y +CONFIG_USB_NET_SMSC75XX=y +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=m +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL_MXT=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_MSM_ADSPRPC=y +CONFIG_I2C_CHARDEV=y +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_PINCTRL_QCS405=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIOLIB=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_SUPPLY=y +CONFIG_SMB1351_USB_CHARGER=y +CONFIG_THERMAL=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_CPR=y +CONFIG_REGULATOR_MEM_ACC=y +CONFIG_REGULATOR_RPM_SMD=y +CONFIG_REGULATOR_SPM=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SOC_CAMERA=y +CONFIG_SOC_CAMERA_PLATFORM=y +CONFIG_FB=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y 
+CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_SERIAL=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_MSM_SNPS_FEMTO_PHY=y +CONFIG_USB_MSM_SSPHY=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=m +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQ_HCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_MDM_GCC_QCS405=y +CONFIG_MDM_DEBUGCC_QCS405=y +CONFIG_CLOCK_CPU_QCS405=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_QCOM_IOMMU=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_SMD=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_SPM=y +CONFIG_MSM_L2_SPM=y +CONFIG_QCOM_SCM=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_WDOG_IPI_ENABLE=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_MSM_TZ_SMMU=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +# CONFIG_MSM_JTAGV8 is not set +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QTI_MPM=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y 
+CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_UFS_FAULT_INJECTION=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_EVENT=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_LSM_MMAP_MIN_ADDR=4096 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y diff --git a/arch/arm/configs/vendor/sdxprairie-perf_defconfig b/arch/arm/configs/vendor/sdxprairie-perf_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..c4f409d24d54a862d666a8cfc8b617aa97f7ea2e --- /dev/null +++ b/arch/arm/configs/vendor/sdxprairie-perf_defconfig @@ -0,0 +1,279 @@ +CONFIG_LOCALVERSION="-perf" +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +# CONFIG_FAIR_GROUP_SCHED is not set +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SDXPRAIRIE=y +# CONFIG_VDSO is not set +CONFIG_PREEMPT=y +CONFIG_CMA=y +CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_IDLE=y +CONFIG_VFP=y +CONFIG_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V2=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y 
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_IP_SET=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_PRIO=y +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=y +CONFIG_BT_HCIUART=y +CONFIG_BT_HCIUART_H4=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_DEBUGFS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +CONFIG_CFG80211_WEXT=y +CONFIG_RFKILL=y +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=12 +CONFIG_MTD=y +CONFIG_MTD_TESTS=m +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_UBI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_INTEL is not set +CONFIG_KS8851=y +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +CONFIG_PPP=y +CONFIG_PPP_ASYNC=y +CONFIG_USB_USBNET=y +CONFIG_USB_NET_SMSC75XX=y +CONFIG_USB_NET_SMSC95XX=y +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=m +CONFIG_SERIO_LIBPS2=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_MSM=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=m +CONFIG_PINCTRL_SDXPRAIRIE=y +CONFIG_GPIOLIB=y +CONFIG_POWER_RESET=y +CONFIG_POWER_SUPPLY=y +CONFIG_THERMAL=y +CONFIG_REGULATOR=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=y 
+CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_MSM=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DEBUG=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_RTC_CLASS=y +CONFIG_DMADEVICES=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_USB_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_ECM_IPA=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_QCOM_SMEM=y +CONFIG_PWM=y +CONFIG_ANDROID=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_IPC_LOGGING=y +# CONFIG_FTRACE is not set diff --git a/arch/arm/configs/vendor/sdxprairie_defconfig b/arch/arm/configs/vendor/sdxprairie_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..090d5983b41e1d9c13abb6ee0f9e30770542b8f7 --- /dev/null +++ b/arch/arm/configs/vendor/sdxprairie_defconfig @@ -0,0 +1,288 @@ +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +# CONFIG_FAIR_GROUP_SCHED is not set +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SDXPRAIRIE=y +# CONFIG_VDSO is not set +CONFIG_PREEMPT=y +CONFIG_CMA=y +CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_IDLE=y +CONFIG_VFP=y +CONFIG_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V2=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_CT_NETLINK_TIMEOUT=y +CONFIG_NETFILTER_XT_TARGET_LOG=y 
+CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_IP_SET=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE_EBT_T_FILTER=y +CONFIG_BRIDGE_EBT_T_NAT=y +CONFIG_BRIDGE_EBT_ARP=y +CONFIG_BRIDGE_EBT_IP=y +CONFIG_BRIDGE_EBT_IP6=y +CONFIG_BRIDGE_EBT_ARPREPLY=y +CONFIG_BRIDGE_EBT_DNAT=y +CONFIG_BRIDGE_EBT_SNAT=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_PRIO=y +CONFIG_CFG80211=y +CONFIG_RFKILL=y +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=12 +CONFIG_MTD=y +CONFIG_MTD_TESTS=m +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_UBI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_INTEL is not set +CONFIG_KS8851=y +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +CONFIG_PPP=y +CONFIG_PPP_ASYNC=y +CONFIG_USB_USBNET=y +CONFIG_USB_NET_SMSC75XX=y +CONFIG_USB_NET_SMSC95XX=y +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=m +CONFIG_SERIO_LIBPS2=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SERIAL_MSM_HS=y +CONFIG_HVC_DCC=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=m +CONFIG_SLIMBUS=y +CONFIG_PINCTRL_SDXPRAIRIE=y +CONFIG_GPIOLIB=y +CONFIG_POWER_RESET=y +CONFIG_POWER_SUPPLY=y +CONFIG_THERMAL=y +CONFIG_REGULATOR=y +CONFIG_FB=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y 
+CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DEBUG=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_RTC_CLASS=y +CONFIG_DMADEVICES=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_ECM_IPA=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_QCOM_SMEM=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_PWM=y +CONFIG_ANDROID=y +CONFIG_STM=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +# CONFIG_FTRACE is not set +CONFIG_DEBUG_USER=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_SHA256=y +CONFIG_XZ_DEC=y diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index f7ba4389f0fa73a3a2f2b89b2b61ea2c51718653..072773152928244b268592e80370fb797aaa90b9 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -178,10 +178,26 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. 
*/ +extern void __dma_map_area(const void *addr, size_t size, int dir); +extern void __dma_unmap_area(const void *addr, size_t size, int dir); extern void dmac_inv_range(const void *start, const void *end); extern void dmac_clean_range(const void *start, const void *end); -extern void dmac_flush_range(const void *, const void *); +extern void dmac_flush_range(const void *start, const void *end); +static inline void __dma_inv_area(const void *start, size_t len) +{ + dmac_inv_range(start, start + len); +} + +static inline void __dma_clean_area(const void *start, size_t len) +{ + dmac_clean_range(start, start + len); +} + +static inline void __dma_flush_area(const void *start, size_t len) +{ + dmac_flush_range(start, start + len); +} #endif /* diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index 6821f1249300d99ee165d53053880ade369b0d78..f567f3da27fd25be05f46105151ac0edd2982ed3 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -8,13 +8,15 @@ #include #include #include +#include #define ARM_MAPPING_ERROR (~(dma_addr_t)0x0) struct dma_iommu_mapping { /* iommu specific data */ struct iommu_domain *domain; - + bool init; + const struct dma_map_ops *ops; unsigned long **bitmaps; /* array of bitmaps */ unsigned int nr_bitmaps; /* nr of elements in array */ unsigned int extensions; @@ -24,6 +26,8 @@ struct dma_iommu_mapping { spinlock_t lock; struct kref kref; + + struct dma_fast_smmu_mapping *fast; }; struct dma_iommu_mapping * diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index d14f31047a5c75442c69a238b21b4e714c15ddb2..f7c75dcae829a0087da799a4b97414205c578169 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -157,6 +157,11 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } #define dmac_flush_range __glue(_CACHE,_dma_flush_range) #define dmac_inv_range __glue(_CACHE, _dma_inv_range) #define dmac_clean_range __glue(_CACHE, _dma_clean_range) +#define dmac_map_area __glue(_CACHE, _dma_map_area) +#define dmac_unmap_area __glue(_CACHE, _dma_unmap_area) + +#define __dma_map_area dmac_map_area +#define __dma_unmap_area dmac_unmap_area #endif #endif diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 3b73fdcf3627af25207be6df16be69ac7a95ea0f..8de1100d1067453e39ed1858b9f140e4bae61773 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -77,7 +77,7 @@ extern int kgdb_fault_expected; #define KGDB_MAX_NO_CPUS 1 #define BUFMAX 400 -#define NUMREGBYTES (DBG_MAX_REG_NUM << 2) +#define NUMREGBYTES (GDB_MAX_REGS << 2) #define NUMCRITREGBYTES (32 << 2) #define _R0 0 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 776757d1604ab3901996bb24bb02748e54c2aee7..f23454db246fe2e9315d8c9aaa8b18fd70571f4a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 +#define TIF_MM_RELEASED 21 /* task MM has been released */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 6b38d7a634c19ffd279f98ca8cc3a113484d1fd3..c153184319866a3334d9c6c951dfaaca514dfd9c 100644 --- a/arch/arm/kernel/machine_kexec.c +++ 
b/arch/arm/kernel/machine_kexec.c @@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused) cpu_relax(); } +void crash_smp_send_stop(void) +{ + static int cpus_stopped; + unsigned long msecs; + + if (cpus_stopped) + return; + + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("Non-crashing CPUs did not react to IPI\n"); + + cpus_stopped = 1; +} + static void machine_kexec_mask_interrupts(void) { unsigned int i; @@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void) void machine_crash_shutdown(struct pt_regs *regs) { - unsigned long msecs; - local_irq_disable(); - - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - smp_call_function(machine_crash_nonpanic_core, NULL, false); - msecs = 1000; /* Wait at most a second for the other cpus to stop */ - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { - mdelay(1); - msecs--; - } - if (atomic_read(&waiting_for_crash_ipi) > 0) - pr_warn("Non-crashing CPUs did not react to IPI\n"); + crash_smp_send_stop(); crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index f673cd7a67665896f849cab26a394ce012e44e19..fb7c44cdadcb0bd45796ff03ee28a5388ee90699 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -205,12 +205,17 @@ static const short da830_evm_mmc_sd_pins[] = { -1 }; +#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1) +#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2) + static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* gpio chip 1 contains gpio range 32-63 */ - GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", + GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", + GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index cbde0030c092fe1213c4cddb6ffe19383d200e3c..25f12118c364839c7f8337852789a30c22655e8e 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -763,12 +763,17 @@ static const short da850_evm_mcasp_pins[] __initconst = { -1 }; +#define DA850_MMCSD_CD_PIN GPIO_TO_PIN(4, 0) +#define DA850_MMCSD_WP_PIN GPIO_TO_PIN(4, 1) + static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { /* gpio chip 2 contains gpio range 64-95 */ - GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", + GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", + GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 62e7bc3018f07f0ba13dbc4910fca27601dca5f1..8e64685b1941968e2d2dc7fd0e9a20e8c43bc04e 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -168,11 +169,16 @@ static struct resource dm355evm_dm9000_rsrc[] = { }, 
}; +static struct dm9000_plat_data dm335evm_dm9000_platdata; + static struct platform_device dm355evm_dm9000 = { .name = "dm9000", .id = -1, .resource = dm355evm_dm9000_rsrc, .num_resources = ARRAY_SIZE(dm355evm_dm9000_rsrc), + .dev = { + .platform_data = &dm335evm_dm9000_platdata, + }, }; static struct tvp514x_platform_data tvp5146_pdata = { diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index cb0a41e835829edb14a25ddb17c28cccf9df85d2..4c458f71410135d33aa3aacd2a03ea0ca5ec5305 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -534,11 +534,12 @@ static struct vpif_display_config dm646x_vpif_display_config = { .set_clock = set_vpif_clock, .subdevinfo = dm646x_vpif_subdev, .subdev_count = ARRAY_SIZE(dm646x_vpif_subdev), + .i2c_adapter_id = 1, .chan_config[0] = { .outputs = dm6467_ch0_outputs, .output_count = ARRAY_SIZE(dm6467_ch0_outputs), }, - .card_name = "DM646x EVM", + .card_name = "DM646x EVM Video Display", }; /** @@ -676,6 +677,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = { .setup_input_channel_mode = setup_vpif_input_channel_mode, .subdev_info = vpif_capture_sdev_info, .subdev_count = ARRAY_SIZE(vpif_capture_sdev_info), + .i2c_adapter_id = 1, .chan_config[0] = { .inputs = dm6467_ch0_inputs, .input_count = ARRAY_SIZE(dm6467_ch0_inputs), @@ -696,6 +698,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = { .fid_pol = 0, }, }, + .card_name = "DM646x EVM Video Capture", }; static void __init evm_init_video(void) diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 62eb7d6688900f244f546bd88da951e4ef934ac0..10a027253250edfa1093ad81571df138e0088705 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -123,12 +123,16 @@ static const short hawk_mmcsd0_pins[] = { -1 }; +#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12) +#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13) + static struct gpiod_lookup_table mmc_gpios_table = { .dev_id = "da830-mmc.0", .table = { - /* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/ - GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW), - GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", + GPIO_ACTIVE_LOW), + GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", + GPIO_ACTIVE_LOW), }, }; diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index da21353cac450d2f8dddaa75a7291670220ba17c..d869369ca2bccaf520309d4d0182687876555183 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -495,7 +495,8 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = { [IRQ_DM646X_MCASP0TXINT] = 7, [IRQ_DM646X_MCASP0RXINT] = 7, [IRQ_DM646X_RESERVED_3] = 7, - [IRQ_DM646X_MCASP1TXINT] = 7, /* clockevent */ + [IRQ_DM646X_MCASP1TXINT] = 7, + [IRQ_TINT0_TINT12] = 7, /* clockevent */ [IRQ_TINT0_TINT34] = 7, /* clocksource */ [IRQ_TINT1_TINT12] = 7, /* DSP timer */ [IRQ_TINT1_TINT34] = 7, /* system tick */ diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index fe57e26926292fe3dccc1adaf7d6c5e8552fd055..abca83d22ff3f1d217d642ed31e6719354ae4a3f 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -29,6 +29,7 @@ static struct dev_pm_domain keystone_pm_domain = { static struct pm_clk_notifier_block platform_domain_notifier = { 
.pm_domain = &keystone_pm_domain, + .con_ids = { NULL }, }; static const struct of_device_id of_keystone_table[] = { diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index 793a24a53c5261c20d1e9a39955615bf863fa9df..d7ca9e2b40d274c096333c7488011ac7dcc746db 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -58,22 +58,24 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) irq_num = gpio_to_irq(gpio); fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio]; - while (irq_counter[gpio] < fiq_count) { - if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { - struct irq_data *d = irq_get_irq_data(irq_num); - - /* - * It looks like handle_edge_irq() that - * OMAP GPIO edge interrupts default to, - * expects interrupt already unmasked. - */ - if (irq_chip && irq_chip->irq_unmask) + if (irq_counter[gpio] < fiq_count && + gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) { + struct irq_data *d = irq_get_irq_data(irq_num); + + /* + * handle_simple_irq() that OMAP GPIO edge + * interrupts default to since commit 80ac93c27441 + * requires interrupt already acked and unmasked. + */ + if (irq_chip) { + if (irq_chip->irq_ack) + irq_chip->irq_ack(d); + if (irq_chip->irq_unmask) irq_chip->irq_unmask(d); } - generic_handle_irq(irq_num); - - irq_counter[gpio]++; } + for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) + generic_handle_irq(irq_num); } return IRQ_HANDLED; } diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 76eb6ec5f157e9753cf7bc9773801a35d55ffd27..1e6a967cd2d5890342fb76bbe3b0c8c42ec6491d 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c @@ -188,7 +188,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) ((prev & OMAP_POWERSTATE_MASK) << 0)); trace_power_domain_target_rcuidle(pwrdm->name, trace_state, - smp_processor_id()); + raw_smp_processor_id()); } break; default: @@ -518,7 +518,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { /* Trace the pwrdm desired target state */ trace_power_domain_target_rcuidle(pwrdm->name, pwrst, - smp_processor_id()); + raw_smp_processor_id()); /* Program the pwrdm desired target state */ ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 600e4ac82485a2ad9552f75520d399c7b2f4093f..7d5cfe48b1cb677832e080a7de1e2cdae7250be4 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include @@ -2283,24 +2285,48 @@ const struct dma_map_ops iommu_coherent_ops = { * IO address ranges, which is required to perform memory allocation and * mapping with IOMMU aware functions. * - * The client device need to be attached to the mapping with - * arm_iommu_attach_device function. + * Clients may use iommu_domain_set_attr() to set additional flags prior + * to calling arm_iommu_attach_device() to complete initialization. 
*/ struct dma_iommu_mapping * arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) { unsigned int bits = size >> PAGE_SHIFT; - unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); struct dma_iommu_mapping *mapping; + + if (!bits) + return ERR_PTR(-EINVAL); + + mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); + if (!mapping) + return ERR_PTR(-ENOMEM); + + mapping->base = base; + mapping->bits = bits; + + mapping->domain = iommu_domain_alloc(bus); + if (!mapping->domain) + goto err_domain_alloc; + + mapping->init = false; + return mapping; + +err_domain_alloc: + kfree(mapping); + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(arm_iommu_create_mapping); + +static int +iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping) +{ + unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long); + u64 size = mapping->bits << PAGE_SHIFT; int extensions = 1; int err = -ENOMEM; - /* currently only 32-bit DMA address space is supported */ - if (size > DMA_BIT_MASK(32) + 1) - return ERR_PTR(-ERANGE); - if (!bitmap_size) - return ERR_PTR(-EINVAL); + return -EINVAL; WARN(!IS_ALIGNED(size, SZ_128M), "size is not aligned to 128M, alignment enforced"); @@ -2310,50 +2336,40 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) bitmap_size = PAGE_SIZE; } - mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); - if (!mapping) - goto err; - mapping->bitmap_size = bitmap_size; mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *), - GFP_KERNEL); + GFP_KERNEL); + if (!mapping->bitmaps) - goto err2; + return -ENOMEM; mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); if (!mapping->bitmaps[0]) - goto err3; + goto err; mapping->nr_bitmaps = 1; mapping->extensions = extensions; - mapping->base = base; - mapping->bits = BITS_PER_BYTE * bitmap_size; spin_lock_init(&mapping->lock); - - mapping->domain = iommu_domain_alloc(bus); - if (!mapping->domain) - goto err4; - - kref_init(&mapping->kref); - return mapping; -err4: - kfree(mapping->bitmaps[0]); -err3: - kfree(mapping->bitmaps); -err2: - kfree(mapping); + mapping->ops = &iommu_ops; + return 0; err: - return ERR_PTR(err); + kfree(mapping->bitmaps); + return err; } -EXPORT_SYMBOL_GPL(arm_iommu_create_mapping); -static void release_iommu_mapping(struct kref *kref) +static void iommu_release_mapping(struct kref *kref) { int i; + int is_fast = 0; + struct dma_iommu_mapping *mapping = container_of(kref, struct dma_iommu_mapping, kref); + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast); + if (is_fast) + fast_smmu_release_mapping(kref); + iommu_domain_free(mapping->domain); for (i = 0; i < mapping->nr_bitmaps; i++) kfree(mapping->bitmaps[i]); @@ -2379,13 +2395,70 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) return 0; } +/* + * arm_iommu_release_mapping + * @mapping: allocted via arm_iommu_create_mapping() + * + * Frees all resources associated with the iommu mapping. 
+ * The device associated with this mapping must be in the 'detached' state + */ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) { - if (mapping) - kref_put(&mapping->kref, release_iommu_mapping); + int is_fast = 0; + void (*release)(struct kref *kref); + + if (!mapping) + return; + + if (!mapping->init) { + iommu_domain_free(mapping->domain); + kfree(mapping); + return; + } + + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast); + + if (is_fast) + release = fast_smmu_release_mapping; + else + release = iommu_release_mapping; + + kref_put(&mapping->kref, release); } EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); +static int arm_iommu_init_mapping(struct device *dev, + struct dma_iommu_mapping *mapping) +{ + int err = -EINVAL; + u64 size = mapping->bits << PAGE_SHIFT; + int is_fast = 0; + + if (mapping->init) { + kref_get(&mapping->kref); + return 0; + } + + /* currently only 32-bit DMA address space is supported */ + if (size > DMA_BIT_MASK(32) + 1) { + dev_err(dev, "dma mask %llx too small\n", dma_get_mask(dev)); + return -ERANGE; + } + + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast); + + if (is_fast) + err = fast_smmu_init_mapping(dev, mapping); + else + err = iommu_init_mapping(dev, mapping); + if (!err) { + kref_init(&mapping->kref); + mapping->init = true; + } + return err; +} + + static int __arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) { @@ -2395,13 +2468,31 @@ static int __arm_iommu_attach_device(struct device *dev, if (err) return err; - kref_get(&mapping->kref); to_dma_iommu_mapping(dev) = mapping; pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); return 0; } +static void __arm_iommu_detach_device(struct device *dev) +{ + struct dma_iommu_mapping *mapping; + + mapping = to_dma_iommu_mapping(dev); + if (!mapping) { + dev_warn(dev, "Not attached\n"); + return; + } + + if (msm_dma_unmap_all_for_dev(dev)) + dev_warn(dev, "IOMMU detach with outstanding mappings\n"); + + iommu_detach_device(mapping->domain, dev); + to_dma_iommu_mapping(dev) = NULL; + + pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); +} + /** * arm_iommu_attach_device * @dev: valid struct device pointer @@ -2420,15 +2511,32 @@ int arm_iommu_attach_device(struct device *dev, { int err; int s1_bypass = 0; + struct iommu_group *group = dev->iommu_group; + + if (!group) { + dev_err(dev, "No iommu associated with device\n"); + return -EINVAL; + } + + if (iommu_get_domain_for_dev(dev)) { + dev_err(dev, "Device already attached to other iommu_domain\n"); + return -EINVAL; + } err = __arm_iommu_attach_device(dev, mapping); if (err) return err; + err = arm_iommu_init_mapping(dev, mapping); + if (err) { + __arm_iommu_detach_device(dev); + return err; + } + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, - &s1_bypass); + &s1_bypass); if (!s1_bypass) - set_dma_ops(dev, &iommu_ops); + set_dma_ops(dev, mapping->ops); return 0; } EXPORT_SYMBOL_GPL(arm_iommu_attach_device); @@ -2451,9 +2559,10 @@ void arm_iommu_detach_device(struct device *dev) return; } - iommu_detach_device(mapping->domain, dev); - kref_put(&mapping->kref, release_iommu_mapping); - to_dma_iommu_mapping(dev) = NULL; + __arm_iommu_detach_device(dev); + + iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, + &s1_bypass); if (!s1_bypass) set_dma_ops(dev, NULL); diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h index aaef64b7f1775367ec5b2fe31e33acb252d81488..ca148192d5e623165d771444731796b5db1b5cd6 100644 --- 
a/arch/arm/mm/dma.h +++ b/arch/arm/mm/dma.h @@ -5,9 +5,6 @@ #include #ifndef MULTI_CACHE -#define dmac_map_area __glue(_CACHE,_dma_map_area) -#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) - /* * These are private to the dma-mapping API. Do not use directly. * Their sole purpose is to ensure that data held in the cache diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 5d73327f849175e9a6842f28ce6b0d9071860e9c..0bb76731ba118f5f10258dd0741e8aab2fa2a5f4 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -150,7 +150,7 @@ void *kmap_atomic_pfn(unsigned long pfn) } #ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH -static void kmap_remove_unused_cpu(int cpu) +int kmap_remove_unused_cpu(unsigned int cpu) { int start_idx, idx, type; @@ -167,6 +167,7 @@ static void kmap_remove_unused_cpu(int cpu) set_top_pte(vaddr, __pte(0)); } pagefault_enable(); + return 0; } static void kmap_remove_unused(void *unused) @@ -179,27 +180,4 @@ void kmap_atomic_flush_unused(void) on_each_cpu(kmap_remove_unused, NULL, 1); } -static int hotplug_kmap_atomic_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - switch (action & (~CPU_TASKS_FROZEN)) { - case CPU_DYING: - kmap_remove_unused_cpu((int)hcpu); - break; - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block hotplug_kmap_atomic_notifier = { - .notifier_call = hotplug_kmap_atomic_callback, -}; - -static int __init init_kmap_atomic(void) -{ - return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier); -} -early_initcall(init_kmap_atomic); #endif diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c05aa8ad7bdf8c22d949a95970f5c5e0a560bd05..af97a7c8f46096ef5504fcfd8e2ec5586b23c126 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -140,6 +140,7 @@ config ARM64 select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT help ARM 64-bit (AArch64) Linux support. @@ -942,6 +943,14 @@ config PRINT_VMEMLAYOUT If unsure, say N. +config ARM64_SSBD + bool "Speculative Store Bypass Disable" if EXPERT + help + This enables mitigation of the bypassing of previous stores + by speculative loads. + + If unsure, say Y. 
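+
+	  As an illustration only (not part of this patch), tasks are
+	  expected to be able to opt in to the mitigation through the
+	  speculation-control prctl interface, roughly:
+
+		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+		      PR_SPEC_DISABLE, 0, 0);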
+ menuconfig ARMV8_DEPRECATED bool "Emulate deprecated/obsolete ARMv8 instructions" depends on COMPAT diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 64c54c92e214d686154de4b237f71a5212c5eb20..d71cbf596d1f77ed615f092e1f00310990f76a87 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -205,9 +205,6 @@ bus-width = <4>; cap-sd-highspeed; - sd-uhs-sdr12; - sd-uhs-sdr25; - sd-uhs-sdr50; max-frequency = <100000000>; disable-wp; diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi index 54f418d05e154814734707ff48a50392c216c083..2306b1a0c09a9f6bdebb4420f738be7d809199fb 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi @@ -46,7 +46,7 @@ compatible = "ethernet-phy-ieee802.3-c22"; reg = <0x0>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index b27f1d9ab513827649d3c2bfa41d2d4dd43b0a19..48755200f711e8f30e66d2da14897a74304f915a 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -7,8 +7,16 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb dtb-$(CONFIG_ARCH_QCS405) += qcs405-rumi.dtb \ - qcs405-mtp.dtb \ - qcs405-cdp.dtb + qcs405-iot-sku1.dtb \ + qcs405-iot-sku2.dtb \ + qcs405-iot-sku3.dtb \ + qcs405-iot-sku4.dtb \ + qcs405-iot-sku5.dtb \ + qcs405-iot-sku6.dtb \ + qcs405-iot-sku7.dtb \ + qcs405-iot-sku8.dtb \ + qcs403-iot-sku1.dtb \ + qcs403-iot-sku2.dtb ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) dtbo-$(CONFIG_ARCH_SM8150) += \ @@ -16,7 +24,9 @@ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) sm8150-mtp-overlay.dtbo \ sm8150-rumi-overlay.dtbo \ sm8150-qrd-overlay.dtbo \ - sm8150-auto-adp-star-overlay.dtbo \ + sm8150-qrd-dvt-overlay.dtbo \ + sa8155-adp-star-overlay.dtbo \ + sa8155p-adp-star-overlay.dtbo \ sm8150p-cdp-overlay.dtbo \ sm8150p-mtp-overlay.dtbo \ sm8150p-qrd-overlay.dtbo \ @@ -29,7 +39,9 @@ sm8150-cdp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-mtp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-rumi-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-qrd-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb -sm8150-auto-adp-star-overlay.dtbo-base := sm8150-auto.dtb +sm8150-qrd-dvt-overlay.dtbo-base := sm8150-v2.dtb +sa8155-adp-star-overlay.dtbo-base := sa8155.dtb +sa8155p-adp-star-overlay.dtbo-base := sa8155p.dtb sm8150-sdx50m-cdp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-sdx50m-mtp-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb sm8150-sdx50m-mtp-2.5k-panel-overlay.dtbo-base := sm8150.dtb sm8150-v2.dtb @@ -41,11 +53,13 @@ dtb-$(CONFIG_ARCH_SM8150) += sm8150-rumi.dtb \ sm8150-mtp.dtb \ sm8150-cdp.dtb \ sm8150-qrd.dtb \ - sm8150-auto-adp-star.dtb \ + sa8155-adp-star.dtb \ + sa8155p-adp-star.dtb \ sm8150-v2-rumi.dtb \ sm8150-v2-mtp.dtb \ sm8150-v2-cdp.dtb \ sm8150-v2-qrd.dtb \ + sm8150-v2-qrd-dvt.dtb \ sm8150p-mtp.dtb \ sm8150p-cdp.dtb \ sm8150p-qrd.dtb \ @@ -69,21 +83,15 @@ endif ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) dtbo-$(CONFIG_ARCH_SM6150) += \ - sm6150-cdp-overlay.dtbo \ - sm6150-mtp-overlay.dtbo \ sm6150-rumi-overlay.dtbo \ sm6150-qrd-overlay.dtbo \ sm6150-idp-overlay.dtbo \ -sm6150-cdp-overlay.dtbo-base := sm6150.dtb 
-sm6150-mtp-overlay.dtbo-base := sm6150.dtb sm6150-rumi-overlay.dtbo-base := sm6150.dtb sm6150-qrd-overlay.dtbo-base := sm6150.dtb sm6150-idp-overlay.dtbo-base := sm6150.dtb else dtb-$(CONFIG_ARCH_SM6150) += sm6150-rumi.dtb \ - sm6150-mtp.dtb \ - sm6150-cdp.dtb \ sm6150-qrd.dtb \ sm6150-idp.dtb endif @@ -103,6 +111,9 @@ dtb-$(CONFIG_ARCH_SDMMAGPIE) += sdmmagpie-rumi.dtb \ sdmmagpie-qrd.dtb endif +dtb-$(CONFIG_ARCH_SDXPRAIRIE) += sdxprairie-rumi.dtb \ + sdxprairie-cdp.dtb \ + sdxprairie-mtp.dtb ifeq ($(CONFIG_ARM64),y) always := $(dtb-y) diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-ext-bridge-1080p.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-ext-bridge-1080p.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..0d518a6901eb7dbc8b58dbed15530f33318391b7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/dsi-panel-ext-bridge-1080p.dtsi @@ -0,0 +1,53 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + dsi_ext_bridge_1080p: qcom,mdss_dsi_ext_bridge_1080p { + qcom,mdss-dsi-panel-name = "ext video mode dsi bridge"; + qcom,mdss-dsi-panel-type = "dsi_video_mode"; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-lane-3-state; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-t-clk-post = <0x03>; + qcom,mdss-dsi-t-clk-pre = <0x24>; + qcom,mdss-dsi-force-clock-lane-hs; + qcom,mdss-dsi-ext-bridge-mode; + + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-width = <1920>; + qcom,mdss-dsi-panel-height = <1080>; + qcom,mdss-dsi-h-front-porch = <88>; + qcom,mdss-dsi-h-back-porch = <148>; + qcom,mdss-dsi-h-pulse-width = <44>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <36>; + qcom,mdss-dsi-v-front-porch = <4>; + qcom,mdss-dsi-v-pulse-width = <5>; + qcom,mdss-dsi-h-sync-pulse = <0>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,display-topology = <1 0 1>; + qcom,default-topology-index = <0>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi index 0d0e7f7f8316743d0afdde3155f7de1e78289534..c83fd873560469d32dde769fa035505bbb2f630d 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi @@ -29,7 +29,6 @@ qcom,mdss-dsi-lane-1-state; qcom,mdss-dsi-lane-2-state; qcom,mdss-dsi-lane-3-state; - qcom,cmd-sync-wait-broadcast; qcom,mdss-dsi-dma-trigger = "trigger_sw"; qcom,mdss-dsi-mdp-trigger = "none"; qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>; diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi 
b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..553ba0bac78a80912389a5feeb6d5f4b3e574144 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi @@ -0,0 +1,106 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + dsi_sw43404_amoled_fhd_plus_cmd: qcom,mdss_dsi_sw43404_fhd_plus_cmd { + qcom,mdss-dsi-panel-name = + "sw43404 amoled boe fhd+ panel with DSC"; + qcom,mdss-dsi-panel-type = "dsi_cmd_mode"; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-color-order = "rgb_swap_rgb"; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + + qcom,mdss-dsi-traffic-mode = "non_burst_sync_event"; + qcom,mdss-dsi-lane-map = "lane_map_0123"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-lane-3-state; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>; + qcom,mdss-dsi-bl-max-level = <1023>; + qcom,mdss-dsi-te-pin-select = <1>; + qcom,mdss-dsi-wr-mem-start = <0x2c>; + qcom,mdss-dsi-wr-mem-continue = <0x3c>; + qcom,mdss-dsi-te-dcs-command = <1>; + qcom,mdss-dsi-te-check-enable; + qcom,mdss-dsi-te-using-te-pin; + qcom,mdss-pan-physical-width-dimension = <68>; + qcom,mdss-pan-physical-height-dimension = <138>; + + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <2160>; + qcom,mdss-dsi-h-front-porch = <160>; + qcom,mdss-dsi-h-back-porch = <72>; + qcom,mdss-dsi-h-pulse-width = <16>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-h-sync-pulse = <0>; + qcom,mdss-dsi-v-back-porch = <8>; + qcom,mdss-dsi-v-front-porch = <8>; + qcom,mdss-dsi-v-pulse-width = <1>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-panel-jitter = <0x1 0x1>; + qcom,mdss-dsi-on-command = [ + 39 01 00 00 00 00 03 b0 a5 00 + 07 01 00 00 00 00 02 01 00 + 0a 01 00 00 00 00 80 11 00 00 89 30 80 + 08 70 04 38 02 1c 02 1c 02 1c 02 00 + 02 0e 00 20 34 29 00 07 00 0C 00 2e + 00 31 18 00 10 F0 03 0C 20 00 06 0B + 0B 33 0E 1C 2A 38 46 54 62 69 70 77 + 79 7B 7D 7E 01 02 01 00 09 40 09 BE + 19 FC 19 FA 19 F8 1A 38 1A 78 1A B6 + 2A F6 2B 34 2B 74 3B 74 6B F4 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 + 39 01 00 00 00 00 03 b0 a5 00 + 15 01 00 00 00 00 02 5e 10 + 39 01 00 00 00 00 06 b9 bf 11 40 00 30 + 39 01 00 00 00 00 09 F8 00 08 10 08 2D + 00 00 2D + 15 01 00 00 00 00 02 55 0c + 05 01 00 00 1e 00 02 11 00 + 15 01 00 00 78 00 02 3d 01 + 39 01 00 00 00 00 03 b0 a5 00 + 05 01 00 00 78 00 02 35 00 + 05 01 00 00 3c 00 02 29 
00 + ]; + qcom,mdss-dsi-off-command = [ + 05 01 00 00 14 00 02 28 00 + 05 01 00 00 78 00 02 10 00]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,compression-mode = "dsc"; + qcom,mdss-dsc-slice-height = <540>; + qcom,mdss-dsc-slice-width = <540>; + qcom,mdss-dsc-slice-per-pkt = <1>; + qcom,mdss-dsc-bit-per-component = <8>; + qcom,mdss-dsc-bit-per-pixel = <8>; + qcom,mdss-dsc-block-prediction-enable; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi index c7e17b531b90cd40b5880b3c99e90e16982b7fed..a39150e4112f7ed9b86aa68d05d8458e60115d88 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi @@ -46,9 +46,9 @@ qcom,mdss-dsi-panel-framerate = <60>; qcom,mdss-dsi-panel-width = <1440>; qcom,mdss-dsi-panel-height = <2880>; - qcom,mdss-dsi-h-front-porch = <160>; - qcom,mdss-dsi-h-back-porch = <72>; - qcom,mdss-dsi-h-pulse-width = <16>; + qcom,mdss-dsi-h-front-porch = <60>; + qcom,mdss-dsi-h-back-porch = <30>; + qcom,mdss-dsi-h-pulse-width = <12>; qcom,mdss-dsi-h-sync-skew = <0>; qcom,mdss-dsi-v-back-porch = <8>; qcom,mdss-dsi-v-front-porch = <8>; diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..aa0962b3e4b43b4833a54603f208bc5b2d7098e5 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi @@ -0,0 +1,87 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&mdss_mdp { + dsi_sw43404_amoled_video: qcom,mdss_dsi_sw43404_amoled_wqhd_video { + qcom,mdss-dsi-panel-name = + "sw43404 amoled video mode dsi boe panel with DSC"; + qcom,mdss-dsi-panel-type = "dsi_video_mode"; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-traffic-mode = "non_burst_sync_event"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-lane-3-state; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>; + qcom,adjust-timer-wakeup-ms = <1>; + qcom,mdss-dsi-display-timings { + timing@0{ + qcom,mdss-dsi-panel-width = <1440>; + qcom,mdss-dsi-panel-height = <2880>; + qcom,mdss-dsi-h-front-porch = <10>; + qcom,mdss-dsi-h-back-porch = <10>; + qcom,mdss-dsi-h-pulse-width = <12>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-front-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <1>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-on-command = [ + 07 01 00 00 00 00 02 01 00 + 39 01 00 00 00 00 03 b0 a5 00 + 39 01 00 00 00 00 06 b2 00 5d 04 80 49 + 15 01 00 00 00 00 02 3d 10 + 15 01 00 00 00 00 02 36 00 + 15 01 00 00 00 00 02 55 0c + 39 01 00 00 00 00 09 f8 00 08 10 08 2d + 00 00 2d + 39 01 00 00 3c 00 03 51 00 00 + 05 01 00 00 50 00 02 11 00 + 39 01 00 00 00 00 03 b0 34 04 + 39 01 00 00 00 00 05 c1 00 00 00 46 + 0a 01 00 00 00 00 80 11 00 00 89 30 80 + 0B 40 05 A0 02 d0 02 D0 02 D0 02 00 + 02 68 00 20 4e a8 00 0A 00 0C 00 23 + 00 1c 18 00 10 F0 03 0C 20 00 06 0B + 0B 33 0E 1C 2A 38 46 54 62 69 70 77 + 79 7B 7D 7E 01 02 01 00 09 40 09 BE + 19 FC 19 FA 19 F8 1A 38 1A 78 1A B6 + 2A F6 2B 34 2B 74 3B 74 6B F4 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 00 00 00 00 00 00 00 00 00 00 + 00 00 + 05 01 00 00 78 00 02 29 00 + ]; + qcom,mdss-dsi-off-command = [05 01 00 00 78 00 + 02 28 00 05 01 00 00 78 00 02 10 00]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,compression-mode = "dsc"; + qcom,mdss-dsc-slice-height = <720>; + qcom,mdss-dsc-slice-width = <720>; + qcom,mdss-dsc-slice-per-pkt = <2>; + qcom,mdss-dsc-bit-per-component = <8>; + qcom,mdss-dsc-bit-per-pixel = <8>; + qcom,mdss-dsc-block-prediction-enable; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi index f1a47e24244fb1486d22703d5b3cd41c94555d5c..22a8d112a783b6ebc59f2c1810adc4d04d6feef7 100644 --- a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi +++ b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-alium-3600mah.dtsi @@ -11,46 +11,58 @@ * GNU General Public License for more details. 
*/ -qcom,alium_860_89032_0000_3600mah_averaged_masterslave_feb7th2018 { - /* #Alium_860_89032_0000_3600mAh_averaged_MasterSlave_Feb7th2018*/ +qcom,alium_860_89032_0000_3600mah_averaged_masterslave_jun15th2018 { + /* #Alium_860_89032_0000_3600mAh_averaged_MasterSlave_Jun15th2018*/ qcom,max-voltage-uv = <4350000>; - qcom,fastchg-current-ma = <3600>; + qcom,fastchg-current-ma = <5400>; + qcom,jeita-fcc-ranges = <0 100 2500000 + 110 400 5400000 + 410 450 2500000>; + qcom,jeita-fv-ranges = <0 100 4250000 + 110 400 4350000 + 410 450 4250000>; + qcom,step-chg-ranges = <3600000 4200000 5400000 + 4201000 4300000 3600000 + 4301000 4350000 2500000>; qcom,batt-id-kohm = <107>; qcom,battery-beta = <4250>; qcom,therm-room-temp = <100000>; qcom,fg-cc-cv-threshold-mv = <4340>; - qcom,battery-type = "alium_860_89032_0000_3600mah_feb7th2018"; + qcom,battery-type = "alium_860_89032_0000_3600mah_jun15th2018"; qcom,therm-coefficients = <0x2318 0xd0c 0xdaf7 0xc556 0x848d>; qcom,therm-center-offset = <0x70>; - qcom,checksum = <0xB126>; - qcom,gui-version = "PM8150GUI - 0.0.0.26"; + qcom,therm-pull-up = <100>; + qcom,rslow-normal-coeffs = <0xa4 0x01 0x24 0x13>; + qcom,rslow-low-coeffs = <0xa7 0xd5 0x0e 0x13>; + qcom,checksum = <0xCDFB>; + qcom,gui-version = "PM8150GUI - 1.0.0.7"; qcom,fg-profile-data = [ 09 00 B5 EA - 40 CC 19 AA - EA C2 00 00 + 3F CC 33 AA + E7 C2 00 00 13 BC 83 8A - 03 80 D3 92 - AA 9D 47 80 - 18 00 07 02 - 58 13 47 FD + 03 80 D1 92 + AB 9D 47 80 + 18 00 A4 01 + 24 13 47 FD A9 F2 CE 07 - 00 00 A6 00 - 36 F5 07 FD - 64 0D 0B 23 - 34 2A BE 23 - B4 43 5C 4B + 32 00 A6 00 + EF F5 CB FD + 11 0D 4A 23 + 60 2A C1 23 + 6F 42 F8 43 40 00 3A 00 - 3F 00 47 00 - 3A 00 33 00 - 36 00 37 00 - 47 00 41 00 + 40 00 48 00 + 3B 00 34 00 + 38 00 38 00 + 48 00 42 00 40 00 40 00 3B 00 30 00 32 00 39 00 32 00 42 00 5C 64 47 00 3B 08 40 10 - 40 00 3A 00 + 40 00 3B 00 35 00 36 00 3C 00 35 00 44 20 53 40 diff --git a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi index 93bebb09cbc48fdfe4bd3887747df263f3e88227..45975eee13e01ac032d5360bf8b5fcb1711b277a 100644 --- a/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi +++ b/arch/arm64/boot/dts/qcom/fg-gen4-batterydata-mlp466076-3250mah.dtsi @@ -14,7 +14,7 @@ qcom,mlp466076_3250mah_averaged_masterslave_mar27th2018 { /* #mlp466076_3250mAh_averaged_MasterSlave_Mar27th2018 */ qcom,max-voltage-uv = <4400000>; - qcom,fastchg-current-ma = <6000000>; + qcom,fastchg-current-ma = <6000>; qcom,jeita-fcc-ranges = <0 150 650000 151 450 4875000 451 550 1625000>; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi index 0779eddcb0bbba84a5ebf09f6621f19524a6596e..2338738e7c551a0a4ad7840eb23e52a598921342 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-qcs405.dtsi @@ -13,7 +13,7 @@ &soc { gfx_iommu: qcom,iommu@1f00000 { - status = "disabled"; + status = "okay"; compatible = "qcom,qsmmu-v500"; reg = <0x1f00000 0x10000>, <0x1ee2000 0x20>; @@ -21,22 +21,23 @@ #iommu-cells = <2>; qcom,tz-device-id = "GPU"; qcom,skip-init; + qcom,disable-atos; qcom,dynamic; qcom,use-3-lvl-tables; #global-interrupts = <0>; #size-cells = <1>; #address-cells = <1>; ranges; - interrupts = , - , - , - ; + interrupts = , + , + , + ; clocks = <&clock_gcc GCC_SMMU_CFG_CLK>, <&clock_gcc GCC_GFX_TCU_CLK>; clock-names = "iface_clk", "core_clk"; }; - apps_iommu: qcom,iommu@1e00000 { + apps_smmu: 
qcom,iommu@1e00000 { status = "okay"; compatible = "qcom,qsmmu-v500"; reg = <0x1e00000 0x40000>, @@ -45,6 +46,7 @@ #iommu-cells = <2>; qcom,tz-device-id = "APPS"; qcom,skip-init; + qcom,disable-atos; qcom,enable-static-cb; qcom,use-3-lvl-tables; #global-interrupts = <0>; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..b5b2e87c9be2434110dcbb0b30cc1bfe841008ed --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdmmagpie.dtsi @@ -0,0 +1,245 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +&soc { + kgsl_smmu: arm,smmu-kgsl@5040000 { + status = "ok"; + compatible = "qcom,smmu-v2"; + reg = <0x5040000 0x10000>; + #iommu-cells = <1>; + qcom,dynamic; + qcom,use-3-lvl-tables; + qcom,disable-atos; + #global-interrupts = <2>; + qcom,regulator-names = "vdd"; + vdd-supply = <&gpu_cx_gdsc>; + interrupts = , + , + , + , + , + , + , + , + , + ; + clock-names = "gcc_gpu_memnoc_gfx_clk"; + clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>; + attach-impl-defs = + <0x6000 0x2378>, + <0x6060 0x1055>, + <0x678c 0x8>, + <0x6794 0x28>, + <0x6800 0x6>, + <0x6900 0x3ff>, + <0x6924 0x204>, + <0x6928 0x11000>, + <0x6930 0x800>, + <0x6960 0xffffffff>, + <0x6b64 0x1a5551>, + <0x6b68 0x9a82a382>; + }; + + apps_smmu: apps-smmu@0x15000000 { + compatible = "qcom,qsmmu-v500"; + reg = <0x15000000 0x100000>, + <0x15182000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,skip-init; + qcom,use-3-lvl-tables; + #global-interrupts = <1>; + #size-cells = <1>; + #address-cells = <1>; + ranges; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + + anoc_1_tbu: anoc_1_tbu@0x15185000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15185000 0x1000>, + <0x15182200 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x0 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>; + }; + + anoc_2_tbu: anoc_2_tbu@0x15189000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15189000 0x1000>, + <0x15182208 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x400 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>; + }; + + mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x1518d000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x1518d000 0x1000>, + <0x15182210 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x800 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>; + }; + + mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x15191000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15191000 0x1000>, + <0x15182218 0x8>; + reg-names = "base", "status-reg"; + 
qcom,stream-id-range = <0xc00 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>; + }; + + mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x15195000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15195000 0x1000>, + <0x15182220 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1000 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>; + }; + + compute_dsp_0_tbu: compute_dsp_0_tbu@0x15199000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15199000 0x1000>, + <0x15182228 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1400 0x400>; + /* No GDSC */ + }; + + adsp_tbu: adsp_tbu@0x1519d000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x1519d000 0x1000>, + <0x15182230 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1800 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>; + }; + + anoc_1_pcie_tbu: anoc_1_pcie_tbu@0x151a1000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x151a1000 0x1000>, + <0x15182238 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1c00 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>; + clock-names = "gcc_aggre_noc_pcie_tbu_clk"; + clocks = <&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>; + }; + }; + + kgsl_iommu_test_device { + compatible = "iommu-debug-test"; + iommus = <&kgsl_smmu 0x7>; + }; + + apps_iommu_test_device { + compatible = "iommu-debug-test"; + iommus = <&apps_smmu 0x21 0>; + }; + + apps_iommu_coherent_test_device { + compatible = "iommu-debug-test"; + iommus = <&apps_smmu 0x23 0>; + dma-coherent; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdxprairie.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..65fee9dff61a8d31e1d9892a27140bdb1022bd45 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdxprairie.dtsi @@ -0,0 +1,101 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +&soc { + apps_smmu: apps-smmu@0x15000000 { + compatible = "qcom,qsmmu-v500"; + reg = <0x15000000 0x20000>, + <0x15022000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,use-3-lvl-tables; + #global-interrupts = <1>; + #size-cells = <1>; + #address-cells = <1>; + ranges; + + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + + periph_tbu: periph_tbu@0x15025000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15025000 0x1000>, + <0x15022200 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x0 0x400>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + ipa_tbu: ipa_tbu@0x15029000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15029000 0x1000>, + <0x15022208 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x400 0x400>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + }; + + apps_iommu_test_device { + compatible = "iommu-debug-test"; + iommus = <&apps_smmu 0x100 0x0>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi index fca527cf14cf5efdb5489b387f26eec20fa44599..3f224c133e6599f707fa353fd4f2e9fd0dc9f7df 100644 --- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm6150.dtsi @@ -11,6 +11,7 @@ */ #include +#include &soc { kgsl_smmu: kgsl-smmu@0x50a0000 { @@ -19,11 +20,18 @@ <0x50c2000 0x20>; reg-names = "base", "tcu-base"; #iommu-cells = <2>; + qcom,dynamic; qcom,skip-init; qcom,use-3-lvl-tables; #global-interrupts = <1>; qcom,regulator-names = "vdd"; vdd-supply = <&gpu_cx_gdsc>; + clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, + <&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>, + <&clock_gpucc GPU_CC_AHB_CLK>; + clock-names = "gcc_gpu_memnoc_gfx_clk", + "gcc_gpu_snoc_dvm_gfx_clk", + "gpu_cc_ahb_clk"; #size-cells = <1>; #address-cells = <1>; ranges; @@ -131,6 +139,17 @@ , , ; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; anoc_1_tbu: anoc_1_tbu@0x150c5000 { compatible = "qcom,qsmmuv500-tbu"; @@ -140,6 +159,17 @@ qcom,stream-id-range = <0x0 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; anoc_2_tbu: anoc_2_tbu@0x150c9000 { @@ -150,6 +180,17 @@ qcom,stream-id-range = <0x400 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x150cd000 { @@ -160,6 +201,17 @@ qcom,stream-id-range = <0x800 0x400>; qcom,regulator-names = "vdd"; vdd-supply = 
<&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>; + qcom,msm-bus,name = "mnoc_hf_0_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x150d1000 { @@ -170,6 +222,17 @@ qcom,stream-id-range = <0xc00 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>; + qcom,msm-bus,name = "mnoc_sf_0_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; compute_dsp_tbu: compute_dsp_tbu@0x150d5000 { @@ -179,6 +242,17 @@ reg-names = "base", "status-reg"; qcom,stream-id-range = <0x1000 0x400>; /* No GDSC */ + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; adsp_tbu: adsp_tbu@0x150d9000 { @@ -189,6 +263,17 @@ qcom,stream-id-range = <0x1400 0x400>; qcom,regulator-names = "vdd"; vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ed019eac9d88e37dc6f4fc6c4380129f8c354ba6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sm8150-v2.dtsi @@ -0,0 +1,394 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +&soc { + kgsl_smmu: kgsl-smmu@0x02ca0000 { + compatible = "qcom,qsmmu-v500"; + reg = <0x02ca0000 0x10000>, + <0x2cc2000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,dynamic; + qcom,skip-init; + qcom,use-3-lvl-tables; + qcom,no-asid-retention; + #global-interrupts = <1>; + qcom,regulator-names = "vdd"; + vdd-supply = <&gpu_cx_gdsc>; + clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, + <&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>, + <&clock_gpucc GPU_CC_AHB_CLK>; + clock-names = "gcc_gpu_memnoc_gfx_clk", + "gcc_gpu_snoc_dvm_gfx_clk", + "gpu_cc_ahb_clk"; + #size-cells = <1>; + #address-cells = <1>; + ranges; + interrupts = , + , + , + , + , + , + , + , + ; + + gfx_0_tbu: gfx_0_tbu@0x2cc5000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x2cc5000 0x1000>, + <0x2cc2200 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x0 0x400>; + }; + + gfx_1_tbu: gfx_1_tbu@0x2cc9000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x2cc9000 0x1000>, + <0x2cc2208 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x400 0x400>; + }; + }; + + apps_smmu: apps-smmu@0x15000000 { + compatible = "qcom,qsmmu-v500"; + reg = <0x15000000 0x100000>, + <0x15182000 0x20>; + reg-names = "base", "tcu-base"; + #iommu-cells = <2>; + qcom,skip-init; + qcom,use-3-lvl-tables; + qcom,no-asid-retention; + qcom,disable-atos; + #global-interrupts = <1>; + #size-cells = <1>; + #address-cells = <1>; + ranges; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + + anoc_1_tbu: anoc_1_tbu@0x15185000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15185000 0x1000>, + <0x15182200 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x0 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + anoc_2_tbu: anoc_2_tbu@0x15189000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15189000 0x1000>, + <0x15182208 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x400 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x1518d000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x1518d000 0x1000>, + <0x15182210 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x800 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>; + qcom,msm-bus,name = "mnoc_hf_0_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x15191000 { + compatible = "qcom,qsmmuv500-tbu"; + reg 
= <0x15191000 0x1000>, + <0x15182218 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0xc00 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>; + qcom,msm-bus,name = "mnoc_hf_1_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + compute_dsp_1_tbu: compute_dsp_1_tbu@0x15195000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15195000 0x1000>, + <0x15182220 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1000 0x400>; + /* No GDSC */ + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + compute_dsp_0_tbu: compute_dsp_0_tbu@0x15199000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x15199000 0x1000>, + <0x15182228 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1400 0x400>; + /* No GDSC */ + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + adsp_tbu: adsp_tbu@0x1519d000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x1519d000 0x1000>, + <0x15182230 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1800 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + anoc_1_pcie_tbu: anoc_1_pcie_tbu@0x151a1000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x151a1000 0x1000>, + <0x15182238 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1c00 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>; + clock-names = "gcc_aggre_noc_pcie_tbu_clk"; + clocks = <&clock_gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>; + qcom,msm-bus,name = "apps_smmu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + + mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x151a5000 { + compatible = "qcom,qsmmuv500-tbu"; + reg = <0x151a5000 0x1000>, + <0x15182240 0x8>; + reg-names = "base", "status-reg"; + qcom,stream-id-range = <0x1c00 0x400>; + qcom,regulator-names = "vdd"; + vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>; + qcom,msm-bus,name = "mnoc_sf_0_tbu"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,active-only; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + , + <0 0>, + , + , + <0 1000>; + }; + }; + + kgsl_iommu_test_device { + compatible = "iommu-debug-test"; + iommus = <&kgsl_smmu 0x7 0>; + }; + + kgsl_iommu_coherent_test_device { + compatible = "iommu-debug-test"; + iommus = <&kgsl_smmu 0x9 0>; + dma-coherent; + }; + + apps_iommu_test_device { + compatible = "iommu-debug-test"; + iommus = <&apps_smmu 0x21 0>; + }; + + apps_iommu_coherent_test_device { + compatible = "iommu-debug-test"; + iommus = <&apps_smmu 0x23 0>; + dma-coherent; + }; +}; + +&kgsl_smmu { /* */ + qcom,actlr = <0x0 0x407 0x303>, + <0x1460 0x0 0x303>, + <0x61 0x3400 0x303>, + <0x62 0x3401 0x303>, + <0x64 0x3400 0x303>, + <0x65 0x3400 0x303>; +}; + diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi 
b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi index ce2c509521e513f683ef8e8c488f6e225dacc0ba..7e4adf41cd979c301e19584e4a4e677224fdab3f 100644 --- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi +++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi @@ -129,6 +129,44 @@ }; }; + msm_dai_cdc_dma: qcom,msm-dai-cdc-dma { + compatible = "qcom,msm-dai-cdc-dma"; + wsa_cdc_dma_0_rx: qcom,msm-dai-wsa-cdc-dma-0-rx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45056>; + }; + + wsa_cdc_dma_0_tx: qcom,msm-dai-wsa-cdc-dma-0-tx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45057>; + }; + + wsa_cdc_dma_1_rx: qcom,msm-dai-wsa-cdc-dma-1-rx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45058>; + }; + + wsa_cdc_dma_1_tx: qcom,msm-dai-wsa-cdc-dma-1-tx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45059>; + }; + + wsa_cdc_dma_2_tx: qcom,msm-dai-wsa-cdc-dma-2-tx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45061>; + }; + + va_cdc_dma_0_tx: qcom,msm-dai-va-cdc-dma-0-tx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45089>; + }; + + va_cdc_dma_1_tx: qcom,msm-dai-va-cdc-dma-1-tx { + compatible = "qcom,msm-dai-cdc-dma-dev"; + qcom,msm-dai-cdc-dma-dev-id = <45091>; + }; + }; + lsm: qcom,msm-lsm-client { compatible = "qcom,msm-lsm-client"; }; @@ -299,6 +337,13 @@ audio_apr: qcom,msm-audio-apr { compatible = "qcom,msm-audio-apr"; qcom,subsys-name = "apr_adsp"; + + msm_audio_ion: qcom,msm-audio-ion { + compatible = "qcom,msm-audio-ion"; + qcom,smmu-version = <2>; + qcom,smmu-enabled; + iommus = <&apps_smmu 0x1b21 0x0>; + }; }; dai_pri_auxpcm: qcom,msm-pri-auxpcm { @@ -376,13 +421,6 @@ qcom,dba-bridge-chip = "adv7533"; }; - msm_audio_ion: qcom,msm-audio-ion { - compatible = "qcom,msm-audio-ion"; - qcom,smmu-version = <2>; - qcom,smmu-enabled; - iommus = <&apps_smmu 0x1b21 0x0>; - }; - qcom,msm-adsp-loader { status = "ok"; compatible = "qcom,adsp-loader"; diff --git a/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..426e105b0b87c47b1d63cd7d65988d60cc232595 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi @@ -0,0 +1,35 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + /* smp2p information */ + qcom,smp2p_interrupt_rdbg_2_out { + compatible = "qcom,smp2p-interrupt-rdbg-2-out"; + qcom,smem-states = <&smp2p_rdbg2_out 0>; + qcom,smem-state-names = "rdbg-smp2p-out"; + }; + qcom,smp2p_interrupt_rdbg_2_in { + compatible = "qcom,smp2p-interrupt-rdbg-2-in"; + interrupts-extended = <&smp2p_rdbg2_in 0 0>; + interrupt-names = "rdbg-smp2p-in"; + }; + qcom,smp2p_interrupt_rdbg_5_out { + compatible = "qcom,smp2p-interrupt-rdbg-5-out"; + qcom,smem-states = <&smp2p_rdbg5_out 0>; + qcom,smem-state-names = "rdbg-smp2p-out"; + }; + qcom,smp2p_interrupt_rdbg_5_in { + compatible = "qcom,smp2p-interrupt-rdbg-5-in"; + interrupts-extended = <&smp2p_rdbg5_in 0 0>; + interrupt-names = "rdbg-smp2p-in"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi index 1b9020eebd7873c555f625c65adf652008212144..ed53778679a6be6eb7e78e87ee1716f0dfe9e2ba 100644 --- a/arch/arm64/boot/dts/qcom/pm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi @@ -69,14 +69,14 @@ qcom,chgr@1000 { reg = <0x1000 0x100>; interrupts = - <0x2 0x10 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x1 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x5 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x6 IRQ_TYPE_EDGE_RISING>, - <0x2 0x10 0x7 IRQ_TYPE_EDGE_RISING>; + <0x0 0x10 0x0 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x1 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x2 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x3 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x4 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x5 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x6 IRQ_TYPE_EDGE_RISING>, + <0x0 0x10 0x7 IRQ_TYPE_EDGE_RISING>; interrupt-names = "chgr-error", "chg-state-change", @@ -91,14 +91,14 @@ qcom,dcdc@1100 { reg = <0x1100 0x100>; interrupts = - <0x2 0x11 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x1 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x2 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x3 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x11 0x4 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x11 0x5 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x11 0x6 IRQ_TYPE_EDGE_RISING>, - <0x2 0x11 0x7 IRQ_TYPE_EDGE_BOTH>; + <0x0 0x11 0x0 IRQ_TYPE_EDGE_RISING>, + <0x0 0x11 0x1 IRQ_TYPE_EDGE_RISING>, + <0x0 0x11 0x2 IRQ_TYPE_EDGE_RISING>, + <0x0 0x11 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x11 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x11 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x11 0x6 IRQ_TYPE_EDGE_RISING>, + <0x0 0x11 0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "otg-fail", "otg-oc-disable-sw", @@ -113,13 +113,13 @@ qcom,batif@1200 { reg = <0x1200 0x100>; interrupts = - <0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x12 0x6 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x12 0x7 IRQ_TYPE_EDGE_BOTH>; + <0x0 0x12 0x0 IRQ_TYPE_EDGE_RISING>, + <0x0 0x12 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x12 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x12 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x12 0x5 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x12 0x6 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x12 0x7 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "bat-temp", "bat-ov", @@ -133,14 +133,14 @@ qcom,usb@1300 { reg = <0x1300 0x100>; interrupts = - <0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x13 0x5 IRQ_TYPE_EDGE_RISING>, - <0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>, - <0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>; + <0x0 0x13 0x0 IRQ_TYPE_EDGE_BOTH>, + <0x0 
0x13 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x13 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x13 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x13 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x13 0x5 IRQ_TYPE_EDGE_RISING>, + <0x0 0x13 0x6 IRQ_TYPE_EDGE_RISING>, + <0x0 0x13 0x7 IRQ_TYPE_EDGE_RISING>; interrupt-names = "usbin-collapse", "usbin-vashdn", @@ -155,13 +155,13 @@ qcom,dc@1400 { reg = <0x1400 0x100>; interrupts = - <0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>, - <0x2 0x14 0x5 IRQ_TYPE_EDGE_RISING>, - <0x2 0x14 0x6 IRQ_TYPE_EDGE_RISING>, - <0x2 0x14 0x7 IRQ_TYPE_EDGE_RISING>; + <0x0 0x14 0x1 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x14 0x2 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x14 0x3 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x14 0x4 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x14 0x5 IRQ_TYPE_EDGE_RISING>, + <0x0 0x14 0x6 IRQ_TYPE_EDGE_RISING>, + <0x0 0x14 0x7 IRQ_TYPE_EDGE_RISING>; interrupt-names = "dcin-vashdn", "dcin-uv", @@ -175,14 +175,14 @@ qcom,typec@1500 { reg = <0x1500 0x100>; interrupts = - <0x2 0x15 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x15 0x1 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x15 0x2 IRQ_TYPE_EDGE_RISING>, - <0x2 0x15 0x3 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x15 0x4 IRQ_TYPE_EDGE_RISING>, - <0x2 0x15 0x5 IRQ_TYPE_EDGE_RISING>, - <0x2 0x15 0x6 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x15 0x7 IRQ_TYPE_EDGE_RISING>; + <0x0 0x15 0x0 IRQ_TYPE_EDGE_RISING>, + <0x0 0x15 0x1 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x15 0x2 IRQ_TYPE_EDGE_RISING>, + <0x0 0x15 0x3 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x15 0x4 IRQ_TYPE_EDGE_RISING>, + <0x0 0x15 0x5 IRQ_TYPE_EDGE_RISING>, + <0x0 0x15 0x6 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x15 0x7 IRQ_TYPE_EDGE_RISING>; interrupt-names = "typec-or-rid-detect-change", "typec-vpd-detect", @@ -197,14 +197,14 @@ qcom,misc@1600 { reg = <0x1600 0x100>; interrupts = - <0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x2 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x3 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x4 IRQ_TYPE_LEVEL_HIGH>, - <0x2 0x16 0x5 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x6 IRQ_TYPE_EDGE_RISING>, - <0x2 0x16 0x7 IRQ_TYPE_EDGE_RISING>; + <0x0 0x16 0x0 IRQ_TYPE_EDGE_RISING>, + <0x0 0x16 0x1 IRQ_TYPE_EDGE_RISING>, + <0x0 0x16 0x2 IRQ_TYPE_EDGE_RISING>, + <0x0 0x16 0x3 IRQ_TYPE_EDGE_RISING>, + <0x0 0x16 0x4 IRQ_TYPE_LEVEL_HIGH>, + <0x0 0x16 0x5 IRQ_TYPE_EDGE_RISING>, + <0x0 0x16 0x6 IRQ_TYPE_EDGE_RISING>, + <0x0 0x16 0x7 IRQ_TYPE_EDGE_RISING>; interrupt-names = "wdog-snarl", "wdog-bark", diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi index 4219369b4f0fe984abb8eb9a2a416709dbd61db5..6be40025b7f0867ffd3b07a2587333eb5cf63c9e 100644 --- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi @@ -50,6 +50,13 @@ clock-names = "xo"; }; + pm8150b_qnovo: qcom,sdam-qnovo@b000 { + compatible = "qcom,qpnp-qnovo5"; + reg = <0xb000 0x100>; + interrupts = <0x2 0xb0 0 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "ptrain-done"; + }; + pm8150b_gpios: pinctrl@c000 { compatible = "qcom,spmi-gpio"; reg = <0xc000 0xc00>; @@ -162,6 +169,9 @@ qcom,pmic-revid = <&pm8150b_revid>; + qcom,charger-temp-max = <800>; + qcom,smb-temp-max = <800>; + qcom,chgr@1000 { reg = <0x1000 0x100>; interrupts = @@ -462,23 +472,54 @@ }; pm8150b_haptics: qcom,haptics@c000 { - compatible = "qcom,qpnp-haptics"; + compatible = "qcom,haptics"; reg = <0xc000 0x100>; interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>, <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "hap-sc-irq", "hap-play-irq"; - qcom,pmic-revid = <&pm8150b_revid>; - 
qcom,actuator-type = <0>; - qcom,play-mode = "direct"; - qcom,vmax-mv = <3200>; - qcom,ilim-ma = <800>; - qcom,sc-dbc-cycles = <8>; - qcom,wave-play-rate-us = <6667>; - qcom,en-brake; - qcom,lra-high-z = "opt0"; - qcom,lra-auto-res-mode = "qwd"; - qcom,lra-res-cal-period = <4>; - status = "ok"; + qcom,actuator-type = "lra"; + qcom,vmax-mv = <3600>; + qcom,play-rate-us = <6667>; + qcom,lra-resonance-sig-shape = "sine"; + qcom,lra-auto-resonance-mode = "qwd"; + qcom,lra-allow-variable-play-rate; + + wf_0 { + /* CLICK */ + qcom,effect-id = <0>; + qcom,wf-pattern = [3e 3e 3e]; + qcom,wf-play-rate-us = <6667>; + }; + wf_1 { + /* DOUBLE CLICK */ + qcom,effect-id = <1>; + qcom,wf-pattern = [7e 7e 02 02 02 02 7e 7e]; + qcom,wf-play-rate-us = <7143>; + }; + wf_2 { + /* TICK */ + qcom,effect-id = <2>; + qcom,wf-pattern = [7e 7e]; + qcom,wf-play-rate-us = <4000>; + }; + wf_3 { + /* THUD */ + qcom,effect-id = <3>; + qcom,wf-pattern = [7e 7e 7e]; + qcom,wf-play-rate-us = <5714>; + }; + wf_4 { + /* POP */ + qcom,effect-id = <4>; + qcom,wf-pattern = [7e 7e]; + qcom,wf-play-rate-us = <5000>; + }; + wf_5 { + /* HEAVY CLICK */ + qcom,effect-id = <5>; + qcom,wf-pattern = [7e 7e 7e]; + qcom,wf-play-rate-us = <6667>; + }; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/pms405-rpm-regulator.dtsi b/arch/arm64/boot/dts/qcom/pms405-rpm-regulator.dtsi index dd0200ef50276b0eaa4995883b6b5a8e4df297eb..5b5c9d76cf4cd018aa2accb957e7bf651b406b7d 100644 --- a/arch/arm64/boot/dts/qcom/pms405-rpm-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/pms405-rpm-regulator.dtsi @@ -15,7 +15,7 @@ rpm-regulator-smpa1 { compatible = "qcom,rpm-smd-regulator-resource"; qcom,resource-name = "rwmx"; - qcom,resource-id = <1>; + qcom,resource-id = <0>; qcom,regulator-type = <1>; qcom,hpm-min-load = <100000>; status = "disabled"; @@ -32,7 +32,7 @@ rpm-regulator-smpa2 { compatible = "qcom,rpm-smd-regulator-resource"; qcom,resource-name = "rwlc"; - qcom,resource-id = <2>; + qcom,resource-id = <0>; qcom,regulator-type = <1>; qcom,hpm-min-load = <100000>; status = "disabled"; diff --git a/arch/arm64/boot/dts/qcom/pms405.dtsi b/arch/arm64/boot/dts/qcom/pms405.dtsi index 159c47c4daa7df0f5761e94e19b1c971c436755d..7149c9b1f5b16941f66b9568319d6f02b2cd743a 100644 --- a/arch/arm64/boot/dts/qcom/pms405.dtsi +++ b/arch/arm64/boot/dts/qcom/pms405.dtsi @@ -87,7 +87,7 @@ pms405_pon: qcom,power-on@800 { compatible = "qcom,qpnp-power-on"; reg = <0x800 0x100>; - interrupts = <0x0 0x8 0x0>; + interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>; interrupt-names = "kpdpwr"; qcom,pon-dbc-delay = <15625>; @@ -103,6 +103,19 @@ reg = <0x900 0x100>; }; + pms405_clkdiv: clock-controller@5b00 { + compatible = "qcom,spmi-clkdiv"; + reg = <0x5b00 0x100>; + #clock-cells = <1>; + qcom,num-clkdivs = <1>; + clock-output-names = "pms405_div_clk1"; + clocks = <&clock_rpmcc RPM_SMD_XO_CLK_SRC>; + clock-names = "xo"; + + assigned-clocks = <&pms405_clkdiv 1>; + assigned-clock-rates = <9600000>; + }; + /* QCS405 + PMS405 GPIO configuration */ pms405_gpios: pinctrl@c000 { compatible = "qcom,spmi-gpio"; diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts b/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts new file mode 100644 index 0000000000000000000000000000000000000000..f0b91c4cf7df25be98fea7c516759d8e23f56d8a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku1.dts @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs403.dtsi" +#include "qcs405-wsa-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS403 EVB2 1000 IOT"; + compatible = "qcom,qcs403-iot", "qcom,qcs403", "qcom,iot"; + qcom,board-id = <0x010020 0x3>; + + cpus { + /delete-node/ cpu@102; + /delete-node/ cpu@103; + + cpu-map { + cluster0 { + /delete-node/ core2; + /delete-node/ core3; + }; + }; + }; +}; + +&soc { + cpuss_dump { + /delete-node/ qcom,l1_i_cache102; + /delete-node/ qcom,l1_i_cache103; + /delete-node/ qcom,l1_d_cache102; + /delete-node/ qcom,l1_d_cache103; + }; + + qcom,spm@b012000 { + qcom,cpu-vctl-list = <&CPU0 &CPU1>; + }; + + qcom,lpm-levels { + qcom,pm-cluster@0{ + qcom,pm-cpu { + qcom,cpu = <&CPU0 &CPU1>; + }; + }; + }; + + /delete-node/ cti@61ba000; + /delete-node/ cti@61bb000; + /delete-node/ etm@61be000; + /delete-node/ etm@61bf000; + funnel@61a1000 { + ports { + /delete-node/ port@3; + /delete-node/ port@4; + }; + }; +}; + +&thermal_zones { + cpuss-max-step { + cooling-maps { + /delete-node/ cpu2_cdev; + /delete-node/ cpu3_cdev; + }; + }; + + /delete-node/ cpuss-2-step; + /delete-node/ cpuss-3-step; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts b/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts new file mode 100644 index 0000000000000000000000000000000000000000..fc6b265c4dada301b6dcb17b14b30122960211fc --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs403-iot-sku2.dts @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs403.dtsi" +#include "qcs405-nowcd-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
QCS403 RCM IOT"; + compatible = "qcom,qcs403-iot", "qcom,qcs403", "qcom,iot"; + qcom,board-id = <0x010015 0x0>; + + cpus { + /delete-node/ cpu@102; + /delete-node/ cpu@103; + + cpu-map { + cluster0 { + /delete-node/ core2; + /delete-node/ core3; + }; + }; + }; +}; + +&soc { + cpuss_dump { + /delete-node/ qcom,l1_i_cache102; + /delete-node/ qcom,l1_i_cache103; + /delete-node/ qcom,l1_d_cache102; + /delete-node/ qcom,l1_d_cache103; + }; + + qcom,spm@b012000 { + qcom,cpu-vctl-list = <&CPU0 &CPU1>; + }; + + qcom,lpm-levels { + qcom,pm-cluster@0{ + qcom,pm-cpu { + qcom,cpu = <&CPU0 &CPU1>; + }; + }; + }; + + /delete-node/ cti@61ba000; + /delete-node/ cti@61bb000; + /delete-node/ etm@61be000; + /delete-node/ etm@61bf000; + funnel@61a1000 { + ports { + /delete-node/ port@3; + /delete-node/ port@4; + }; + }; +}; + +&thermal_zones { + cpuss-max-step { + cooling-maps { + /delete-node/ cpu2_cdev; + /delete-node/ cpu3_cdev; + }; + }; + + /delete-node/ cpuss-2-step; + /delete-node/ cpuss-3-step; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs403.dtsi b/arch/arm64/boot/dts/qcom/qcs403.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..72e417e4f493f3894b5ea3e477a7612b1fe63350 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs403.dtsi @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qcs405.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS403"; + qcom,msm-name = "QCS403"; + qcom,msm-id = <373 0x0>; +}; + +&soc { + /delete-node/ qcom,cpu0-computemon; + + cpu0_computemon: qcom,cpu0-computemon { + compatible = "qcom,arm-cpu-mon"; + qcom,cpulist = <&CPU0 &CPU1>; + qcom,target-dev = <&cpu0_cpu_ddr_latfloor>; + qcom,core-dev-table = + < 1113600 MHZ_TO_MBPS( 297, 4) >, + < 1267200 MHZ_TO_MBPS( 597, 4) >, + < 1401600 MHZ_TO_MBPS( 710, 4) >; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-amic-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-amic-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..419580f78059f11b99d5be04697117f6517b5c87 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-amic-audio-overlay.dtsi @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "qcs405-tasha.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-amic-snd-card"; + qcom,tasha-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,audio-routing = + "AMIC3", "MIC BIAS3", + "AMIC4", "MIC BIAS4", + "MIC BIAS3", "Analog Mic3", + "MIC BIAS4", "Analog Mic4"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..584f4f0212d4859b46cdeda68bac47d7343d71b6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-audio-overlay.dtsi @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qcs405-tasha.dtsi" +#include "qcs405-va-bolero.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-snd-card"; + qcom,va-bolero-codec = <1>; + qcom,tasha-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; + qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; + qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>; + qcom,cdc-dmic67-gpios = <&cdc_dmic67_gpios>; + qcom,audio-routing = + "AMIC3", "MIC BIAS3", + "AMIC4", "MIC BIAS4", + "MIC BIAS3", "Analog Mic3", + "MIC BIAS4", "Analog Mic4", + "VA DMIC0", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic0", + "VA DMIC1", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic1", + "VA DMIC2", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic2", + "VA DMIC3", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic3", + "VA DMIC4", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic4", + "VA DMIC5", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic5", + "VA DMIC6", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic6", + "VA DMIC7", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic7"; +}; + +&bolero { + qcom,num-macros = <1>; +}; + +&soc { + cdc_dmic01_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic01_clk_active &cdc_dmic01_data_active>; + pinctrl-1 = <&cdc_dmic01_clk_sleep &cdc_dmic01_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic23_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic23_clk_active &cdc_dmic23_data_active>; + pinctrl-1 = <&cdc_dmic23_clk_sleep &cdc_dmic23_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic45_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic45_clk_active &cdc_dmic45_data_active>; + pinctrl-1 = <&cdc_dmic45_clk_sleep &cdc_dmic45_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic67_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic67_clk_active &cdc_dmic67_data_active>; + pinctrl-1 = <&cdc_dmic67_clk_sleep &cdc_dmic67_data_sleep>; + qcom,lpi-gpios; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi 
b/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..6a8dd8ed7397300703e0a1e3c7976301d4fb564a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-audio.dtsi @@ -0,0 +1,153 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "qcs405-lpi.dtsi" +#include "msm-audio-lpass.dtsi" + +&msm_audio_ion { + iommus = <&apps_smmu 0x0801 0x0>; + qcom,smmu-sid-mask = /bits/ 64 <0xf>; +}; + +&soc { + qcom,avtimer@C10000C { + compatible = "qcom,avtimer"; + reg = <0x0C10000C 0x4>, + <0x0C100010 0x4>; + reg-names = "avtimer_lsb_addr", "avtimer_msb_addr"; + qcom,clk-div = <192>; + qcom,clk-mult = <10>; + }; + + audio_apr: qcom,msm-audio-apr { + compatible = "qcom,msm-audio-apr"; + q6core: q6core { + compatible = "qcom,q6core-audio"; + bolero: bolero-cdc { + compatible = "qcom,bolero-codec"; + }; + }; + }; +}; + +&q6core { + qcs405_snd: sound { + compatible = "qcom,qcs405-asoc-snd"; + qcom,model = "qcs405-snd-card"; + qcom,mi2s-audio-intf = <1>; + qcom,auxpcm-audio-intf = <1>; + + asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>, + <&loopback>, <&compress>, <&hostless>, + <&afe>, <&lsm>, <&routing>, <&compr>, + <&pcm_noirq>; + asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1", + "msm-pcm-dsp.2", "msm-voip-dsp", + "msm-pcm-voice", "msm-pcm-loopback", + "msm-compress-dsp", "msm-pcm-hostless", + "msm-pcm-afe", "msm-lsm-client", + "msm-pcm-routing", "msm-compr-dsp", + "msm-pcm-dsp-noirq"; + asoc-cpu = <&dai_dp>, <&dai_mi2s0>, <&dai_mi2s1>, + <&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>, <&dai_mi2s5>, + <&dai_pri_auxpcm>, <&dai_sec_auxpcm>, + <&dai_tert_auxpcm>, <&dai_quat_auxpcm>, + <&dai_quin_auxpcm>, + <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>, + <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>, + <&sb_4_rx>, <&sb_4_tx>, <&sb_5_rx>, <&sb_5_tx>, + <&sb_6_rx>, <&sb_7_rx>, <&sb_7_tx>, + <&sb_8_rx>, <&sb_8_tx>, + <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, + <&afe_proxy_tx>, <&incall_record_rx>, + <&incall_record_tx>, <&incall_music_rx>, + <&incall_music_2_rx>, + <&usb_audio_rx>, <&usb_audio_tx>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>, + <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>, + <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>, + <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>, + <&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>, + <&wsa_cdc_dma_0_rx>, <&wsa_cdc_dma_0_tx>, + <&wsa_cdc_dma_1_rx>, <&wsa_cdc_dma_1_tx>, + <&wsa_cdc_dma_2_tx>, + <&va_cdc_dma_0_tx>, <&va_cdc_dma_1_tx>; + asoc-cpu-names = "msm-dai-q6-dp.24608", + "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", + "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", + "msm-dai-q6-mi2s.4", "msm-dai-q6-mi2s.5", + "msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2", + "msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4", + "msm-dai-q6-auxpcm.5", + "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385", + "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387", + "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389", + "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391", + "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393", + "msm-dai-q6-dev.16394", 
"msm-dai-q6-dev.16395", + "msm-dai-q6-dev.16396", + "msm-dai-q6-dev.16398", "msm-dai-q6-dev.16399", + "msm-dai-q6-dev.16400", "msm-dai-q6-dev.16401", + "msm-dai-q6-dev.224", "msm-dai-q6-dev.225", + "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", + "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", + "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865", + "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881", + "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", + "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913", + "msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929", + "msm-dai-cdc-dma-dev.45056", + "msm-dai-cdc-dma-dev.45057", + "msm-dai-cdc-dma-dev.45058", + "msm-dai-cdc-dma-dev.45059", + "msm-dai-cdc-dma-dev.45061", + "msm-dai-cdc-dma-dev.45089", + "msm-dai-cdc-dma-dev.45091"; + }; +}; + +&slim_aud { + status = "disabled"; + msm_dai_slim { + status = "disabled"; + compatible = "qcom,msm-dai-slim"; + elemental-addr = [ff ff ff fe 17 02]; + }; +}; + +&pms405_gpios { + tasha_mclk { + tasha_mclk_default: tasha_mclk_default{ + pins = "gpio8"; + function = "func1"; + qcom,drive-strength = <2>; + power-source = <0>; + bias-disable; + output-low; + }; + }; +}; + +&dai_mi2s4 { + qcom,msm-dai-is-island-supported = <1>; +}; + +&dai_quin_auxpcm { + qcom,msm-dai-is-island-supported = <1>; +}; + +&dai_quin_tdm_tx_0 { + qcom,msm-dai-is-island-supported = <1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-blsp.dtsi b/arch/arm64/boot/dts/qcom/qcs405-blsp.dtsi index 224ec15a22cb70c624a661f57c61776357a997ee..f58fc527b93405d7034aada6199b2f3d90a49c70 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-blsp.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-blsp.dtsi @@ -348,8 +348,8 @@ interrupts = <0 1 2>; #interrupt-cells = <1>; interrupt-map-mask = <0xffffffff>; - interrupt-map = <0 &intc 0 0 107 0 - 1 &intc 0 0 238 0 + interrupt-map = <0 &intc 0 107 0 + 1 &intc 0 238 0 2 &tlmm 31 0>; qcom,inject-rx-on-wakeup; @@ -385,8 +385,8 @@ interrupts = <0 1 2>; #interrupt-cells = <1>; interrupt-map-mask = <0xffffffff>; - interrupt-map = <0 &intc 0 0 108 0 - 1 &intc 0 0 238 0 + interrupt-map = <0 &intc 0 108 0 + 1 &intc 0 238 0 2 &tlmm 23 0>; qcom,inject-rx-on-wakeup; @@ -422,8 +422,8 @@ interrupts = <0 1 2>; #interrupt-cells = <1>; interrupt-map-mask = <0xffffffff>; - interrupt-map = <0 &intc 0 0 118 0 - 1 &intc 0 0 238 0 + interrupt-map = <0 &intc 0 118 0 + 1 &intc 0 238 0 2 &tlmm 18 0>; qcom,inject-rx-on-wakeup; @@ -459,8 +459,8 @@ interrupts = <0 1 2>; #interrupt-cells = <1>; interrupt-map-mask = <0xffffffff>; - interrupt-map = <0 &intc 0 0 119 0 - 1 &intc 0 0 238 0 + interrupt-map = <0 &intc 0 119 0 + 1 &intc 0 238 0 2 &tlmm 83 0>; qcom,inject-rx-on-wakeup; @@ -498,8 +498,8 @@ interrupts = <0 1 2>; #interrupt-cells = <1>; interrupt-map-mask = <0xffffffff>; - interrupt-map = <0 &intc 0 0 297 0 - 1 &intc 0 0 239 0 + interrupt-map = <0 &intc 0 297 0 + 1 &intc 0 239 0 2 &tlmm 27 0>; qcom,inject-rx-on-wakeup; diff --git a/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi b/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi index ca659e0dc2ce023270152874d735ee0ee7b9bef4..d8b2ef74467c02b2b24f30ba271a6d19cd5d31be 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-bus.dtsi @@ -172,8 +172,9 @@ qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 - &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 - &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + &pcnoc_s_11 &pcnoc_s_2 &pcnoc_s_4 + &pcnoc_s_6 &pcnoc_s_7 &pcnoc_s_8 + 
&pcnoc_s_9>; }; mas_crypto: mas-crypto { @@ -186,8 +187,9 @@ qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 - &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 - &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + &pcnoc_s_11 &pcnoc_s_2 &pcnoc_s_4 + &pcnoc_s_6 &pcnoc_s_7 &pcnoc_s_8 + &pcnoc_s_9>; }; mas_sdcc_1: mas-sdcc-1 { @@ -199,8 +201,9 @@ qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 - &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 - &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + &pcnoc_s_11 &pcnoc_s_2 &pcnoc_s_4 + &pcnoc_s_6 &pcnoc_s_7 &pcnoc_s_8 + &pcnoc_s_9>; }; mas_sdcc_2: mas-sdcc-2 { @@ -212,8 +215,9 @@ qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 - &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 - &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + &pcnoc_s_11 &pcnoc_s_2 &pcnoc_s_4 + &pcnoc_s_6 &pcnoc_s_7 &pcnoc_s_8 + &pcnoc_s_9>; }; mas_snoc_pcnoc: mas-snoc-pcnoc { @@ -240,8 +244,9 @@ qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,blacklist = <&pcnoc_s_0 &pcnoc_s_1 &pcnoc_s_10 - &pcnoc_s_2 &pcnoc_s_4 &pcnoc_s_6 - &pcnoc_s_7 &pcnoc_s_8 &pcnoc_s_9>; + &pcnoc_s_11 &pcnoc_s_2 &pcnoc_s_4 + &pcnoc_s_6 &pcnoc_s_7 &pcnoc_s_8 + &pcnoc_s_9>; }; /*SNOC Masters*/ @@ -297,6 +302,51 @@ qcom,mas-rpm-id = ; }; + mas_emac: mas-emac { + cell-id = ; + label = "mas-emac"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <27>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_snoc_bimc_1 &snoc_int_1>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + mas_pcie: mas-pcie { + cell-id = ; + label = "mas-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <18>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_snoc_bimc_1 &snoc_int_1>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + + mas_usb3: mas-usb3 { + cell-id = ; + label = "mas-usb3"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,qport = <26>; + qcom,qos-mode = "fixed"; + qcom,connections = <&slv_snoc_bimc_1 &snoc_int_1>; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = ; + }; + /*Internal nodes*/ pcnoc_int_0: pcnoc-int-0 { cell-id = ; @@ -314,11 +364,12 @@ label = "pcnoc-int-2"; qcom,buswidth = <8>; qcom,agg-ports = <1>; - qcom,connections = <&pcnoc_s_10 &slv_tcu &pcnoc_s_2 - &pcnoc_s_3 &pcnoc_s_0 - &pcnoc_s_1 &pcnoc_s_6 - &pcnoc_s_7 &pcnoc_s_4 - &pcnoc_s_8 &pcnoc_s_9>; + qcom,connections = <&pcnoc_s_10 &slv_tcu + &pcnoc_s_11 &pcnoc_s_2 + &pcnoc_s_3 &pcnoc_s_0 + &pcnoc_s_1 &pcnoc_s_6 + &pcnoc_s_7 &pcnoc_s_4 + &pcnoc_s_8 &pcnoc_s_9>; qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,slv-rpm-id = ; @@ -396,7 +447,8 @@ label = "pcnoc-s-6"; qcom,buswidth = <4>; qcom,agg-ports = <1>; - qcom,connections = <&slv_blsp_1 &slv_tlmm_north>; + qcom,connections = <&slv_blsp_1 &slv_tlmm_north + &slv_ethernet>; qcom,bus-dev = <&fab_pcnoc>; qcom,mas-rpm-id = ; qcom,slv-rpm-id = ; @@ -448,6 +500,17 @@ qcom,slv-rpm-id = ; }; + pcnoc_s_11: pcnoc-s-11 { + cell-id = ; + label = "pcnoc-s-11"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_usb3>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,mas-rpm-id = ; + qcom,slv-rpm-id = ; + }; + qdss_int: qdss-int { cell-id = ; label = "qdss-int"; @@ -617,6 +680,16 @@ qcom,slv-rpm-id = ; }; + slv_ethernet:slv-ethernet { + cell-id = ; + label = "slv-ethernet"; + qcom,buswidth = <4>; + 
qcom,agg-ports = <1>; + qcom,ap-owned; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + slv_blsp_2:slv-blsp-2 { cell-id = ; label = "slv-blsp-2"; @@ -690,6 +763,15 @@ qcom,slv-rpm-id = ; }; + slv_usb3:slv-usb3 { + cell-id = ; + label = "slv-usb3"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_pcnoc>; + qcom,slv-rpm-id = ; + }; + slv_crypto_0_cfg:slv-crypto-0-cfg { cell-id = ; label = "slv-crypto-0-cfg"; diff --git a/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi b/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi index c4dc6cb25d89bbe9f0344b7b1461f116149b692e..f423a1dea89b6395f7a6d5cf830155e57b7b4038 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-coresight.dtsi @@ -72,14 +72,14 @@ arm,buffer-size = <0x400000>; coresight-name = "coresight-tmc-etr"; - coresight-ctis = <&cti0>; + coresight-ctis = <&cti0 &cti0>; coresight-csr = <&csr>; clocks = <&clock_rpmcc RPM_QDSS_CLK>, <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; - interrupts = ; + interrupts = ; interrupt-names = "byte-cntr-irq"; port { @@ -98,7 +98,7 @@ reg-names = "tmc-base"; coresight-name = "coresight-tmc-etf"; - coresight-ctis = <&cti0>; + coresight-ctis = <&cti0 &cti0>; arm,default-sink; coresight-csr = <&csr>; @@ -340,7 +340,7 @@ arm,primecell-periphid = <0x0003b962>; reg = <0x6002000 0x1000>, - <0x09000000 0x1000000>; + <0x09280000 0x180000>; reg-names = "stm-base", "stm-stimulus-base"; coresight-name = "coresight-stm"; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi index 3f83fd04b6de1712608b59de3d98f47f599c0938..d15ea0356775d66414856560c8745a3fb7d228a2 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-cpu.dtsi @@ -21,7 +21,7 @@ #address-cells = <1>; #size-cells = <0>; cpu-map { - cluster1 { + cluster0 { core0 { cpu = <&CPU0>; }; @@ -42,6 +42,8 @@ compatible = "arm,cortex-a53"; reg = <0x100>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; next-level-cache = <&L2_1>; #cooling-cells = <2>; L2_1: l2-cache { @@ -65,6 +67,8 @@ compatible = "arm,cortex-a53"; reg = <0x101>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; next-level-cache = <&L2_1>; #cooling-cells = <2>; L1_I_101: l1-icache { @@ -82,6 +86,8 @@ compatible = "arm,cortex-a53"; reg = <0x102>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; next-level-cache = <&L2_1>; #cooling-cells = <2>; L1_I_102: l1-icache { @@ -99,6 +105,8 @@ compatible = "arm,cortex-a53"; reg = <0x103>; enable-method = "psci"; + capacity-dmips-mhz = <1024>; + sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; next-level-cache = <&L2_1>; #cooling-cells = <2>; L1_I_103: l1-icache { @@ -111,6 +119,37 @@ }; }; }; + + energy_costs: energy-costs { + compatible = "sched-energy"; + + CPU_COST_0: core-cost0 { + busy-cost-data = < + 960000 159 + 1305600 207 + 1497600 256 + 1708800 327 + 1804800 343 + 1958400 445 + >; + idle-cost-data = < + 100 80 60 40 + >; + }; + CLUSTER_COST_0: cluster-cost0 { + busy-cost-data = < + 960000 53 + 1305600 61 + 1497600 71 + 1708800 85 + 1804800 88 + 1958400 110 + >; + idle-cost-data = < + 4 3 2 1 + >; + }; + }; }; &soc { diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi new file mode 100644 index 
0000000000000000000000000000000000000000..2523e6abd8b95c0c73ab047784b8d7d3e440f6d8 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-csra1-audio-overlay.dtsi @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qcs405-tasha.dtsi" +#include "qcs405-va-bolero.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-csra1-snd-card"; + qcom,va-bolero-codec = <1>; + qcom,tasha-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; + qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; + qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>; + qcom,cdc-dmic67-gpios = <&cdc_dmic67_gpios>; + qcom,audio-routing = + "AMIC3", "MIC BIAS3", + "AMIC4", "MIC BIAS4", + "MIC BIAS3", "Analog Mic3", + "MIC BIAS4", "Analog Mic4", + "VA DMIC0", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic0", + "VA DMIC1", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic1", + "VA DMIC2", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic2", + "VA DMIC3", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic3", + "VA DMIC4", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic4", + "VA DMIC5", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic5", + "VA DMIC6", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic6", + "VA DMIC7", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic7"; +}; + +&bolero { + qcom,num-macros = <1>; +}; + +&soc { + cdc_dmic01_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic01_clk_active &cdc_dmic01_data_active>; + pinctrl-1 = <&cdc_dmic01_clk_sleep &cdc_dmic01_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic23_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic23_clk_active &cdc_dmic23_data_active>; + pinctrl-1 = <&cdc_dmic23_clk_sleep &cdc_dmic23_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic45_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic45_clk_active &cdc_dmic45_data_active>; + pinctrl-1 = <&cdc_dmic45_clk_sleep &cdc_dmic45_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic67_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic67_clk_active &cdc_dmic67_data_active>; + pinctrl-1 = <&cdc_dmic67_clk_sleep &cdc_dmic67_data_sleep>; + qcom,lpi-gpios; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..7dc047d196b1a7516d9630f019ba41ac575dfb6d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-csra6-audio-overlay.dtsi @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qcs405-tasha.dtsi" +#include "qcs405-va-bolero.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-csra6-snd-card"; + qcom,va-bolero-codec = <1>; + qcom,tasha-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; + qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; + qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>; + qcom,cdc-dmic67-gpios = <&cdc_dmic67_gpios>; + qcom,audio-routing = + "AMIC3", "MIC BIAS3", + "AMIC4", "MIC BIAS4", + "MIC BIAS3", "Analog Mic3", + "MIC BIAS4", "Analog Mic4", + "VA DMIC0", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic0", + "VA DMIC1", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic1", + "VA DMIC2", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic2", + "VA DMIC3", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic3", + "VA DMIC4", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic4", + "VA DMIC5", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic5", + "VA DMIC6", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic6", + "VA DMIC7", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic7"; +}; + +&bolero { + qcom,num-macros = <1>; +}; + +&soc { + cdc_dmic01_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic01_clk_active &cdc_dmic01_data_active>; + pinctrl-1 = <&cdc_dmic01_clk_sleep &cdc_dmic01_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic23_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic23_clk_active &cdc_dmic23_data_active>; + pinctrl-1 = <&cdc_dmic23_clk_sleep &cdc_dmic23_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic45_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic45_clk_active &cdc_dmic45_data_active>; + pinctrl-1 = <&cdc_dmic45_clk_sleep &cdc_dmic45_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic67_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic67_clk_active &cdc_dmic67_data_active>; + pinctrl-1 = <&cdc_dmic67_clk_sleep &cdc_dmic67_data_sleep>; + qcom,lpi-gpios; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi b/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi index ed8a630383b6054963f2286e19e41036b7d215f9..2b58972a222c3cc840279eb9538e6bcdc84e774a 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-gpu.dtsi @@ -110,9 +110,9 @@ /* TURBO */ qcom,gpu-pwrlevel@0 { reg = <0>; - qcom,gpu-freq = <650000000>; + qcom,gpu-freq = <598000000>; qcom,bus-freq = <8>; - qcom,bus-min = <8>; + qcom,bus-min = <7>; qcom,bus-max = <8>; }; @@ -180,7 +180,7 @@ qcom,retention; gfx3d_user: gfx3d_user { compatible = "qcom,smmu-kgsl-cb"; - iommus = <&gfx_iommu 0>; + iommus = <&gfx_iommu 0 1>; qcom,gpu-offset = <0xa000>; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts new file mode 100644 index 
0000000000000000000000000000000000000000..b450ced5cf2ebe55a7c85ada0838d8046ce1ef4a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-wsa-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS405 EVB1 1000 IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x010020 0>; +}; + +&i2c_5 { + status = "ok"; +}; + +&smb1351_otg_supply { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cdp.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts similarity index 74% rename from arch/arm64/boot/dts/qcom/qcs405-cdp.dts rename to arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts index 3d36f2b018a1509f6626b3f055354abd86dac231..1ff864c6fea74c7535d95b9c4f24aa2ec0bb1b1a 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cdp.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts @@ -14,14 +14,10 @@ /dts-v1/; #include "qcs405.dtsi" -#include "qcs405-cdp.dtsi" +#include "qcs405-audio-overlay.dtsi" / { - model = "Qualcomm Technologies, Inc. QCS405 CDP"; - compatible = "qcom,qcs405-cdp", "qcom,qcs405", "qcom,cdp"; - qcom,board-id = <1 0>; -}; - -&qnand_1 { - status = "ok"; + model = "Qualcomm Technologies, Inc. QCS405 EVB1 4000 SPI IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x010020 0x1>; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts new file mode 100644 index 0000000000000000000000000000000000000000..2a54972cb02f9996d324c61aa39aa3596d6c6e8e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku3.dts @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-nowcd-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
QCS405 sEVB/SLT IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x010020 0x2>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-mtp.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts similarity index 74% rename from arch/arm64/boot/dts/qcom/qcs405-mtp.dts rename to arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts index 7e7872d997aa8e9e83008a1592d7e50b5fe11cea..5cfafbb325e61835389a16e1c63ae3318e20777f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-mtp.dts +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts @@ -14,14 +14,10 @@ /dts-v1/; #include "qcs405.dtsi" -#include "qcs405-mtp.dtsi" +#include "qcs405-audio-overlay.dtsi" / { - model = "Qualcomm Technologies, Inc. QCS405 MTP"; - compatible = "qcom,qcs405-mtp", "qcom,qcs405", "qcom,mtp"; - qcom,board-id = <8 0>; -}; - -&qnand_1 { - status = "ok"; + model = "Qualcomm Technologies, Inc. QCS405 EVB1 4000 DSI IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x020020 0x1>; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts new file mode 100644 index 0000000000000000000000000000000000000000..2cb3dbb03341ce90651b627e6cd32ca9eb1798a1 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS405 EVB1 4000 RGB IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x030020 0x1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku6.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku6.dts new file mode 100644 index 0000000000000000000000000000000000000000..a7e4149f43736691ebb20430ec2ceb07aeb0bc4a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku6.dts @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-csra1-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS405 EVB1 4000 CSRA1 IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x040020 0x1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku7.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku7.dts new file mode 100644 index 0000000000000000000000000000000000000000..b3b723b422b8beb6e882997335172725c5e6d682 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku7.dts @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-csra6-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS405 EVB1 4000 CSRA6 IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x050020 0x1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku8.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku8.dts new file mode 100644 index 0000000000000000000000000000000000000000..ea43b030be69d0e4099bc5570a69ce26a0c1828b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku8.dts @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "qcs405.dtsi" +#include "qcs405-amic-audio-overlay.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. QCS405 EVB1 4000 AMIC IOT"; + compatible = "qcom,qcs405-iot", "qcom,qcs405", "qcom,iot"; + qcom,board-id = <0x060020 0x1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi b/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..210375cfd0429223cb3fe8d757368c43712a693f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-lpi.dtsi @@ -0,0 +1,304 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + lpi_tlmm: lpi_pinctrl@C070000 { + compatible = "qcom,lpi-pinctrl"; + reg = <0x0C070000 0x0>; + qcom,num-gpios = <21>; + gpio-controller; + #gpio-cells = <2>; + qcom,lpi-offset-tbl = <0x00000010>, <0x00000020>, + <0x00000030>, <0x00000040>, + <0x00000050>, <0x00000060>, + <0x00000070>, <0x00000080>, + <0x00000090>, <0x00000100>, + <0x00000110>, <0x00000120>, + <0x00000130>, <0x00000140>, + <0x00000150>, <0x00000160>, + <0x00000170>, <0x00000180>, + <0x00000190>, <0x00000200>, + <0x00000210>; + + cdc_dmic01_clk_active: dmic01_clk_active { + mux { + pins = "gpio8"; + function = "func1"; + }; + + config { + pins = "gpio8"; + drive-strength = <8>; + output-high; + }; + }; + + cdc_dmic01_clk_sleep: dmic01_clk_sleep { + mux { + pins = "gpio8"; + function = "func1"; + }; + + config { + pins = "gpio8"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + + cdc_dmic01_data_active: dmic01_data_active { + mux { + pins = "gpio9"; + function = "func1"; + }; + + config { + pins = "gpio9"; + drive-strength = <8>; + input-enable; + }; + }; + + cdc_dmic01_data_sleep: dmic01_data_sleep { + mux { + pins = "gpio9"; + function = "func1"; + }; + + config { + pins = "gpio9"; + drive-strength = <2>; + pull-down; + input-enable; + }; + }; + + cdc_dmic23_clk_active: dmic23_clk_active { + mux { + pins = "gpio10"; + function = "func1"; + }; + + config { + pins = "gpio10"; + drive-strength = <8>; + output-high; + }; + }; + + cdc_dmic23_clk_sleep: dmic23_clk_sleep { + mux { + pins = "gpio10"; + function = "func1"; + }; + + config { + pins = "gpio10"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + + cdc_dmic23_data_active: dmic23_data_active { + mux { + pins = "gpio11"; + function = "func1"; + }; + + config { + pins = "gpio11"; + drive-strength = <8>; + input-enable; + }; + }; + + cdc_dmic23_data_sleep: dmic23_data_sleep { + mux { + pins = "gpio11"; + function = "func1"; + }; + + config { + pins = "gpio11"; + drive-strength = <2>; + pull-down; + input-enable; + }; + }; + + cdc_dmic45_clk_active: dmic45_clk_active { + mux { + pins = "gpio12"; + function = "func1"; + }; + + config { + pins = "gpio12"; + drive-strength = <8>; + output-high; + }; + }; + + cdc_dmic45_clk_sleep: dmic45_clk_sleep { + mux { + pins = "gpio12"; + function = "func1"; + }; + + config { + pins = "gpio12"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + + cdc_dmic45_data_active: dmic45_data_active { + mux { + pins = "gpio13"; + function = "func1"; + }; + + config { + pins = "gpio13"; + drive-strength = <8>; + input-enable; + }; + }; + + cdc_dmic45_data_sleep: dmic45_data_sleep { + mux { + pins = "gpio13"; + function = "func1"; + }; + + config { + pins = "gpio13"; + drive-strength = <2>; + pull-down; + input-enable; + }; + }; + + cdc_dmic67_clk_active: dmic67_clk_active { + mux { + pins = "gpio14"; + function = "func1"; + }; + + config { + pins = "gpio14"; + drive-strength = <8>; + output-high; + }; + }; + + cdc_dmic67_clk_sleep: dmic67_clk_sleep { + mux { + pins = "gpio14"; + function = "func1"; + }; + + config { + pins = "gpio14"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; + + cdc_dmic67_data_active: dmic67_data_active { + mux { + pins = "gpio15"; + function = "func1"; + }; + + config { + pins = "gpio15"; + drive-strength = <8>; + input-enable; + }; + }; + + cdc_dmic67_data_sleep: dmic67_data_sleep { + mux { + pins = "gpio15"; + function = "func1"; + }; + + config { + pins = "gpio15"; + drive-strength = <2>; + pull-down; + input-enable; + }; + }; + + 
wsa_swr_clk_pin { + wsa_swr_clk_sleep: wsa_swr_clk_sleep { + mux { + pins = "gpio5"; + function = "wsa_clk"; + }; + + config { + pins = "gpio5"; + drive-strength = <2>; + bias-bus-hold; + }; + }; + + wsa_swr_clk_active: wsa_swr_clk_active { + mux { + pins = "gpio5"; + function = "wsa_clk"; + }; + + config { + pins = "gpio5"; + drive-strength = <2>; + bias-bus-hold; + }; + }; + }; + + wsa_swr_data_pin { + wsa_swr_data_sleep: wsa_swr_data_sleep { + mux { + pins = "gpio20"; + function = "wsa_data"; + }; + + config { + pins = "gpio20"; + drive-strength = <4>; + bias-bus-hold; + }; + }; + + wsa_swr_data_active: wsa_swr_data_active { + mux { + pins = "gpio20"; + function = "wsa_data"; + }; + + config { + pins = "gpio20"; + drive-strength = <4>; + bias-bus-hold; + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi b/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..31243496e78bda6a3bbe33e191b5f1f12962bb85 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-mdss-pll.dtsi @@ -0,0 +1,86 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + mdss_dsi0_pll: qcom,mdss_dsi_pll@1a94a00 { + compatible = "qcom,mdss_dsi_pll_28lpm"; + label = "MDSS DSI 0 PLL"; + cell-index = <0>; + #clock-cells = <1>; + + reg = <0x01a94a00 0xd4>, + <0x0184d074 0x8>; + reg-names = "pll_base", "gdsc_base"; + + clocks = <&clock_gcc GCC_MDSS_AHB_CLK>; + clock-names = "iface_clk"; + clock-rate = <0>; + + gdsc-supply = <&gdsc_mdss>; + + qcom,dsi-pll-ssc-en; + qcom,dsi-pll-ssc-mode = "down-spread"; + qcom,ssc-frequency-hz = <30000>; + qcom,ssc-ppm = <5000>; + + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; + + mdss_dsi1_pll: qcom,mdss_dsi_pll@1a96a00 { + status="disabled"; + compatible = "qcom,mdss_dsi_pll_28lpm"; + label = "MDSS DSI 1 PLL"; + cell-index = <1>; + #clock-cells = <1>; + + reg = <0x01a96a00 0xd4>, + <0x0184d074 0x8>; + reg-names = "pll_base", "gdsc_base"; + + clocks = <&clock_gcc GCC_MDSS_AHB_CLK>; + clock-names = "iface_clk"; + clock-rate = <0>; + + gdsc-supply = <&gdsc_mdss>; + + qcom,dsi-pll-ssc-en; + qcom,dsi-pll-ssc-mode = "down-spread"; + qcom,ssc-frequency-hz = <30000>; + qcom,ssc-ppm = <5000>; + + qcom,platform-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + + qcom,platform-supply-entry@0 { + reg = <0>; + qcom,supply-name = "gdsc"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-nowcd-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-nowcd-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..bb1e52859635dfe1a442f9316e5636aac53f8445 --- /dev/null +++ 
b/arch/arm64/boot/dts/qcom/qcs405-nowcd-audio-overlay.dtsi @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "qcs405-va-bolero.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-nowcd-snd-card"; + qcom,va-bolero-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; + qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; + qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>; + qcom,cdc-dmic67-gpios = <&cdc_dmic67_gpios>; + qcom,audio-routing = + "VA DMIC0", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic0", + "VA DMIC1", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic1", + "VA DMIC2", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic2", + "VA DMIC3", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic3", + "VA DMIC4", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic4", + "VA DMIC5", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic5", + "VA DMIC6", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic6", + "VA DMIC7", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic7"; +}; + +&bolero { + qcom,num-macros = <1>; +}; + +&soc { + cdc_dmic01_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic01_clk_active &cdc_dmic01_data_active>; + pinctrl-1 = <&cdc_dmic01_clk_sleep &cdc_dmic01_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic23_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic23_clk_active &cdc_dmic23_data_active>; + pinctrl-1 = <&cdc_dmic23_clk_sleep &cdc_dmic23_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic45_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic45_clk_active &cdc_dmic45_data_active>; + pinctrl-1 = <&cdc_dmic45_clk_sleep &cdc_dmic45_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic67_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic67_clk_active &cdc_dmic67_data_active>; + pinctrl-1 = <&cdc_dmic67_clk_sleep &cdc_dmic67_data_sleep>; + qcom,lpi-gpios; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi index 990142fb205b786a5271981b2020a7fcc816297a..f388761c1565289ba5898ad2b755814558bf3f64 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-pinctrl.dtsi @@ -14,7 +14,7 @@ &soc { tlmm: pinctrl@1000000 { compatible = "qcom,qcs405-pinctrl"; - reg = <0x1000000 0x300000>; + reg = <0x1000000 0x500000>; interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>; gpio-controller; #gpio-cells = <2>; @@ -22,30 +22,43 @@ interrupt-parent = <&wakegpio>; #interrupt-cells = <2>; - pmx-uartconsole { - uart_console_active: uart_console_active { + blsp1_uart2_console { + blsp_uart_tx_a2_active: blsp_uart_tx_a2_active { mux { - pins = "gpio17", "gpio18"; - function = "blsp1_uart2"; + pins = 
"gpio17"; + function = "blsp_uart_tx_a2"; }; config { - pins = "gpio17", "gpio18"; + pins = "gpio17"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp_uart_rx_a2_active: blsp_uart_rx_a2_active { + mux { + pins = "gpio18"; + function = "blsp_uart_rx_a2"; + }; + + config { + pins = "gpio18"; drive-strength = <2>; bias-disable; }; }; - uart_console_sleep: uart_console_sleep { + blsp_uart_tx_rx_a2_sleep: blsp_uart_tx_rx_a2_sleep { mux { pins = "gpio17", "gpio18"; - function = "blsp1_uart2"; + function = "gpio"; }; config { pins = "gpio17", "gpio18"; drive-strength = <2>; - bias-pull-down; + bias-disable; }; }; }; @@ -85,14 +98,12 @@ blsp1_uart2 { blsp1_uart2_active: blsp1_uart2_active { mux { - pins = "gpio22", "gpio23", - "gpio24", "gpio25"; + pins = "gpio22", "gpio23"; function = "blsp_uart1"; }; config { - pins = "gpio22", "gpio23", - "gpio24", "gpio25"; + pins = "gpio22", "gpio23"; drive-strength = <2>; bias-disable; }; @@ -100,14 +111,12 @@ blsp1_uart2_sleep: blsp1_uart2_sleep { mux { - pins = "gpio22", "gpio23", - "gpio24", "gpio25"; + pins = "gpio22", "gpio23"; function = "gpio"; }; config { - pins = "gpio22", "gpio23", - "gpio24", "gpio25"; + pins = "gpio22", "gpio23"; drive-strength = <2>; bias-disable; }; @@ -645,6 +654,39 @@ }; }; + ntag { + ntag_int_active: ntag_int_active { + /* active state */ + mux { + /* GPIO 53 Field Detect Interrupt */ + pins = "gpio53"; + function = "gpio"; + }; + + config { + pins = "gpio53"; + drive-strength = <2>; /* 2 MA */ + bias-pull-up; + }; + }; + + ntag_int_suspend: ntag_int_suspend { + /* sleep state */ + mux { + /* GPIO 53 Field Detect Interrupt */ + pins = "gpio53"; + function = "gpio"; + }; + + config { + pins = "gpio53"; + drive-strength = <2>; /* 2 MA */ + bias-pull-up; + }; + }; + + }; + /* SDC pin type */ sdc1_clk_on: sdc1_clk_on { config { @@ -797,5 +839,525 @@ input-enable; }; }; + + usb3_id_det_default: usb2_id_det_default { + config { + pins = "gpio116"; + drive-strength = <2>; + bias-pull-up; + input-enable; + }; + }; + + pri_mi2s_mclk { + pri_mi2s_mclk_sleep: pri_mi2s_mclk_sleep { + mux { + pins = "gpio64"; + function = "gpio"; + }; + + config { + pins = "gpio64"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_mclk_active: pri_mi2s_mclk_active { + mux { + pins = "gpio64"; + function = "pri_mi2s"; + }; + + config { + pins = "gpio64"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_sck { + pri_mi2s_sck_sleep: pri_mi2s_sck_sleep { + mux { + pins = "gpio87"; + function = "i2s_1"; + }; + + config { + pins = "gpio87"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sck_active: pri_mi2s_sck_active { + mux { + pins = "gpio87"; + function = "i2s_1"; + }; + + config { + pins = "gpio87"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_ws { + pri_mi2s_ws_sleep: pri_mi2s_ws_sleep { + mux { + pins = "gpio88"; + function = "i2s_1"; + }; + + config { + pins = "gpio88"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_ws_active: pri_mi2s_ws_active { + mux { + pins = "gpio88"; + function = "i2s_1"; + }; + + config { + pins = "gpio88"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + pri_mi2s_sd0 { + pri_mi2s_sd0_sleep: pri_mi2s_sd0_sleep { + mux { + pins = "gpio89"; + function = "i2s_1"; + }; + + 
config { + pins = "gpio89"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd0_active: pri_mi2s_sd0_active { + mux { + pins = "gpio89"; + function = "i2s_1"; + }; + + config { + pins = "gpio89"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + pri_mi2s_sd1 { + pri_mi2s_sd1_sleep: pri_mi2s_sd1_sleep { + mux { + pins = "gpio90"; + function = "i2s_1"; + }; + + config { + pins = "gpio90"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd1_active: pri_mi2s_sd1_active { + mux { + pins = "gpio90"; + function = "i2s_1"; + }; + + config { + pins = "gpio90"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + pri_mi2s_sd2 { + pri_mi2s_sd2_sleep: pri_mi2s_sd2_sleep { + mux { + pins = "gpio91"; + function = "i2s_1"; + }; + + config { + pins = "gpio91"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd2_active: pri_mi2s_sd2_active { + mux { + pins = "gpio91"; + function = "i2s_1"; + }; + + config { + pins = "gpio91"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + pri_mi2s_sd3 { + pri_mi2s_sd3_sleep: pri_mi2s_sd3_sleep { + mux { + pins = "gpio92"; + function = "i2s_1"; + }; + + config { + pins = "gpio92"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd3_active: pri_mi2s_sd3_active { + mux { + pins = "gpio92"; + function = "i2s_1"; + }; + + config { + pins = "gpio92"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + pri_mi2s_sd4 { + pri_mi2s_sd4_sleep: pri_mi2s_sd4_sleep { + mux { + pins = "gpio93"; + function = "i2s_1"; + }; + + config { + pins = "gpio93"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd4_active: pri_mi2s_sd4_active { + mux { + pins = "gpio93"; + function = "i2s_1"; + }; + + config { + pins = "gpio93"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + pri_mi2s_sd5 { + pri_mi2s_sd5_sleep: pri_mi2s_sd5_sleep { + mux { + pins = "gpio94"; + function = "i2s_1"; + }; + + config { + pins = "gpio94"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + pri_mi2s_sd5_active: pri_mi2s_sd5_active { + mux { + pins = "gpio94"; + function = "i2s_1"; + }; + + config { + pins = "gpio94"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + sec_mi2s_sck { + sec_mi2s_sck_sleep: sec_mi2s_sck_sleep { + mux { + pins = "gpio97"; + function = "i2s_2"; + }; + + config { + pins = "gpio97"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sck_active: sec_mi2s_sck_active { + mux { + pins = "gpio97"; + function = "i2s_2"; + }; + + config { + pins = "gpio97"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + sec_mi2s_ws { + sec_mi2s_ws_sleep: sec_mi2s_ws_sleep { + mux { + pins = "gpio98"; + function = "i2s_2"; + }; + + config { + pins = "gpio98"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_ws_active: sec_mi2s_ws_active { + mux { + pins = "gpio98"; + function = "i2s_2"; + }; + + config { + pins = "gpio98"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + 
sec_mi2s_sd0 { + sec_mi2s_sd0_sleep: sec_mi2s_sd0_sleep { + mux { + pins = "gpio99"; + function = "i2s_2"; + }; + + config { + pins = "gpio99"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sd0_active: sec_mi2s_sd0_active { + mux { + pins = "gpio99"; + function = "i2s_2"; + }; + + config { + pins = "gpio99"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_sd1 { + sec_mi2s_sd1_sleep: sec_mi2s_sd1_sleep { + mux { + pins = "gpio100"; + function = "i2s_2"; + }; + + config { + pins = "gpio100"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sd1_active: sec_mi2s_sd1_active { + mux { + pins = "gpio100"; + function = "i2s_2"; + }; + + config { + pins = "gpio100"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + sec_mi2s_sd2 { + sec_mi2s_sd2_sleep: sec_mi2s_sd2_sleep { + mux { + pins = "gpio101"; + function = "i2s_2"; + }; + + config { + pins = "gpio101"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sd2_active: sec_mi2s_sd2_active { + mux { + pins = "gpio101"; + function = "i2s_2"; + }; + + config { + pins = "gpio101"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_sd3 { + sec_mi2s_sd3_sleep: sec_mi2s_sd3_sleep { + mux { + pins = "gpio102"; + function = "i2s_2"; + }; + + config { + pins = "gpio102"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + input-enable; + }; + }; + + sec_mi2s_sd3_active: sec_mi2s_sd3_active { + mux { + pins = "gpio102"; + function = "i2s_2"; + }; + + config { + pins = "gpio102"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + /* WSA speaker reset pins */ + wsa_en_1_2 { + wsa_en_1_2_sleep: wsa_en_1_2_sleep { + mux { + pins = "gpio77"; + function = "gpio"; + }; + + config { + pins = "gpio77"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; + input-enable; + }; + }; + + wsa_en_1_2_active: wsa_en_1_2_active { + mux { + pins = "gpio77"; + function = "gpio"; + }; + + config { + pins = "gpio77"; + drive-strength = <16>; /* 16 mA */ + bias-disable; + output-high; + }; + }; + }; + + wcd9xxx_intr { + wcd_intr_default: wcd_intr_default{ + mux { + pins = "gpio105"; + function = "gpio"; + }; + + config { + pins = "gpio105"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + input-enable; + }; + }; + }; + + cdc_reset_ctrl { + cdc_reset_sleep: cdc_reset_sleep { + mux { + pins = "gpio46"; + function = "gpio"; + }; + config { + pins = "gpio46"; + drive-strength = <16>; + bias-disable; + output-low; + }; + }; + + cdc_reset_active:cdc_reset_active { + mux { + pins = "gpio46"; + function = "gpio"; + }; + config { + pins = "gpio46"; + drive-strength = <16>; + bias-pull-down; + output-high; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi b/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi index 85a2e9fe566e43d3e9807484cb406a4a93ed47e0..36b8d2cdcef316748938449c6e83e5f531d31817 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-pm.dtsi @@ -26,6 +26,8 @@ qcom,cpu-vctl-list = <&CPU0 &CPU1 &CPU2 &CPU3>; qcom,vctl-timeout-us = <500>; qcom,vctl-port = <0x0>; + qcom,vctl-port-ub = <0x1>; + qcom,pfm-port = <0x02>; }; qcom,lpm-levels { diff --git a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi index 
74f0a257332b1d4f0a02745c117f9fc36eed231f..ecaccdccf65c51b4607ac018f590393e0394c1b0 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-regulator.dtsi @@ -339,7 +339,7 @@ <70 19 7 0>, <70 26 7 0>; qcom,cpr-fuse-quot-offset-scale = <5 5 5>; - qcom,cpr-init-voltage-step = <10000>; + qcom,cpr-init-voltage-step = <8000>; qcom,cpr-corner-map = <1 2 3>; qcom,mem-acc-corner-map = <1 2 2>; qcom,cpr-corner-frequency-map = diff --git a/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi b/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi index 6e855d4284c0ccc33a23bb7f02b74601b9f87be8..c2885d95855f6e921eb3c2ef23ddbd9448e44e06 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405-rumi.dtsi @@ -33,10 +33,6 @@ 0x0 0x4>; }; - usb_nop_phy: usb_nop_phy { - compatible = "usb-nop-xceiv"; - }; - timer { clock-frequency = <0x100000>; }; @@ -46,7 +42,13 @@ }; }; -&usb0 { +&usb3 { + /delete-property/ extcon; + status = "disabled"; +}; + +&usb2s { + /delete-property/ extcon; dwc3@78c0000 { usb-phy = <&usb_emu_phy>, <&usb_nop_phy>; maximum-speed = "high-speed"; @@ -77,6 +79,7 @@ /delete-node/ qcom,spmi@200f000; /delete-node/ regulator@1942120; /delete-node/ regulator@b018000; + /delete-node/ usb3_extcon; }; &rpm_bus { diff --git a/arch/arm64/boot/dts/qcom/qcs405-tasha.dtsi b/arch/arm64/boot/dts/qcom/qcs405-tasha.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..aaa634f37119ca32eed87dbd58b2ec1d13586195 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-tasha.dtsi @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + wcd9xxx_intc: wcd9xxx-irq { + compatible = "qcom,wcd9xxx-irq"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tlmm>; + qcom,gpio-connect = <&tlmm 105 0>; + pinctrl-names = "default"; + pinctrl-0 = <&wcd_intr_default>; + }; + + clock_audio: audio_ext_clk { + compatible = "qcom,audio-ref-clk"; + qcom,codec-ext-clk-src = <0>; + qcom,use-pinctrl = <1>; + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&tasha_mclk_default>; + pinctrl-1 = <&tasha_mclk_default>; + #clock-names = "osr_clk"; + clocks = <&pms405_clkdiv>; + #clock-cells = <1>; + }; + + wcd_rst_gpio: msm_cdc_pinctrl@46 { + compatible = "qcom,msm-cdc-pinctrl"; + qcom,cdc-rst-n-gpio = <&tlmm 46 0>; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_reset_active>; + pinctrl-1 = <&cdc_reset_sleep>; + }; +}; + +&slim_aud { + wcd9335: tasha_codec { + compatible = "qcom,tasha-slim-pgd"; + elemental-addr = [00 01 a0 01 17 02]; + + interrupt-parent = <&wcd9xxx_intc>; + interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + 17 18 19 20 21 22 23 24 25 26 27 28 29 + 30>; + + qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>; + + clock-names = "wcd_clk"; + clocks = <&clock_audio AUDIO_PMI_CLK>; + + qcom,cdc-micbias1-mv = <1800>; + qcom,cdc-micbias2-mv = <1800>; + qcom,cdc-micbias3-mv = <1800>; + qcom,cdc-micbias4-mv = <1800>; + + qcom,cdc-mclk-clk-rate = <9600000>; + qcom,cdc-slim-ifd = "tasha-slim-ifd"; + qcom,cdc-slim-ifd-elemental-addr = [00 00 a0 01 17 02]; + qcom,cdc-dmic-sample-rate = <4800000>; + qcom,cdc-mad-dmic-rate = <600000>; + + cdc-vdd-buck-supply = <&pms405_s4>; + qcom,cdc-vdd-buck-voltage = <1800000 1800000>; + qcom,cdc-vdd-buck-current = <594000>; + + cdc-buck-sido-supply = <&pms405_s4>; + qcom,cdc-buck-sido-voltage = <1800000 1800000>; + qcom,cdc-buck-sido-current = <200000>; + + cdc-vdd-tx-h-supply = <&pms405_l6>; + qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>; + qcom,cdc-vdd-tx-h-current = <25000>; + + cdc-vdd-rx-h-supply = <&pms405_l6>; + qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>; + qcom,cdc-vdd-rx-h-current = <25000>; + + cdc-vdd-px-supply = <&pms405_l6>; + qcom,cdc-vdd-px-voltage = <1800000 1800000>; + qcom,cdc-vdd-px-current = <10000>; + + qcom,cdc-static-supplies = "cdc-vdd-buck", + "cdc-buck-sido", + "cdc-vdd-tx-h", + "cdc-vdd-rx-h"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi b/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..d213b9b738990a57b835f0b18fbe09401fffefe4 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi @@ -0,0 +1,211 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +&soc { + /* Secondary USB port related controller */ + usb3: ssusb@7580000 { + compatible = "qcom,dwc-usb3-msm"; + reg = <0x7580000 0x100000>; + reg-names = "core_base"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + interrupts = <0 25 0>, <0 319 0>; + interrupt-names = "pwr_event_irq", "hs_phy_irq"; + + clocks = <&clock_gcc GCC_USB30_MASTER_CLK>, + <&clock_gcc GCC_SYS_NOC_USB3_CLK>, + <&clock_gcc GCC_USB30_SLEEP_CLK>, + <&clock_gcc GCC_USB30_MOCK_UTMI_CLK>, + <&clock_rpmcc CXO_SMD_OTG_CLK>, + <&clock_gcc GCC_PCNOC_USB3_CLK>; + clock-names = "core_clk", "iface_clk", "sleep_clk", + "utmi_clk", "xo", "noc_aggr_clk"; + + qcom,core-clk-rate = <200000000>; + qcom,core-clk-rate-hs = <10000000>; + + qcom,pm-qos-latency = <181>; + qcom,msm-bus,name = "usb3"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + ; + + resets = <&clock_gcc GCC_USB_30_BCR>; + reset-names = "core_reset"; + + dwc3@7580000 { + compatible = "snps,dwc3"; + reg = <0x7580000 0xcd00>; + interrupts = <0 26 0>; + usb-phy = <&usb2_phy1>, <&usb_ss_phy>; + linux,sysdev_is_parent; + snps,disable-clk-gating; + snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; + snps,usb3-u1u2-disable; + usb-core-id = <1>; + maximum-speed = "high-speed"; + dr_mode = "host"; + }; + }; + + /* Secondary USB port related High Speed PHY */ + usb2_phy1: hsphy@7a000 { + compatible = "qcom,usb-snps-hsphy"; + reg = <0x7a000 0x200>; + reg-names = "phy_csr"; + + vdd-supply = <&pms405_l4>; + vdda18-supply = <&pms405_l5>; + vdda33-supply = <&pms405_l12>; + qcom,vdd-voltage-level = <0 1144000 1144000>; + + clocks = <&clock_rpmcc RPM_SMD_LN_BB_CLK>, + <&clock_gcc GCC_USB_HS_PHY_CFG_AHB_CLK>; + clock-names = "ref_clk", "phy_csr_clk"; + + resets = <&clock_gcc GCC_USB_HS_PHY_CFG_AHB_BCR>, + <&clock_gcc GCC_USB2A_PHY_BCR>; + reset-names = "phy_reset", "phy_por_reset"; + + qcom,snps-hs-phy-init-seq = + <0xc0 0x01 0>, + <0xe8 0x0d 0>, + <0x74 0x12 0>, + <0x98 0x63 0>, + <0x9c 0x03 0>, + <0xa0 0x1d 0>, + <0xa4 0x03 0>, + <0x8c 0x23 0>, + <0x78 0x08 0>, + <0x7c 0xdc 0>, + <0x90 0xe0 20>, + <0x74 0x10 0>, + <0x90 0x60 0>, + <0xffffffff 0xffffffff 0>; + }; + + /* Secondary USB port related Super Speed PHY */ + usb_ss_phy: ssphy@7678800 { + compatible = "qcom,usb-ssphy"; + reg = <0x7678800 0x400>; + vdd-supply = <&pms405_l3>; + vdda18-supply = <&pms405_l5>; + qcom,vdd-voltage-level = <0 1050000 1050000>; + + clocks = <&clock_rpmcc RPM_SMD_LN_BB_CLK>; + clock-names = "ref_clk"; + + resets = <&clock_gcc GCC_USB3_PHY_BCR>, + <&clock_gcc GCC_USB3PHY_PHY_BCR>; + reset-names = "phy_reset", "phy_com_reset"; + }; + + usb_nop_phy: usb_nop_phy { + compatible = "usb-nop-xceiv"; + }; + + /* Primary USB port related controller */ + usb2s: hsusb@78c0000 { + compatible = "qcom,dwc-usb3-msm"; + reg = <0x78c0000 0x100000>; + reg-names = "core_base"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + interrupts = <0 32 0>, <0 318 0>; + interrupt-names = "pwr_event_irq", "hs_phy_irq"; + + clocks = <&clock_gcc GCC_USB_HS_SYSTEM_CLK>, + <&clock_gcc GCC_PCNOC_USB2_CLK>, + <&clock_gcc GCC_USB_HS_INACTIVITY_TIMERS_CLK>, + <&clock_gcc GCC_USB20_MOCK_UTMI_CLK>, + <&clock_rpmcc CXO_SMD_OTG_CLK>; + clock-names = "core_clk", "iface_clk", "sleep_clk", + "utmi_clk", "xo"; + + qcom,core-clk-rate = <133333333>; + qcom,msm-bus,name = "usb2s"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + , + ; + + resets = <&clock_gcc GCC_USB_HS_BCR>; + reset-names = 
"core_reset"; + + dwc3@78c0000 { + compatible = "snps,dwc3"; + reg = <0x78c0000 0xcd00>; + interrupts = <0 44 0>; + usb-phy = <&usb2_phy0>, <&usb_nop_phy>; + linux,sysdev_is_parent; + snps,disable-clk-gating; + snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; + snps,usb3_lpm_capable; + usb-core-id = <0>; + maximum-speed = "high-speed"; + dr_mode = "otg"; + }; + }; + + /* Primary USB port related High Speed PHY */ + usb2_phy0: hsphy@7c000 { + compatible = "qcom,usb-snps-hsphy"; + reg = <0x7c000 0x200>; + reg-names = "phy_csr"; + + vdd-supply = <&pms405_l4>; + vdda18-supply = <&pms405_l5>; + vdda33-supply = <&pms405_l12>; + qcom,vdd-voltage-level = <0 1144000 1144000>; + + clocks = <&clock_rpmcc RPM_SMD_LN_BB_CLK>, + <&clock_gcc GCC_USB_HS_PHY_CFG_AHB_CLK>; + clock-names = "ref_clk", "phy_csr_clk"; + + resets = <&clock_gcc GCC_QUSB2_PHY_BCR>, + <&clock_gcc GCC_USB2_HS_PHY_ONLY_BCR>; + reset-names = "phy_reset", "phy_por_reset"; + + qcom,snps-hs-phy-init-seq = + <0xc0 0x01 0>, + <0xe8 0x0d 0>, + <0x74 0x12 0>, + <0x98 0x63 0>, + <0x9c 0x03 0>, + <0xa0 0x1d 0>, + <0xa4 0x03 0>, + <0x8c 0x23 0>, + <0x78 0x08 0>, + <0x7c 0xdc 0>, + <0x90 0xe0 20>, + <0x74 0x10 0>, + <0x90 0x60 0>, + <0xffffffff 0xffffffff 0>; + }; + +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-va-bolero.dtsi b/arch/arm64/boot/dts/qcom/qcs405-va-bolero.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..fface1992979f090e9e2873b7b01de532e12e73b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-va-bolero.dtsi @@ -0,0 +1,38 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&bolero { + va_macro: va_macro { + compatible = "qcom,va-macro"; + reg = <0x0C490000 0x0>; + clock-names = "va_core_clk"; + clocks = <&clock_audio_va 0>; + va-vdd-micb-supply = <&pms405_l7>; + qcom,va-vdd-micb-voltage = <1800000 1800000>; + qcom,va-vdd-micb-current = <11200>; + qcom,va-dmic-sample-rate = <4800000>; + }; +}; + +&soc { + clock_audio_va: va_core_clk { + compatible = "qcom,audio-ref-clk"; + qcom,codec-ext-clk-src = <2>; + qcom,codec-lpass-ext-clk-freq = <9600000>; + qcom,codec-lpass-clk-id = <0x30B>; + #clock-cells = <1>; + }; +}; + +&va_cdc_dma_0_tx { + qcom,msm-dai-is-island-supported = <1>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-wsa-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/qcs405-wsa-audio-overlay.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..c97b116f79d6d1f3ffcfa41231f9e1b72efca877 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-wsa-audio-overlay.dtsi @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include "qcs405-tasha.dtsi" +#include "qcs405-va-bolero.dtsi" +#include "qcs405-wsa-bolero.dtsi" +#include "qcs405-wsa881x.dtsi" + +&qcs405_snd { + qcom,model = "qcs405-wsa-snd-card"; + qcom,va-bolero-codec = <1>; + qcom,wsa-bolero-codec = <1>; + qcom,tasha-codec = <1>; + asoc-codec = <&stub_codec>, <&bolero>; + asoc-codec-names = "msm-stub-codec.1", "bolero_codec"; + qcom,wsa-max-devs = <2>; + qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>, + <&wsa881x_0213>, <&wsa881x_0214>; + qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", + "SpkrLeft", "SpkrRight"; + qcom,cdc-dmic01-gpios = <&cdc_dmic01_gpios>; + qcom,cdc-dmic23-gpios = <&cdc_dmic23_gpios>; + qcom,cdc-dmic45-gpios = <&cdc_dmic45_gpios>; + qcom,cdc-dmic67-gpios = <&cdc_dmic67_gpios>; + qcom,audio-routing = + "AMIC3", "MIC BIAS3", + "AMIC4", "MIC BIAS4", + "MIC BIAS3", "Analog Mic3", + "MIC BIAS4", "Analog Mic4", + "VA DMIC0", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic0", + "VA DMIC1", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic1", + "VA DMIC2", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic2", + "VA DMIC3", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic3", + "VA DMIC4", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic4", + "VA DMIC5", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic5", + "VA DMIC6", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic6", + "VA DMIC7", "VA MIC BIAS1", + "VA MIC BIAS1", "Digital Mic7", + "SpkrLeft IN", "WSA_SPK1 OUT", + "SpkrRight IN", "WSA_SPK2 OUT", + "WSA_SPK1 OUT", "VA_MCLK", + "WSA_SPK2 OUT", "VA_MCLK"; +}; + +&bolero { + qcom,num-macros = <2>; +}; + +&soc { + cdc_dmic01_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic01_clk_active &cdc_dmic01_data_active>; + pinctrl-1 = <&cdc_dmic01_clk_sleep &cdc_dmic01_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic23_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic23_clk_active &cdc_dmic23_data_active>; + pinctrl-1 = <&cdc_dmic23_clk_sleep &cdc_dmic23_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic45_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic45_clk_active &cdc_dmic45_data_active>; + pinctrl-1 = <&cdc_dmic45_clk_sleep &cdc_dmic45_data_sleep>; + qcom,lpi-gpios; + }; + + cdc_dmic67_gpios: cdc_dmic_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_dmic67_clk_active &cdc_dmic67_data_active>; + pinctrl-1 = <&cdc_dmic67_clk_sleep &cdc_dmic67_data_sleep>; + qcom,lpi-gpios; + }; + + wsa_swr_gpios: wsa_swr_clk_data_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&wsa_swr_clk_active &wsa_swr_data_active>; + pinctrl-1 = <&wsa_swr_clk_sleep &wsa_swr_data_sleep>; + qcom,lpi-gpios; + }; + + wsa_spkr_en_1_2: wsa_spkr_en_1_2_pinctrl { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&wsa_en_1_2_active>; + pinctrl-1 = <&wsa_en_1_2_sleep>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-wsa-bolero.dtsi b/arch/arm64/boot/dts/qcom/qcs405-wsa-bolero.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..22c59831a81c2897d10a4f2a0d2a4a1b62a55b5c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-wsa-bolero.dtsi @@ -0,0 +1,40 @@ +/* Copyright (c) 2018, The Linux 
Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&bolero { + wsa_macro: wsa-macro { + compatible = "qcom,wsa-macro"; + reg = <0x0C2C0000 0x0>; + clock-names = "wsa_core_clk", "wsa_npl_clk"; + clocks = <&clock_audio_wsa_1 0>, + <&clock_audio_wsa_2 0>; + qcom,wsa-swr-gpios = &wsa_swr_gpios; + }; +}; + +&soc { + clock_audio_wsa_1: wsa_core_clk { + compatible = "qcom,audio-ref-clk"; + qcom,codec-ext-clk-src = <2>; + qcom,codec-lpass-ext-clk-freq = <19200000>; + qcom,codec-lpass-clk-id = <0x309>; + #clock-cells = <1>; + }; + + clock_audio_wsa_2: wsa_npl_clk { + compatible = "qcom,audio-ref-clk"; + qcom,codec-ext-clk-src = <2>; + qcom,codec-lpass-ext-clk-freq = <19200000>; + qcom,codec-lpass-clk-id = <0x30A>; + #clock-cells = <1>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..bdaf3fc9ab84d5a79f31113d3f77715a52b37bf2 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/qcs405-wsa881x.dtsi @@ -0,0 +1,57 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/{ + aliases { + swr0 = &swr_0; + }; +}; + +#include + +&wsa_macro { + swr_0: wsa_swr_master { + compatible = "qcom,swr-mstr"; + #address-cells = <2>; + #size-cells = <0>; + qcom,swr-num-ports = <8>; + qcom,swr-port-mapping = <1 SPKR_L 0x1>, + <2 SPKR_L_COMP 0xF>, <3 SPKR_L_BOOST 0x3>, + <4 SPKR_R 0x1>, <5 SPKR_R_COMP 0xF>, + <6 SPKR_R_BOOST 0x3>, <7 SPKR_L_VI 0x3>, + <8 SPKR_R_VI 0x3>; + + wsa881x_0211: wsa881x@20170211 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x20170211>; + qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>; + }; + + wsa881x_0212: wsa881x@20170212 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x20170212>; + qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>; + }; + + wsa881x_0213: wsa881x@21170213 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x21170213>; + qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>; + }; + + wsa881x_0214: wsa881x@21170214 { + compatible = "qcom,wsa881x"; + reg = <0x0 0x21170214>; + qcom,spkr-sd-n-node = <&wsa_spkr_en_1_2>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi index 3d6b81b1811c94dca55fcb82606929a4abfcf134..5509234f235eb6a3fd89e96f9fb563aaeeec30de 100644 --- a/arch/arm64/boot/dts/qcom/qcs405.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi @@ -18,6 +18,10 @@ #include #include #include +#include + +#define MHZ_TO_MBPS(mhz, w) ((mhz * 1000000 * w) / (1024 * 1024)) +#define BW_OPP_ENTRY(mhz, w) opp-mhz {opp-hz = /bits/ 64 ;} / { model = "Qualcomm Technologies, Inc. 
QCS405"; @@ -118,6 +122,23 @@ #include "qcs405-pm.dtsi" #include "msm-arm-smmu-qcs405.dtsi" #include "qcs405-gpu.dtsi" +#include "qcs405-mdss-pll.dtsi" + +&i2c_5 { /* BLSP (NTAG) */ + status = "ok"; + nq@55 { + compatible = "qcom,nq-ntag"; + reg = <0x55>; + qcom,nq-ntagfd = <&tlmm 53 GPIO_ACTIVE_LOW>; + interrupt-parent = <&tlmm>; + interrupts = <53 0>; + interrupt-names = "ntag_fd"; + pinctrl-names = "ntag_active", "ntag_suspend"; + pinctrl-0 = <&ntag_int_active>; + pinctrl-1 = <&ntag_int_suspend>; + }; +}; + &soc { #address-cells = <1>; @@ -135,19 +156,19 @@ }; wakegic: wake-gic { - compatible = "qcom,mpm-gic-msm8937", "qcom,mpm-gic"; + compatible = "qcom,mpm-gic-qcs405", "qcom,mpm-gic"; interrupts = ; - reg = <0x601d0 0x1000>, + reg = <0x601b8 0x1000>, <0xb011008 0x4>; /* MSM_APCS_GCC_BASE 4K */ reg-names = "vmpm", "ipc"; - qcom,num-mpm-irqs = <64>; + qcom,num-mpm-irqs = <96>; interrupt-controller; interrupt-parent = <&intc>; #interrupt-cells = <3>; }; wakegpio: wake-gpio { - compatible = "qcom,mpm-gpio-msm8937", "qcom,mpm-gpio"; + compatible = "qcom,mpm-gpio-qcs405", "qcom,mpm-gpio"; interrupt-controller; interrupt-parent = <&intc>; #interrupt-cells = <2>; @@ -221,6 +242,15 @@ }; }; + clocks { + xo_board { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <19200000>; + clock-output-names = "xo_board"; + }; + }; + restart@4ab000 { compatible = "qcom,pshold"; reg = <0x4ab000 0x4>, @@ -278,10 +308,9 @@ vdd_dig_ao-supply = <&pms405_s1_level>; qcom,speed0-bin-v0 = < 0 0>, - < 960000000 1>, - < 1113600000 2>, - < 1267200000 3>, - < 1382400000 4>; + < 1113600000 1>, + < 1267200000 2>, + < 1401600000 3>; #clock-cells = <1>; }; @@ -319,17 +348,28 @@ status = "disabled"; }; - blsp1_uart2: serial@78b0000 { + blsp1_uart2_console: serial@78b1000 { compatible = "qcom,msm-uartdm", "qcom,msm-uartdm-v1.4"; - reg = <0x78b0000 0x200>; - interrupts = <0 108 0>; - + reg = <0x78b1000 0x200>; + interrupts = <0 118 0>; + clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>, + <&clock_gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&blsp_uart_tx_a2_active + &blsp_uart_rx_a2_active>; + pinctrl-1 = <&blsp_uart_tx_rx_a2_sleep>; + status = "okay"; }; dcc: dcc_v2@b2000 { compatible = "qcom,dcc-v2"; reg = <0x000b2000 0x1000>, <0x000bf800 0x800>; + + clocks = <&clock_gcc GCC_DCC_CLK>; + clock-names = "dcc_clk"; + reg-names = "dcc-base", "dcc-ram-base"; dcc-ram-offset = <0x400>; }; @@ -337,6 +377,7 @@ rpm_bus: qcom,rpm-smd { compatible = "qcom,rpm-smd"; rpm-channel-name = "rpm_requests"; + interrupts = ; rpm-channel-type = <15>; /* SMD_APPS_RPM */ }; @@ -373,10 +414,12 @@ qcom,chd { compatible = "qcom,core-hang-detect"; + label = "gold"; qcom,threshold-arr = <0xb088094 0xb098094 0xb0a8094 - 0xb0b8094 0xb188094 0xb198094 0xb1a8094 0xb1a8094>; + 0xb0b8094>; qcom,config-arr = <0xb08809c 0xb09809c 0xb0a809c - 0xb0b809c 0xb18809c 0xb19809c 0xb1a809c 0xb1b809c>; + 0xb0b809c>; + staus = "disabled"; }; qcom,msm-imem@8600000 { @@ -410,6 +453,11 @@ compatible = "qcom,msm-imem-pil"; reg = <0x94c 200>; }; + + diag_dload@c8 { + compatible = "qcom,msm-imem-diag-dload"; + reg = <0xc8 200>; + }; }; qcom,lpass@c000000 { @@ -425,6 +473,7 @@ qcom,proxy-clock-names = "xo"; qcom,pas-id = <1>; + qcom,mas-crypto = <&mas_crypto>; qcom,complete-ramdump; qcom,proxy-timeout-ms = <10000>; qcom,smem-id = <423>; @@ -463,6 +512,7 @@ qcom,proxy-clock-names = "xo"; qcom,pas-id = <18>; + qcom,mas-crypto = <&mas_crypto>; qcom,complete-ramdump; qcom,proxy-timeout-ms = <10000>; 
qcom,smem-id = <601>; @@ -501,8 +551,9 @@ qcom,proxy-clock-names = "xo"; qcom,pas-id = <6>; + qcom,mas-crypto = <&mas_crypto>; qcom,proxy-timeout-ms = <10000>; - qcom,smem-id = <422>; + qcom,smem-id = <421>; qcom,sysmon-id = <0>; qcom,ssctl-instance-id = <0x12>; qcom,firmware-name = "wcnss"; @@ -582,53 +633,47 @@ qcom,rpc-latency-us = <611>; qcom,fastrpc-adsp-audio-pdr; qcom,fastrpc-adsp-sensors-pdr; + qcom,fastrpc-legacy-remote-heap; qcom,msm_fastrpc_compute_cb1 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; iommus = <&apps_smmu 0x1001 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb2 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; iommus = <&apps_smmu 0x1002 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb3 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; iommus = <&apps_smmu 0x1003 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb4 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; iommus = <&apps_smmu 0x1004 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb5 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "cdsprpc-smd"; iommus = <&apps_smmu 0x1005 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb6 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "adsprpc-smd"; iommus = <&apps_smmu 0x804 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb7 { compatible = "qcom,msm-fastrpc-compute-cb"; label = "adsprpc-smd"; iommus = <&apps_smmu 0x805 0x0>; - dma-coherent; }; qcom,msm_fastrpc_compute_cb8 { @@ -636,7 +681,6 @@ label = "adsprpc-smd"; iommus = <&apps_smmu 0x806 0x0>; shared-cb = <5>; - dma-coherent; }; }; @@ -856,44 +900,6 @@ thermal_zones: thermal-zones {}; - usb0: hsusb@78c0000 { - compatible = "qcom,dwc-usb3-msm"; - reg = <0x78c0000 0x100000>; - reg-names = "core_base"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - - interrupts = <0 32 0>; - interrupt-names = "pwr_event_irq"; - /* Using dummy Xo clock, need to check the proper mapping */ - clocks = <&clock_gcc GCC_USB_HS_SYSTEM_CLK>, - <&clock_gcc GCC_PCNOC_USB2_CLK>, - <&clock_gcc GCC_USB30_SLEEP_CLK>, - <&clock_gcc GCC_USB_HS_INACTIVITY_TIMERS_CLK>, - <&clock_gcc GCC_USB20_MOCK_UTMI_CLK>; - clock-names = "core_clk", "iface_clk", "xo", - "sleep_clk", "utmi_clk"; - - qcom,core-clk-rate = <200000000>; - qcom,core-clk-rate-hs = <66666667>; - qcom,dwc-usb3-msm-tx-fifo-size = <27696>; - resets = <&clock_gcc GCC_USB_HS_BCR>; - reset-names = "core_reset"; - - dwc3@78c0000 { - compatible = "snps,dwc3"; - reg = <0x78c0000 0xcd00>; - interrupt-parent = <&intc>; - interrupts = <0 44 0>; - tx-fifo-resize; - linux,sysdev_is_parent; - snps,disable-clk-gating; - snps,has-lpm-erratum; - snps,hird-threshold = /bits/ 8 <0x10>; - }; - }; - sdhc_1: sdhci@7804000 { compatible = "qcom,sdhci-msm-v5"; reg = <0x7804000 0x1000>, <0x7805000 0x1000>; @@ -916,7 +922,21 @@ clock-names = "iface_clk", "core_clk"; qcom,nonremovable; - status = "disabled"; + + /* VDD external regulator is enabled/disabled by pms405_l6 */ + vdd-io-supply = <&pms405_l6>; + qcom,vdd-io-always-on; + qcom,vdd-io-lpm-sup; + qcom,vdd-io-voltage-level = <1704000 1800000>; + qcom,vdd-io-current-level = <0 325000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on + &sdc1_rclk_on>; + pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off + &sdc1_rclk_off>; + + status = "ok"; }; sdhc_2: sdhci@7844000 { @@ -931,17 +951,26 @@ qcom,large-address-bus; qcom,clk-rates = <400000 20000000 25000000 - 50000000 100000000 201500000>; + 50000000 
100000000 200000000>; qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104"; - qcom,devfreq,freq-table = <50000000 201500000>; + qcom,devfreq,freq-table = <50000000 200000000>; clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>, <&clock_gcc GCC_SDCC2_APPS_CLK>; clock-names = "iface_clk", "core_clk"; - status = "disabled"; + /* VDD is an external regulator eLDO5 */ + vdd-io-supply = <&pms405_l11>; + qcom,vdd-io-voltage-level = <2696000 3304000>; + qcom,vdd-io-current-level = <0 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>; + + status = "ok"; }; qnand_1: nand@4c0000 { @@ -958,6 +987,59 @@ status = "disabled"; }; + + msm_cpufreq: qcom,msm-cpufreq { + compatible = "qcom,msm-cpufreq"; + clock-names = "cpu0_clk"; + clocks = <&clock_cpu APCS_MUX_CLK>; + + qcom,cpufreq-table = + < 1113600 >, + < 1267200 >, + < 1401600 >; + }; + + ddr_bw_opp_table: ddr-bw-opp-table { + compatible = "operating-points-v2"; + BW_OPP_ENTRY( 297, 4); /* 1132 MB/s */ + BW_OPP_ENTRY( 595, 4); /* 2269 MB/s */ + BW_OPP_ENTRY( 710, 4); /* 2708 MB/s */ + }; + + cpubw: qcom,cpubw { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = <1 512>; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + qcom,cpu-bwmon { + compatible = "qcom,bimc-bwmon2"; + reg = <0x408000 0x300>, <0x401000 0x200>; + reg-names = "base", "global_base"; + interrupts = <0 183 4>; + qcom,mport = <0>; + qcom,target-dev = <&cpubw>; + }; + + cpu0_cpu_ddr_latfloor: qcom,cpu0-cpu-ddr-latfloor { + compatible = "qcom,devbw"; + governor = "performance"; + qcom,src-dst-ports = <1 512>; + qcom,active-only; + operating-points-v2 = <&ddr_bw_opp_table>; + }; + + cpu0_computemon: qcom,cpu0-computemon { + compatible = "qcom,arm-cpu-mon"; + qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>; + qcom,target-dev = <&cpu0_cpu_ddr_latfloor>; + qcom,core-dev-table = + < 1113600 MHZ_TO_MBPS( 297, 4) >, + < 1267200 MHZ_TO_MBPS( 597, 4) >, + < 1401600 MHZ_TO_MBPS( 710, 4) >; + }; }; #include "qcs405-gdsc.dtsi" @@ -966,6 +1048,7 @@ #include "qcs405-regulator.dtsi" #include "qcs405-thermal.dtsi" #include "qcs405-bus.dtsi" +#include "qcs405-audio.dtsi" &gdsc_mdss { status = "ok"; @@ -976,10 +1059,11 @@ }; #include "qcs405-coresight.dtsi" +#include "qcs405-usb.dtsi" &i2c_5 { - status = "ok"; smb1351_otg_supply: smb1351-charger@55 { + status = "disabled"; compatible = "qcom,smb1351-charger"; reg = <0x55>; interrupt-parent = <&tlmm>; @@ -992,5 +1076,45 @@ pinctrl-names = "default"; pinctrl-0 = <&smb_stat>; qcom,switch-freq = <2>; + dpdm-supply = <&usb2_phy0>; }; }; + +&pms405_gpios { + usb3_vbus_boost { + usb3_vbus_boost_default: usb3_vbus_boost_default { + pins = "gpio3"; + function = "normal"; + output-low; + power-source = <1>; + }; + }; + + usb3_vbus_det { + usb3_vbus_det_default: usb3_vbus_det_default { + pins = "gpio12"; + function = "normal"; + input-enable; + bias-pull-down; + power-source = <1>; + }; + }; +}; + +&soc { + usb3_extcon: usb3_extcon { + compatible = "linux,extcon-usb-gpio"; + id-gpio = <&tlmm 116 GPIO_ACTIVE_HIGH>; + vbus-gpio = <&pms405_gpios 12 GPIO_ACTIVE_HIGH>; + vbus-out-gpio = <&pms405_gpios 3 GPIO_ACTIVE_HIGH>; + + pinctrl-names = "default"; + pinctrl-0 = <&usb3_vbus_det_default + &usb3_id_det_default + &usb3_vbus_boost_default>; + }; +}; + +&usb3 { + extcon = <&usb3_extcon>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star-overlay.dts b/arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts 
similarity index 76% rename from arch/arm64/boot/dts/qcom/sm8150-auto-adp-star-overlay.dts rename to arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts index 889797e681af1073cec34733b5043a924c7ffe0a..611d29151e3777385286e9eb8188c4c176acfa2b 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star-overlay.dts @@ -13,11 +13,11 @@ /dts-v1/; /plugin/; -#include "sm8150-auto-adp-star.dtsi" +#include "sa8155-adp-star.dtsi" / { - model = "Qualcomm Technologies, Inc. SM8150 AUTO-ADP-STAR"; - compatible = "qcom,sm8150-auto-adp-star", "qcom,sm8150", - "qcom,auto-adp-star"; + model = "ADP-STAR"; + compatible = "qcom,sa8155-adp-star", "qcom,sa8155", + "qcom,adp-star"; qcom,board-id = <25 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dts b/arch/arm64/boot/dts/qcom/sa8155-adp-star.dts similarity index 73% rename from arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dts rename to arch/arm64/boot/dts/qcom/sa8155-adp-star.dts index 250b080ae57ce4b3bd84b8cea16e00e819da23ab..d4322630a3a01814adb2c017c18694e8dea69480 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dts +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star.dts @@ -11,12 +11,12 @@ */ /dts-v1/; -#include "sm8150-auto.dtsi" -#include "sm8150-auto-adp-star.dtsi" +#include "sa8155.dtsi" +#include "sa8155-adp-star.dtsi" / { - model = "Qualcomm Technologies, Inc. SM8150 AUTO-ADP-STAR"; - compatible = "qcom,sm8150-auto-adp-star", "qcom,sm8150", - "qcom,auto-adp-star"; + model = "Qualcomm Technologies, Inc. SA8155 ADP-STAR"; + compatible = "qcom,sa8155-adp-star", "qcom,sa8155", + "qcom,adp-star"; qcom,board-id = <25 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi b/arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi similarity index 89% rename from arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi rename to arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi index 155b29fd483ff4dc9ca341f0f44926ac0cd494af..07744a9b4d31591ada42c4577154061c53cdc674 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-auto-adp-star.dtsi +++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star.dtsi @@ -13,7 +13,22 @@ #include #include -#include "sm8150-auto-pmic-overlay.dtsi" +#include "sa8155-pmic-overlay.dtsi" + +&qupv3_se0_spi { + status = "ok"; + can-controller@0 { + compatible = "qcom,nxp,mpc5746c"; + reg = <0>; + interrupt-parent = <&tlmm>; + interrupts = <38 0>; + spi-max-frequency = <5000000>; + qcom,clk-freq-mhz = <16000000>; + qcom,max-can-channels = <4>; + qcom,bits-per-word = <8>; + qcom,support-can-fd; + }; +}; &qupv3_se12_2uart { status = "ok"; @@ -138,6 +153,7 @@ qcom,vddp-ref-clk-supply = <&pm8150_2_l5>; qcom,vddp-ref-clk-max-microamp = <100>; + qcom,disable-lpm; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sa8155-pmic-overlay.dtsi similarity index 100% rename from arch/arm64/boot/dts/qcom/sm8150-auto-pmic-overlay.dtsi rename to arch/arm64/boot/dts/qcom/sa8155-pmic-overlay.dtsi diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto-regulator.dtsi b/arch/arm64/boot/dts/qcom/sa8155-regulator.dtsi similarity index 100% rename from arch/arm64/boot/dts/qcom/sm8150-auto-regulator.dtsi rename to arch/arm64/boot/dts/qcom/sa8155-regulator.dtsi diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto.dts b/arch/arm64/boot/dts/qcom/sa8155.dts similarity index 84% rename from arch/arm64/boot/dts/qcom/sm8150-auto.dts rename to arch/arm64/boot/dts/qcom/sa8155.dts index 
e948626cf6d1cb3ceb685f6f79101ff4a149c66d..0fbb4769485b20eae446bb1afaa7993f97197f0f 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-auto.dts +++ b/arch/arm64/boot/dts/qcom/sa8155.dts @@ -12,11 +12,11 @@ /dts-v1/; -#include "sm8150-auto.dtsi" +#include "sa8155.dtsi" / { - model = "Qualcomm Technologies, Inc. SM8150 AUTO SoC"; - compatible = "qcom,sm8150"; + model = "Qualcomm Technologies, Inc. SA8155 SoC"; + compatible = "qcom,sa8155"; qcom,pmic-name = "PM8150"; qcom,board-id = <0 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155.dtsi b/arch/arm64/boot/dts/qcom/sa8155.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..bc0f3ac595c17df8ff617a864d088c98526c0663 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155.dtsi @@ -0,0 +1,690 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sm8150.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SA8155"; + compatible = "qcom,sa8155"; + qcom,msm-name = "SA8155"; + qcom,msm-id = <362 0x10000>; +}; + +/* Remove regulator nodes specific to SA8155 */ +&soc { + /delete-node/ regulator-pm8150-s4; + /delete-node/ rpmh-regulator-msslvl; + /delete-node/ rpmh-regulator-smpa2; + /delete-node/ rpmh-regulator-ebilvl; + /delete-node/ rpmh-regulator-smpa5; + /delete-node/ rpmh-regulator-smpa6; + /delete-node/ rpmh-regulator-ldoa1; + /delete-node/ rpmh-regulator-ldoa2; + /delete-node/ rpmh-regulator-ldoa3; + /delete-node/ rpmh-regulator-lmxlvl; + /delete-node/ rpmh-regulator-ldoa5; + /delete-node/ rpmh-regulator-ldoa6; + /delete-node/ rpmh-regulator-ldoa7; + /delete-node/ rpmh-regulator-lcxlvl; + /delete-node/ rpmh-regulator-ldoa9; + /delete-node/ rpmh-regulator-ldoa10; + /delete-node/ rpmh-regulator-ldoa11; + /delete-node/ rpmh-regulator-ldoa12; + /delete-node/ rpmh-regulator-ldoa13; + /delete-node/ rpmh-regulator-ldoa14; + /delete-node/ rpmh-regulator-ldoa15; + /delete-node/ rpmh-regulator-ldoa16; + /delete-node/ rpmh-regulator-ldoa17; + /delete-node/ rpmh-regulator-smpc1; + /delete-node/ rpmh-regulator-gfxlvl; + /delete-node/ rpmh-regulator-mxlvl; + /delete-node/ rpmh-regulator-mmcxlvl; + /delete-node/ rpmh-regulator-cxlvl; + /delete-node/ rpmh-regulator-smpc8; + /delete-node/ rpmh-regulator-ldoc1; + /delete-node/ rpmh-regulator-ldoc2; + /delete-node/ rpmh-regulator-ldoc3; + /delete-node/ rpmh-regulator-ldoc4; + /delete-node/ rpmh-regulator-ldoc5; + /delete-node/ rpmh-regulator-ldoc6; + /delete-node/ rpmh-regulator-ldoc7; + /delete-node/ rpmh-regulator-ldoc8; + /delete-node/ rpmh-regulator-ldoc9; + /delete-node/ rpmh-regulator-ldoc10; + /delete-node/ rpmh-regulator-ldoc11; + /delete-node/ rpmh-regulator-bobc1; + /delete-node/ rpmh-regulator-smpf2; + /delete-node/ rpmh-regulator-ldof2; + /delete-node/ rpmh-regulator-ldof5; + /delete-node/ rpmh-regulator-ldof6; +}; + +/* Add regulator nodes specific to SA8155 */ +#include "sa8155-regulator.dtsi" + +&cam_csiphy0 { + mipi-csi-vdd-supply = <&pm8150_2_l8>; +}; + +&cam_csiphy1 { + mipi-csi-vdd-supply = <&pm8150_2_l8>; +}; + +&cam_csiphy2 { + mipi-csi-vdd-supply = <&pm8150_2_l8>; +}; + +&cam_csiphy3 { + 
mipi-csi-vdd-supply = <&pm8150_2_l8>; +}; + +&pcie0 { + vreg-1.8-supply = <&pm8150_2_l8>; + vreg-0.9-supply = <&pm8150_2_l18>; +}; + +&pcie1 { + vreg-1.8-supply = <&pm8150_2_l8>; + vreg-0.9-supply = <&pm8150_2_l18>; +}; + +&mdss_dsi_phy0 { + vdda-0p9-supply = <&pm8150_2_l18>; +}; + +&mdss_dsi_phy1 { + vdda-0p9-supply = <&pm8150_2_l18>; +}; + +&mdss_dsi0 { + vdda-1p2-supply = <&pm8150_2_l8>; +}; + +&mdss_dsi1 { + vdda-1p2-supply = <&pm8150_2_l8>; +}; + +&sde_dp { + vdda-1p2-supply = <&pm8150_2_l8>; + vdda-0p9-supply = <&pm8150_2_l18>; +}; + +&lmh_dcvs1 { + isens_vref_0p8-supply = <&pm8150_1_l5_ao>; + isens_vref_1p8-supply = <&pm8150_1_l12_ao>; +}; + +&usb2_phy0 { + vdd-supply = <&pm8150_1_l5>; + vdda18-supply = <&pm8150_1_l12>; + vdda33-supply = <&pm8150_1_l2>; +}; + +&usb_qmp_dp_phy { + vdd-supply = <&pm8150_1_l5>; + core-supply = <&pm8150_2_l8>; +}; + +&usb2_phy1 { + vdd-supply = <&pm8150_1_l5>; + vdda18-supply = <&pm8150_1_l12>; + vdda33-supply = <&pm8150_1_l2>; + status = "ok"; +}; + +&usb_qmp_phy { + vdd-supply = <&pm8150_1_l5>; + core-supply = <&pm8150_2_l8>; + status = "ok"; +}; + +&icnss { + vdd-cx-mx-supply = <&pm8150_1_l1>; + vdd-1.8-xo-supply = <&pm8150_1_l7>; + vdd-1.3-rfa-supply = <&pm8150_2_l1>; + /delete-property/ vdd-3.3-ch0-supply; +}; + +&pil_ssc { + vdd_cx-supply = <&VDD_CX_LEVEL>; + vdd_mx-supply = <&VDD_MX_LEVEL>; +}; + +&pil_modem { + vdd_mss-supply = <&pm8150_1_s8_level>; +}; + +&wil6210 { + /delete-property/ vddio-supply; +}; + +&gpu_gx_gdsc { + parent-supply = <&pm8150_2_s3_level>; + vdd_parent-supply = <&pm8150_2_s3_level>; +}; + +&thermal_zones { + aoss0-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-0-0-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-0-1-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-0-2-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-0-3-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpuss-0-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpuss-1-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-0-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-1-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-2-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-3-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-4-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-5-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-6-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cpu-1-7-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + gpuss-0-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + aoss-1-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cwlan-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + video-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + ddr-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + q6-hvx-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + camera-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + cmpss-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + mdm-core-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + npu-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + mdm-vec-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + mdm-scl-lowf { + 
cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; + gpuss-1-lowf { + cooling-maps { + /delete-node/ mmcx_vdd_cdev; + }; + }; +}; + +&tlmm { + ioexp_intr_active: ioexp_intr_active { + mux { + pins = "gpio48"; + function = "gpio"; + }; + config { + pins = "gpio48"; + drive-strength = <2>; + bias-pull-up; + }; + }; + + ioexp_reset_active: ioexp_reset_active { + mux { + pins = "gpio30"; + function = "gpio"; + }; + config { + pins = "gpio30"; + drive-strength = <2>; + bias-disable; + output-high; + }; + }; +}; + +&sde_dp { + qcom,ext-disp = <&ext_disp>; + qcom,dp-hpd-gpio = <&ioexp 8 0>; + + pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; + pinctrl-0 = <&dp_hpd_cfg_pins>; + pinctrl-1 = <&dp_hpd_cfg_pins>; + + qcom,core-supply-entries { + #address-cells = <1>; + #size-cells = <0>; + qcom,core-supply-entry@0 { + reg = <0>; + qcom,supply-name = "refgen"; + qcom,supply-min-voltage = <0>; + qcom,supply-max-voltage = <0>; + qcom,supply-enable-load = <0>; + qcom,supply-disable-load = <0>; + }; + }; +}; + +&qupv3_se15_i2c { + status = "ok"; + + pinctrl-0 = <&qupv3_se15_i2c_active + &ioexp_intr_active + &ioexp_reset_active>; + + ioexp: gpio@3e { + #gpio-cells = <2>; + #interrupt-cells = <2>; + compatible = "semtech,sx1509q"; + reg = <0x3e>; + interrupt-parent = <&tlmm>; + interrupts = <48 0>; + gpio-controller; + interrupt-controller; + semtech,probe-reset; + + pinctrl-names = "default"; + pinctrl-0 = <&dsi1_hpd_cfg_pins + &dsi1_cdet_cfg_pins + &dsi2_hpd_cfg_pins + &dsi2_cdet_cfg_pins>; + + dsi1_hpd_cfg_pins: gpio0-cfg { + pins = "gpio0"; + bias-pull-up; + }; + + dsi1_cdet_cfg_pins: gpio1-cfg { + pins = "gpio1"; + bias-pull-down; + }; + + dsi2_hpd_cfg_pins: gpio2-cfg { + pins = "gpio2"; + bias-pull-up; + }; + + dsi2_cdet_cfg_pins: gpio3-cfg { + pins = "gpio3"; + bias-pull-down; + }; + + dp_hpd_cfg_pins: gpio8-cfg { + pins = "gpio8"; + bias-pull-down; + }; + }; + + i2c-mux@77 { + compatible = "nxp,pca9542"; + reg = <0x77>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + anx_7625_1: anx7625@2c { + compatible = "analogix,anx7625"; + reg = <0x2c>; + interrupt-parent = <&ioexp>; + interrupts = <0 0>; + cbl_det-gpio = <&ioexp 1 0>; + power_en-gpio = <&tlmm 47 0>; + reset_n-gpio = <&tlmm 49 0>; + }; + }; + + i2c@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + anx_7625_2: anx7625@2c { + compatible = "analogix,anx7625"; + reg = <0x2c>; + interrupt-parent = <&ioexp>; + interrupts = <2 0>; + cbl_det-gpio = <&ioexp 3 0>; + power_en-gpio = <&tlmm 87 0>; + reset_n-gpio = <&tlmm 29 0>; + }; + }; + }; +}; + +&anx_7625_1 { + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + anx_7625_1_in: endpoint { + remote-endpoint = <&dsi_anx_7625_1_out>; + }; + }; + }; +}; + +&anx_7625_2 { + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + anx_7625_2_in: endpoint { + remote-endpoint = <&dsi_anx_7625_2_out>; + }; + }; + }; +}; + +#include "dsi-panel-ext-bridge-1080p.dtsi" + +&soc { + dsi_anx_7625_1: qcom,dsi-display@17 { + label = "dsi_anx_7625_1"; + qcom,dsi-display-active; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_ext_bridge_1080p>; + }; + + dsi_anx_7625_2: qcom,dsi-display@18 { + label = "dsi_anx_7625_2"; + qcom,dsi-display-active; + qcom,display-type = "secondary"; + + qcom,dsi-ctrl-num = <1>; + qcom,dsi-phy-num = <1>; + 
qcom,dsi-select-clocks = "src_byte_clk1", "src_pixel_clk1"; + + qcom,dsi-panel = <&dsi_ext_bridge_1080p>; + }; + + dsi_dp1: qcom,dsi-display@1 { + compatible = "qcom,dsi-display"; + label = "primary"; + + qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; + qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + + clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>, + <&mdss_dsi0_pll PCLK_MUX_0_CLK>, + <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>, + <&mdss_dsi1_pll PCLK_MUX_1_CLK>; + clock-names = "src_byte_clk0", "src_pixel_clk0", + "src_byte_clk1", "src_pixel_clk1"; + + qcom,dsi-display-list = + <&dsi_anx_7625_1>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi_anx_7625_1_out: endpoint { + remote-endpoint = <&anx_7625_1_in>; + }; + }; + }; + }; + + dsi_dp2: qcom,dsi-display@2 { + compatible = "qcom,dsi-display"; + label = "secondary"; + + qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; + qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + + clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>, + <&mdss_dsi0_pll PCLK_MUX_0_CLK>, + <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>, + <&mdss_dsi1_pll PCLK_MUX_1_CLK>; + clock-names = "src_byte_clk0", "src_pixel_clk0", + "src_byte_clk1", "src_pixel_clk1"; + + qcom,dsi-display-list = + <&dsi_anx_7625_2>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi_anx_7625_2_out: endpoint { + remote-endpoint = <&anx_7625_2_in>; + }; + }; + }; + }; + + refgen: refgen-regulator@88e7000 { + compatible = "qcom,refgen-regulator"; + reg = <0x88e7000 0x60>; + regulator-name = "refgen"; + regulator-enable-ramp-delay = <5>; + }; + + sde_wb: qcom,wb-display@0 { + compatible = "qcom,wb-display"; + cell-index = <0>; + label = "wb_display"; + }; + + ext_disp: qcom,msm-ext-disp { + compatible = "qcom,msm-ext-disp"; + + ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx { + compatible = "qcom,msm-ext-disp-audio-codec-rx"; + }; + }; +}; + +&mdss_dsi_phy0 { + qcom,panel-force-clock-lane-hs; +}; + +&mdss_dsi_phy1 { + qcom,panel-force-clock-lane-hs; +}; + +&mdss_mdp { + connectors = <&dsi_dp1 &dsi_dp2 &sde_dp &sde_wb>; +}; + +#include + +&soc { + emac_hw: qcom,emac@00020000 { + compatible = "qcom,emac-dwc-eqos"; + qcom,arm-smmu; + emac-core-version = <2>; + reg = <0x20000 0x10000>, + <0x36000 0x100>, + <0x3D00000 0x300000>; + reg-names = "emac-base", "rgmii-base", "tlmm-central-base"; + interrupts-extended = <&pdc 0 689 4>, <&pdc 0 699 4>, + <&tlmm 124 2>, <&pdc 0 691 4>, + <&pdc 0 692 4>, <&pdc 0 693 4>, + <&pdc 0 694 4>, <&pdc 0 695 4>, + <&pdc 0 696 4>, <&pdc 0 697 4>, + <&pdc 0 698 4>, <&pdc 0 699 4>; + interrupt-names = "sbd-intr", "lpi-intr", + "phy-intr", "tx-ch0-intr", + "tx-ch1-intr", "tx-ch2-intr", + "tx-ch3-intr", "tx-ch4-intr", + "rx-ch0-intr", "rx-ch1-intr", + "rx-ch2-intr", "rx-ch3-intr"; + qcom,msm-bus,name = "emac"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <98 512 0 0>, <1 781 0 0>, /* No vote */ + <98 512 1250 0>, <1 781 0 40000>, /* 10Mbps vote */ + <98 512 12500 0>, <1 781 0 40000>, /* 100Mbps vote */ + <98 512 125000 0>, <1 781 0 40000>; /* 1000Mbps vote */ + qcom,bus-vector-names = "10", "100", "1000"; + clocks = <&clock_gcc GCC_EMAC_AXI_CLK>, + <&clock_gcc GCC_EMAC_PTP_CLK>, + <&clock_gcc GCC_EMAC_RGMII_CLK>, + <&clock_gcc GCC_EMAC_SLV_AHB_CLK>; + clock-names = "emac_axi_clk", "emac_ptp_clk", + "emac_rgmii_clk", "emac_slv_ahb_clk"; + qcom,phy-reset = <&tlmm 79 GPIO_ACTIVE_HIGH>; + qcom,phy-intr-redirect = <&tlmm 124 GPIO_ACTIVE_LOW>; + gdsc_emac-supply = <&emac_gdsc>; + 
pinctrl-names = "dev-emac-mdc", + "dev-emac-mdio", + "dev-emac-rgmii_txd0_state", + "dev-emac-rgmii_txd1_state", + "dev-emac-rgmii_txd2_state", + "dev-emac-rgmii_txd3_state", + "dev-emac-rgmii_txc_state", + "dev-emac-rgmii_tx_ctl_state", + "dev-emac-rgmii_rxd0_state", + "dev-emac-rgmii_rxd1_state", + "dev-emac-rgmii_rxd2_state", + "dev-emac-rgmii_rxd3_state", + "dev-emac-rgmii_rxc_state", + "dev-emac-rgmii_rx_ctl_state"; + + pinctrl-0 = <&emac_mdc>; + pinctrl-1 = <&emac_mdio>; + + pinctrl-2 = <&emac_rgmii_txd0>; + pinctrl-3 = <&emac_rgmii_txd1>; + pinctrl-4 = <&emac_rgmii_txd2>; + pinctrl-5 = <&emac_rgmii_txd3>; + pinctrl-6 = <&emac_rgmii_txc>; + pinctrl-7 = <&emac_rgmii_tx_ctl>; + + pinctrl-8 = <&emac_rgmii_rxd0>; + pinctrl-9 = <&emac_rgmii_rxd1>; + pinctrl-10 = <&emac_rgmii_rxd2>; + pinctrl-11 = <&emac_rgmii_rxd3>; + pinctrl-12 = <&emac_rgmii_rxc>; + pinctrl-13 = <&emac_rgmii_rx_ctl>; + + io-macro-info { + io-macro-bypass-mode = <0>; + io-interface = "rgmii"; + }; + emac_emb_smmu: emac_emb_smmu { + compatible = "qcom,emac-smmu-embedded"; + qcom,smmu-s1-bypass; + iommus = <&apps_smmu 0x3C0 0x0>; + qcom,iova-mapping = <0x80000000 0x40000000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-mtp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts similarity index 74% rename from arch/arm64/boot/dts/qcom/sm6150-mtp.dts rename to arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts index a45cad0604033d7d391bec2205d02d52223170d5..298df1df8ad102bb57f18e4930c137032501b8ba 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p-adp-star-overlay.dts @@ -11,12 +11,13 @@ */ /dts-v1/; +/plugin/; -#include "sm6150.dtsi" -#include "sm6150-mtp.dtsi" +#include "sa8155-adp-star.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 MTP"; - compatible = "qcom,sm6150-mtp", "qcom,sm6150", "qcom,mtp"; - qcom,board-id = <8 0>; + model = "ADP-STAR"; + compatible = "qcom,sa8155p-adp-star", "qcom,sa8155p", + "qcom,adp-star"; + qcom,board-id = <25 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp-star.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp-star.dts new file mode 100644 index 0000000000000000000000000000000000000000..8a7aaea2e24cecae16fde5e8b72078642ad6a37b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155p-adp-star.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +#include "sa8155p.dtsi" +#include "sa8155-adp-star.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
SA8155P ADP-STAR"; + compatible = "qcom,sa8155p-adp-star", "qcom,sa8155p", + "qcom,adp-star"; + qcom,board-id = <25 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-cdp.dts b/arch/arm64/boot/dts/qcom/sa8155p.dts similarity index 74% rename from arch/arm64/boot/dts/qcom/sm6150-cdp.dts rename to arch/arm64/boot/dts/qcom/sa8155p.dts index 14c73223a7d9ba2d73f535652a5a25262a4ace51..fc7a5e9f0daaa0f0105debebc76659123391e841 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cdp.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p.dts @@ -12,11 +12,11 @@ /dts-v1/; -#include "sm6150.dtsi" -#include "sm6150-cdp.dtsi" +#include "sa8155p.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 CDP"; - compatible = "qcom,sm6150-cdp", "qcom,sm6150", "qcom,cdp"; - qcom,board-id = <1 0>; + model = "Qualcomm Technologies, Inc. SA8155P SoC"; + compatible = "qcom,sa8155p"; + qcom,pmic-name = "PM8150"; + qcom,board-id = <0 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sa8155p.dtsi b/arch/arm64/boot/dts/qcom/sa8155p.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..9b49c4fbd11b78d7a4cd01837b4d787d32d508f6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sa8155p.dtsi @@ -0,0 +1,20 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sa8155.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SA8155P "; + qcom,msm-name = "SA8155P"; + compatible = "qcom,sa8155p"; + qcom,msm-id = <367 0x10000>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-ion.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-ion.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..a8bfab8603ec6535fd3177cccda2bff14f5736f6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-ion.dtsi @@ -0,0 +1,59 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + qcom,ion { + compatible = "qcom,msm-ion"; + #address-cells = <1>; + #size-cells = <0>; + + system_heap: qcom,ion-heap@25 { + reg = <25>; + qcom,ion-heap-type = "SYSTEM"; + }; + + qcom,ion-heap@22 { /* ADSP HEAP */ + reg = <22>; + memory-region = <&adsp_mem>; + qcom,ion-heap-type = "DMA"; + }; + + qcom,ion-heap@27 { /* QSEECOM HEAP */ + reg = <27>; + memory-region = <&qseecom_mem>; + qcom,ion-heap-type = "DMA"; + }; + + qcom,ion-heap@19 { /* QSEECOM TA HEAP */ + reg = <19>; + memory-region = <&qseecom_ta_mem>; + qcom,ion-heap-type = "DMA"; + }; + + qcom,ion-heap@13 { /* SPSS HEAP */ + reg = <13>; + memory-region = <&sp_mem>; + qcom,ion-heap-type = "DMA"; + }; + + qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */ + reg = <10>; + memory-region = <&secure_display_memory>; + qcom,ion-heap-type = "HYP_CMA"; + }; + + qcom,ion-heap@9 { + reg = <9>; + qcom,ion-heap-type = "SYSTEM_SECURE"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi index d466f4d3ab7ee44cffe24c214b4777b3540d0d6a..dcbb1cf0e0fc3599e6f801db25926254ec69a18b 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-pinctrl.dtsi @@ -19,5 +19,806 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; + + /* QUPv3 South SE mappings */ + /* SE 0 pin mappings */ + qupv3_se0_i2c_pins: qupv3_se0_i2c_pins { + qupv3_se0_i2c_active: qupv3_se0_i2c_active { + mux { + pins = "gpio49", "gpio50"; + function = "qup00"; + }; + + config { + pins = "gpio49", "gpio50"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se0_i2c_sleep: qupv3_se0_i2c_sleep { + mux { + pins = "gpio49", "gpio50"; + function = "gpio"; + }; + + config { + pins = "gpio49", "gpio50"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se0_spi_pins: qupv3_se0_spi_pins { + qupv3_se0_spi_active: qupv3_se0_spi_active { + mux { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + function = "qup00"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se0_spi_sleep: qupv3_se0_spi_sleep { + mux { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + function = "gpio"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 1 pin mappings */ + qupv3_se1_i2c_pins: qupv3_se1_i2c_pins { + qupv3_se1_i2c_active: qupv3_se1_i2c_active { + mux { + pins = "gpio0", "gpio1"; + function = "qup01"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se1_i2c_sleep: qupv3_se1_i2c_sleep { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se1_spi_pins: qupv3_se1_spi_pins { + qupv3_se1_spi_active: qupv3_se1_spi_active { + mux { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + function = "qup01"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se1_spi_sleep: qupv3_se1_spi_sleep { + mux { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + function = "gpio"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 2 pin mappings */ + qupv3_se2_i2c_pins: qupv3_se2_i2c_pins { + qupv3_se2_i2c_active: qupv3_se2_i2c_active { + mux { + pins = "gpio34", "gpio35"; + function = "qup02"; + 
}; + + config { + pins = "gpio34", "gpio35"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se2_i2c_sleep: qupv3_se2_i2c_sleep { + mux { + pins = "gpio34", "gpio35"; + function = "gpio"; + }; + + config { + pins = "gpio34", "gpio35"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + /* SE 3 pin mappings */ + qupv3_se3_i2c_pins: qupv3_se3_i2c_pins { + qupv3_se3_i2c_active: qupv3_se3_i2c_active { + mux { + pins = "gpio38", "gpio39"; + function = "qup03"; + }; + + config { + pins = "gpio38", "gpio39"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se3_i2c_sleep: qupv3_se3_i2c_sleep { + mux { + pins = "gpio38", "gpio39"; + function = "gpio"; + }; + + config { + pins = "gpio38", "gpio39"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se3_4uart_pins: qupv3_se3_4uart_pins { + qupv3_se3_ctsrx: qupv3_se3_ctsrx { + mux { + pins = "gpio38", "gpio41"; + function = "qup03"; + }; + + config { + pins = "gpio38", "gpio41"; + drive-strength = <2>; + bias-no-pull; + }; + }; + + qupv3_se3_rts: qupv3_se3_rts { + mux { + pins = "gpio39"; + function = "qup03"; + }; + + config { + pins = "gpio39"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se3_tx: qupv3_se3_tx { + mux { + pins = "gpio40"; + function = "qup03"; + }; + + config { + pins = "gpio40"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se3_spi_pins: qupv3_se3_spi_pins { + qupv3_se3_spi_active: qupv3_se3_spi_active { + mux { + pins = "gpio38", "gpio39", "gpio40", + "gpio41"; + function = "qup03"; + }; + + config { + pins = "gpio38", "gpio39", "gpio40", + "gpio41"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se3_spi_sleep: qupv3_se3_spi_sleep { + mux { + pins = "gpio38", "gpio39", "gpio40", + "gpio41"; + function = "gpio"; + }; + + config { + pins = "gpio38", "gpio39", "gpio40", + "gpio41"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 4 pin mappings */ + qupv3_se4_i2c_pins: qupv3_se4_i2c_pins { + qupv3_se4_i2c_active: qupv3_se4_i2c_active { + mux { + pins = "gpio53", "gpio54"; + function = "qup04"; + }; + + config { + pins = "gpio53", "gpio54"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se4_i2c_sleep: qupv3_se4_i2c_sleep { + mux { + pins = "gpio53", "gpio54"; + function = "gpio"; + }; + + config { + pins = "gpio53", "gpio54"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se4_4uart_pins: qupv3_se4_4uart_pins { + qupv3_se4_ctsrx: qupv3_se4_ctsrx { + mux { + pins = "gpio53", "gpio56"; + function = "qup04"; + }; + + config { + pins = "gpio53", "gpio56"; + drive-strength = <2>; + bias-no-pull; + }; + }; + + qupv3_se4_rts: qupv3_se4_rts { + mux { + pins = "gpio54"; + function = "qup04"; + }; + + config { + pins = "gpio54"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se4_tx: qupv3_se4_tx { + mux { + pins = "gpio55"; + function = "qup04"; + }; + + config { + pins = "gpio55"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se4_spi_pins: qupv3_se4_spi_pins { + qupv3_se4_spi_active: qupv3_se4_spi_active { + mux { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + function = "qup04"; + }; + + config { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se4_spi_sleep: qupv3_se4_spi_sleep { + mux { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + function = "gpio"; + }; + + config { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* QUPv3 North 
instances */ + /* SE 6 pin mappings */ + qupv3_se6_i2c_pins: qupv3_se6_i2c_pins { + qupv3_se6_i2c_active: qupv3_se6_i2c_active { + mux { + pins = "gpio59", "gpio60"; + function = "qup10"; + }; + + config { + pins = "gpio59", "gpio60"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se6_i2c_sleep: qupv3_se6_i2c_sleep { + mux { + pins = "gpio59", "gpio60"; + function = "gpio"; + }; + + config { + pins = "gpio59", "gpio60"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se6_spi_pins: qupv3_se6_spi_pins { + qupv3_se6_spi_active: qupv3_se6_spi_active { + mux { + pins = "gpio59", "gpio60", "gpio61", + "gpio62"; + function = "qup10"; + }; + + config { + pins = "gpio59", "gpio60", "gpio61", + "gpio62"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se6_spi_sleep: qupv3_se6_spi_sleep { + mux { + pins = "gpio59", "gpio60", "gpio61", + "gpio62"; + function = "gpio"; + }; + + config { + pins = "gpio59", "gpio60", "gpio61", + "gpio62"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 7 pin mappings */ + qupv3_se7_i2c_pins: qupv3_se7_i2c_pins { + qupv3_se7_i2c_active: qupv3_se7_i2c_active { + mux { + pins = "gpio6", "gpio7"; + function = "qup11"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se7_i2c_sleep: qupv3_se7_i2c_sleep { + mux { + pins = "gpio6", "gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se7_spi_pins: qupv3_se7_spi_pins { + qupv3_se7_spi_active: qupv3_se7_spi_active { + mux { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + function = "qup11"; + }; + + config { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se7_spi_sleep: qupv3_se7_spi_sleep { + mux { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + function = "gpio"; + }; + + config { + pins = "gpio6", "gpio7", "gpio8", + "gpio9"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 8 pin mappings */ + qupv3_se8_i2c_pins: qupv3_se8_i2c_pins { + qupv3_se8_i2c_active: qupv3_se8_i2c_active { + mux { + pins = "gpio42", "gpio43"; + function = "qup12"; + }; + + config { + pins = "gpio42", "gpio43"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se8_i2c_sleep: qupv3_se8_i2c_sleep { + mux { + pins = "gpio42", "gpio43"; + function = "gpio"; + }; + + config { + pins = "gpio42", "gpio43"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se8_2uart_pins: qupv3_se8_2uart_pins { + qupv3_se8_2uart_active: qupv3_se8_2uart_active { + mux { + pins = "gpio44", "gpio45"; + function = "qup12"; + }; + + config { + pins = "gpio44", "gpio45"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se8_2uart_sleep: qupv3_se8_2uart_sleep { + mux { + pins = "gpio44", "gpio45"; + function = "gpio"; + }; + + config { + pins = "gpio44", "gpio45"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + qupv3_se8_spi_pins: qupv3_se8_spi_pins { + qupv3_se8_spi_active: qupv3_se8_spi_active { + mux { + pins = "gpio42", "gpio43", "gpio44", + "gpio45"; + function = "qup12"; + }; + + config { + pins = "gpio42", "gpio43", "gpio44", + "gpio45"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se8_spi_sleep: qupv3_se8_spi_sleep { + mux { + pins = "gpio42", "gpio43", "gpio44", + "gpio45"; + function = "gpio"; + }; + + config { + pins = "gpio42", "gpio43", "gpio44", + "gpio45"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 9 pin mappings */ 
+ qupv3_se9_i2c_pins: qupv3_se9_i2c_pins { + qupv3_se9_i2c_active: qupv3_se9_i2c_active { + mux { + pins = "gpio46", "gpio47"; + function = "qup13"; + }; + + config { + pins = "gpio46", "gpio47"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se9_i2c_sleep: qupv3_se9_i2c_sleep { + mux { + pins = "gpio46", "gpio47"; + function = "gpio"; + }; + + config { + pins = "gpio46", "gpio47"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + /* SE 10 pin mappings */ + qupv3_se10_i2c_pins: qupv3_se10_i2c_pins { + qupv3_se10_i2c_active: qupv3_se10_i2c_active { + mux { + pins = "gpio110", "gpio111"; + function = "qup14"; + }; + + config { + pins = "gpio110", "gpio111"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se10_i2c_sleep: qupv3_se10_i2c_sleep { + mux { + pins = "gpio110", "gpio111"; + function = "gpio"; + }; + + config { + pins = "gpio110", "gpio111"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se10_4uart_pins: qupv3_se10_4uart_pins { + qupv3_se10_ctsrx: qupv3_se10_ctsrx { + mux { + pins = "gpio110", "gpio113"; + function = "qup14"; + }; + + config { + pins = "gpio110", "gpio113"; + drive-strength = <2>; + bias-no-pull; + }; + }; + + qupv3_se10_rts: qupv3_se10_rts { + mux { + pins = "gpio111"; + function = "qup14"; + }; + + config { + pins = "gpio111"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se10_tx: qupv3_se10_tx { + mux { + pins = "gpio112"; + function = "qup14"; + }; + + config { + pins = "gpio112"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se10_spi_pins: qupv3_se10_spi_pins { + qupv3_se10_spi_active: qupv3_se10_spi_active { + mux { + pins = "gpio110", "gpio111", "gpio112", + "gpio113"; + function = "qup14"; + }; + + config { + pins = "gpio110", "gpio111", "gpio112", + "gpio113"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se10_spi_sleep: qupv3_se10_spi_sleep { + mux { + pins = "gpio110", "gpio111", "gpio112", + "gpio113"; + function = "gpio"; + }; + + config { + pins = "gpio110", "gpio111", "gpio112", + "gpio113"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 11 pin mappings */ + qupv3_se11_i2c_pins: qupv3_se11_i2c_pins { + qupv3_se11_i2c_active: qupv3_se11_i2c_active { + mux { + pins = "gpio101", "gpio102"; + function = "qup15"; + }; + + config { + pins = "gpio101", "gpio102"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se11_i2c_sleep: qupv3_se11_i2c_sleep { + mux { + pins = "gpio101", "gpio102"; + function = "gpio"; + }; + + config { + pins = "gpio101", "gpio102"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se11_4uart_pins: qupv3_se11_4uart_pins { + qupv3_se11_ctsrx: qupv3_se11_ctsrx { + mux { + pins = "gpio101", "gpio92"; + function = "qup15"; + }; + + config { + pins = "gpio101", "gpio92"; + drive-strength = <2>; + bias-no-pull; + }; + }; + + qupv3_se11_rts: qupv3_se11_rts { + mux { + pins = "gpio102"; + function = "qup15"; + }; + + config { + pins = "gpio102"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + qupv3_se11_tx: qupv3_se11_tx { + mux { + pins = "gpio103"; + function = "qup15"; + }; + + config { + pins = "gpio103"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se11_spi_pins: qupv3_se11_spi_pins { + qupv3_se11_spi_active: qupv3_se11_spi_active { + mux { + pins = "gpio101", "gpio102", "gpio103", + "gpio92"; + function = "qup15"; + }; + + config { + pins = "gpio101", "gpio102", "gpio103", + "gpio92"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se11_spi_sleep:
qupv3_se11_spi_sleep { + mux { + pins = "gpio101", "gpio102", "gpio103", + "gpio92"; + function = "gpio"; + }; + + config { + pins = "gpio101", "gpio102", "gpio103", + "gpio92"; + drive-strength = <6>; + bias-disable; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..dda1f4ec0f0e992fe7e77a19d6674e9eedac130d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-pm.dtsi @@ -0,0 +1,173 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + qcom,lpm-levels { + compatible = "qcom,lpm-levels"; + #address-cells = <1>; + #size-cells = <0>; + + qcom,pm-cluster@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + label = "L3"; + qcom,psci-mode-shift = <4>; + qcom,psci-mode-mask = <0xfff>; + + qcom,pm-cluster-level@0 { /* D1 */ + reg = <0>; + label = "l3-wfi"; + qcom,psci-mode = <0x1>; + qcom,latency-us = <600>; + qcom,ss-power = <420>; + qcom,energy-overhead = <4254140>; + qcom,time-overhead = <1260>; + }; + + qcom,pm-cluster-level@1 { /* D4 */ + reg = <1>; + label = "l3-pc"; + qcom,psci-mode = <0x4>; + qcom,latency-us = <3048>; + qcom,ss-power = <329>; + qcom,energy-overhead = <6189829>; + qcom,time-overhead = <5800>; + qcom,min-child-idx = <2>; + qcom,is-reset; + }; + + qcom,pm-cluster-level@2 { /* Cx Off */ + reg = <2>; + label = "cx-off"; + qcom,psci-mode = <0x224>; + qcom,latency-us = <4562>; + qcom,ss-power = <290>; + qcom,energy-overhead = <6989829>; + qcom,time-overhead = <8200>; + qcom,min-child-idx = <2>; + qcom,is-reset; + qcom,notify-rpm; + }; + + qcom,pm-cluster-level@3 { /* LLCC off, AOSS sleep */ + reg = <3>; + label = "llcc-off"; + qcom,psci-mode = <0xC24>; + qcom,latency-us = <6562>; + qcom,ss-power = <165>; + qcom,energy-overhead = <7000029>; + qcom,time-overhead = <9825>; + qcom,min-child-idx = <2>; + qcom,is-reset; + qcom,notify-rpm; + }; + + qcom,pm-cpu@0 { + #address-cells = <1>; + #size-cells = <0>; + qcom,psci-mode-shift = <0>; + qcom,psci-mode-mask = <0xf>; + qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 + &CPU5>; + + qcom,pm-cpu-level@0 { /* C1 */ + reg = <0>; + label = "wfi"; + qcom,psci-cpu-mode = <0x1>; + qcom,latency-us = <60>; + qcom,ss-power = <383>; + qcom,energy-overhead = <64140>; + qcom,time-overhead = <121>; + }; + + qcom,pm-cpu-level@1 { /* C3 */ + reg = <1>; + label = "pc"; + qcom,psci-cpu-mode = <0x3>; + qcom,latency-us = <901>; + qcom,ss-power = <364>; + qcom,energy-overhead = <579285>; + qcom,time-overhead = <1450>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + + qcom,pm-cpu-level@2 { /* C4 */ + reg = <2>; + label = "rail-pc"; + qcom,psci-cpu-mode = <0x4>; + qcom,latency-us = <915>; + qcom,ss-power = <353>; + qcom,energy-overhead = <666292>; + qcom,time-overhead = <1617>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + }; + + qcom,pm-cpu@1 { + #address-cells = <1>; + #size-cells = <0>; + qcom,psci-mode-shift = <0>; + qcom,psci-mode-mask = <0xf>; + qcom,cpu = <&CPU6 &CPU7>; + + qcom,pm-cpu-level@0 { /* C1 */ + reg 
= <0>; + label = "wfi"; + qcom,psci-cpu-mode = <0x1>; + qcom,latency-us = <66>; + qcom,ss-power = <427>; + qcom,energy-overhead = <68410>; + qcom,time-overhead = <121>; + }; + + qcom,pm-cpu-level@1 { /* C3 */ + reg = <1>; + label = "pc"; + qcom,psci-cpu-mode = <0x3>; + qcom,latency-us = <1244>; + qcom,ss-power = <373>; + qcom,energy-overhead = <795006>; + qcom,time-overhead = <1767>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + + qcom,pm-cpu-level@2 { /* C4 */ + reg = <2>; + label = "rail-pc"; + qcom,psci-cpu-mode = <0x4>; + qcom,latency-us = <1854>; + qcom,ss-power = <359>; + qcom,energy-overhead = <1068095>; + qcom,time-overhead = <2380>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + }; + }; + }; + + qcom,rpm-stats@c300000 { + compatible = "qcom,rpm-stats"; + reg = <0xc300000 0x1000>, <0xc3f0004 0x4>; + reg-names = "phys_addr_base", "offset_addr"; + qcom,num-records = <3>; + }; + + qcom,rpmh-master-stats@b221200 { + compatible = "qcom,rpmh-master-stats-v1"; + reg = <0xb221200 0x60>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..70e03825fc965172cb38d83ccbc9fc1697883681 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-qupv3.dtsi @@ -0,0 +1,565 @@ +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&soc { + /* QUPv3 South instances */ + qupv3_0: qcom,qupv3_0_geni_se@0x8c0000 { + compatible = "qcom,qupv3-geni-se"; + reg = <0x8c0000 0x2000>; + qcom,bus-mas-id = ; + qcom,bus-slv-id = ; + qcom,iommu-s1-bypass; + + iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb { + compatible = "qcom,qupv3-geni-se-cb"; + iommus = <&apps_smmu 0x203 0x0>; + }; + }; + + /* I2C */ + qupv3_se0_i2c: i2c@0x880000 { + compatible = "qcom,i2c-geni"; + reg = <0x880000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + dmas = <&gpi_dma0 0 0 3 64 0>, + <&gpi_dma0 1 0 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se0_i2c_active>; + pinctrl-1 = <&qupv3_se0_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se1_i2c: i2c@0x884000 { + compatible = "qcom,i2c-geni"; + reg = <0x884000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + dmas = <&gpi_dma0 0 1 3 64 0>, + <&gpi_dma0 1 1 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se1_i2c_active>; + pinctrl-1 = <&qupv3_se1_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se2_i2c: i2c@0x888000 { + compatible = "qcom,i2c-geni"; + reg = <0x888000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + dmas = <&gpi_dma0 0 2 3 64 0>, + <&gpi_dma0 1 2 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se2_i2c_active>; + pinctrl-1 = <&qupv3_se2_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se3_i2c: i2c@0x88c000 { + compatible = "qcom,i2c-geni"; + reg = <0x88c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + dmas = <&gpi_dma0 0 3 3 64 0>, + <&gpi_dma0 1 3 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se3_i2c_active>; + pinctrl-1 = <&qupv3_se3_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se4_i2c: i2c@0x890000 { + compatible = "qcom,i2c-geni"; + reg = <0x890000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + dmas = <&gpi_dma0 0 4 3 64 0>, + <&gpi_dma0 1 4 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se4_i2c_active>; + pinctrl-1 = <&qupv3_se4_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + /*HS UART*/ + qupv3_se3_4uart: qcom,qup_uart@0x88c000 { + compatible = "qcom,msm-geni-serial-hs"; + reg = <0x88c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", 
"s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se3_ctsrx>, <&qupv3_se3_rts>, + <&qupv3_se3_tx>; + pinctrl-1 = <&qupv3_se3_ctsrx>, <&qupv3_se3_rts>, + <&qupv3_se3_tx>; + interrupts-extended = <&pdc GIC_SPI 604 0>, + <&tlmm 41 0>; + status = "disabled"; + qcom,wakeup-byte = <0xFD>; + qcom,wrapper-core = <&qupv3_0>; + }; + + qupv3_se4_4uart: qcom,qup_uart@0x890000 { + compatible = "qcom,msm-geni-serial-hs"; + reg = <0x890000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se4_ctsrx>, <&qupv3_se4_rts>, + <&qupv3_se4_tx>; + pinctrl-1 = <&qupv3_se4_ctsrx>, <&qupv3_se4_rts>, + <&qupv3_se4_tx>; + interrupts-extended = <&pdc GIC_SPI 605 0>, + <&tlmm 56 0>; + status = "disabled"; + qcom,wakeup-byte = <0xFD>; + qcom,wrapper-core = <&qupv3_0>; + }; + + /* SPI */ + qupv3_se0_spi: spi@0x880000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x880000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se0_spi_active>; + pinctrl-1 = <&qupv3_se0_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + dmas = <&gpi_dma0 0 0 1 64 0>, + <&gpi_dma0 1 0 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se1_spi: spi@0x884000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x884000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se1_spi_active>; + pinctrl-1 = <&qupv3_se1_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + dmas = <&gpi_dma0 0 1 1 64 0>, + <&gpi_dma0 1 1 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se3_spi: spi@0x88c000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x88c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se3_spi_active>; + pinctrl-1 = <&qupv3_se3_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + dmas = <&gpi_dma0 0 3 1 64 0>, + <&gpi_dma0 1 3 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se4_spi: spi@0x890000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x890000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se4_spi_active>; + pinctrl-1 = 
<&qupv3_se4_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + dmas = <&gpi_dma0 0 4 1 64 0>, + <&gpi_dma0 1 4 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + + /* QUPv3 North Instances */ + qupv3_1: qcom,qupv3_1_geni_se@0xac0000 { + compatible = "qcom,qupv3-geni-se"; + reg = <0xac0000 0x2000>; + qcom,bus-mas-id = ; + qcom,bus-slv-id = ; + qcom,iommu-s1-bypass; + + iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb { + compatible = "qcom,qupv3-geni-se-cb"; + iommus = <&apps_smmu 0x4c3 0x0>; + }; + }; + + /* Debug UART Instance for CDP/MTP/RUMI platform */ + qupv3_se8_2uart: qcom,qup_uart@0xa88000 { + compatible = "qcom,msm-geni-console"; + reg = <0xa88000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se8_2uart_active>; + pinctrl-1 = <&qupv3_se8_2uart_sleep>; + interrupts = ; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + /* I2C */ + qupv3_se6_i2c: i2c@0xa80000 { + compatible = "qcom,i2c-geni"; + reg = <0xa80000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + dmas = <&gpi_dma1 0 0 3 64 0>, + <&gpi_dma1 1 0 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se6_i2c_active>; + pinctrl-1 = <&qupv3_se6_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se7_i2c: i2c@0xa84000 { + compatible = "qcom,i2c-geni"; + reg = <0xa84000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + dmas = <&gpi_dma1 0 1 3 64 0>, + <&gpi_dma1 1 1 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se7_i2c_active>; + pinctrl-1 = <&qupv3_se7_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se8_i2c: i2c@0xa88000 { + compatible = "qcom,i2c-geni"; + reg = <0xa88000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + dmas = <&gpi_dma1 0 2 3 64 0>, + <&gpi_dma1 1 2 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se8_i2c_active>; + pinctrl-1 = <&qupv3_se8_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se9_i2c: i2c@0xa8c000 { + compatible = "qcom,i2c-geni"; + reg = <0xa8c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + dmas = <&gpi_dma1 0 3 3 64 0>, + <&gpi_dma1 1 3 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se9_i2c_active>; + pinctrl-1 = <&qupv3_se9_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + 
qupv3_se10_i2c: i2c@0xa90000 { + compatible = "qcom,i2c-geni"; + reg = <0xa90000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + dmas = <&gpi_dma1 0 4 3 64 0>, + <&gpi_dma1 1 4 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se10_i2c_active>; + pinctrl-1 = <&qupv3_se10_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se11_i2c: i2c@0xa94000 { + compatible = "qcom,i2c-geni"; + reg = <0xa94000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + dmas = <&gpi_dma1 0 5 3 64 0>, + <&gpi_dma1 1 5 3 64 0>; + dma-names = "tx", "rx"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se11_i2c_active>; + pinctrl-1 = <&qupv3_se11_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + /*HS UART*/ + qupv3_se10_4uart: qcom,qup_uart@0xa90000 { + compatible = "qcom,msm-geni-serial-hs"; + reg = <0xa90000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se10_ctsrx>, <&qupv3_se10_rts>, + <&qupv3_se10_tx>; + pinctrl-1 = <&qupv3_se10_ctsrx>, <&qupv3_se10_rts>, + <&qupv3_se10_tx>; + interrupts-extended = <&pdc GIC_SPI 357 0>, + <&tlmm 113 0>; + status = "disabled"; + qcom,wakeup-byte = <0xFD>; + qcom,wrapper-core = <&qupv3_1>; + }; + + qupv3_se11_4uart: qcom,qup_uart@0xa94000 { + compatible = "qcom,msm-geni-serial-hs"; + reg = <0xa94000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se11_ctsrx>, <&qupv3_se11_rts>, + <&qupv3_se11_tx>; + pinctrl-1 = <&qupv3_se11_ctsrx>, <&qupv3_se11_rts>, + <&qupv3_se11_tx>; + interrupts-extended = <&pdc GIC_SPI 358 0>, + <&tlmm 92 0>; + status = "disabled"; + qcom,wakeup-byte = <0xFD>; + qcom,wrapper-core = <&qupv3_1>; + }; + + /* SPI */ + qupv3_se6_spi: spi@0xa80000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa80000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se6_spi_active>; + pinctrl-1 = <&qupv3_se6_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + dmas = <&gpi_dma1 0 0 1 64 0>, + <&gpi_dma1 1 0 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se7_spi: spi@0xa84000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa84000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names =
"default", "sleep"; + pinctrl-0 = <&qupv3_se7_spi_active>; + pinctrl-1 = <&qupv3_se7_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + dmas = <&gpi_dma1 0 1 1 64 0>, + <&gpi_dma1 1 1 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se8_spi: spi@0xa88000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa88000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se8_spi_active>; + pinctrl-1 = <&qupv3_se8_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + dmas = <&gpi_dma1 0 2 1 64 0>, + <&gpi_dma1 1 2 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se10_spi: spi@0xa90000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa90000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se10_spi_active>; + pinctrl-1 = <&qupv3_se10_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + dmas = <&gpi_dma1 0 4 1 64 0>, + <&gpi_dma1 1 4 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + qupv3_se11_spi: spi@0xa94000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa94000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se11_spi_active>; + pinctrl-1 = <&qupv3_se11_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + dmas = <&gpi_dma1 0 5 1 64 0>, + <&gpi_dma1 1 5 1 64 0>; + dma-names = "tx", "rx"; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ce55cc63ee7720385c5acf40eaaaf19e4445887d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-regulator.dtsi @@ -0,0 +1,746 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +/* TODO: Update voltage range once PGA is locked */ +&soc { + /* RPMh regulators */ + /* PM6150 S2 = VDD_GFX supply */ + rpmh-regulator-gfxlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "gfx.lvl"; + VDD_GFX_LEVEL: + S2A_LEVEL: pm6150_s2_level: regulator-pm6150-s2-level { + regulator-name = "pm6150_s2_level"; + qcom,set = ; + regulator-min-microvolt + = ; + regulator-max-microvolt + = ; + qcom,init-voltage-level + = ; + }; + }; + + /* pm6150 S3 = VDD_MX supply */ + rpmh-regulator-mxlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "mx.lvl"; + + VDD_MX_LEVEL: + S3A_LEVEL: pm6150_s3_level: regulator-pm6150-s3 { + regulator-name = "pm6150_s3_level"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + }; + + VDD_MX_LEVEL_AO: + S3A_LEVEL_AO: pm6150_s3_level_ao: regulator-pm6150-s3-level-ao { + regulator-name = "pm6150_s3_level_ao"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + }; + }; + + rpmh-regulator-smpc1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpc1"; + S1C: pm6150l_s1: regulator-pm6150l-s1 { + regulator-name = "pm6150l_s1"; + qcom,set = ; + regulator-min-microvolt = <1128000>; + regulator-max-microvolt = <1128000>; + qcom,init-voltage = <1128000>; + }; + }; + + /* pm6150l S2 + S3 = 2 phase VDD_CX supply */ + rpmh-regulator-cxlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "cx.lvl"; + pm6150l-s2-level-parent-supply = <&VDD_MX_LEVEL>; + pm6150l-s2-level_ao-parent-supply = <&VDD_MX_LEVEL_AO>; + + VDD_CX_LEVEL: + S2C_LEVEL: pm6150l_s2_level: regulator-pm6150l-s2 { + regulator-name = "pm6150l_s2_level"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + qcom,min-dropout-voltage-level = <(-1)>; + }; + + VDD_CX_LEVEL_AO: S2C_LEVEL_AO: + pm6150l_s2_level_ao: regulator-pm6150l-s2-level-ao { + qcom,set = ; + regulator-name = "pm6150l_s2_level_ao"; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + qcom,min-dropout-voltage-level = <(-1)>; + }; + }; + + /* pm6150l S7 = VDD_MSS supply */ + rpmh-regulator-modemlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "mss.lvl"; + + VDD_MSS_LEVEL: + S7C_LEVEL: pm6150l_s7_level: regulator-pm6150l-s7 { + regulator-name = "pm6150l_s7_level"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + }; + }; + + rpmh-regulator-smpc8 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "smpc8"; + S8C: pm6150l_s8: regulator-pm6150l-s8 { + regulator-name = "pm6150l_s8"; + qcom,set = ; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1400000>; + qcom,init-voltage = <1200000>; + }; + }; + + rpmh-regulator-ldoa1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa1"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L1A: pm6150_l1: regulator-pm6150-l1 { + regulator-name = "pm6150_l1"; + qcom,set = ; + regulator-min-microvolt = <1174000>; + regulator-max-microvolt = <1252000>; + qcom,init-voltage = <1174000>; + }; + }; + + rpmh-regulator-ldoa2 { + compatible =
"qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa2"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L2A: pm6150_l2: regulator-pm6150-l2 { + regulator-name = "pm6150_l2"; + qcom,set = ; + regulator-min-microvolt = <944000>; + regulator-max-microvolt = <1050000>; + qcom,init-voltage = <944000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa3 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa3"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L3A: pm6150_l3: regulator-pm6150-l3 { + regulator-name = "pm6150_l3"; + qcom,set = ; + regulator-min-microvolt = <968000>; + regulator-max-microvolt = <1060000>; + qcom,init-voltage = <968000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa4 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa4"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L4A: pm6150_l4: regulator-pm6150-l4 { + regulator-name = "pm6150_l4"; + qcom,set = ; + regulator-min-microvolt = <824000>; + regulator-max-microvolt = <920000>; + qcom,init-voltage = <824000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa5 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa5"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L5A: pm6150_l5: regulator-pm6150-l5 { + regulator-name = "pm6150_l5"; + qcom,set = ; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <2800000>; + qcom,init-voltage = <2600000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa6 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa6"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L6A: pm6150_l6: regulator-pm6150-l6 { + regulator-name = "pm6150_l6"; + qcom,set = ; + regulator-min-microvolt = <1096000>; + regulator-max-microvolt = <1304000>; + qcom,init-voltage = <1096000>; + qcom,init-mode = ; + }; + }; + + /* pm6150 L7 = LPI_MX supply */ + rpmh-regulator-lmxlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "lmx.lvl"; + LPI_MX_LEVEL: + L7A_LEVEL: pm6150_l7_level: regulator-pm6150-l7 { + regulator-name = "pm6150_l7_level"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + }; + }; + + /* pm6150 L8 = LPI_CX supply */ + rpmh-regulator-lcxlvl { + compatible = "qcom,rpmh-arc-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "lcx.lvl"; + LPI_CX_LEVEL: + L8A_LEVEL: pm6150_l8_level: regulator-pm6150-l8 { + regulator-name = "pm6150_l8_level"; + qcom,set = ; + regulator-min-microvolt = + ; + regulator-max-microvolt = + ; + qcom,init-voltage-level = + ; + }; + }; + + /* pm6150 L9 = WCSS_CX supply */ + rpmh-regulator-ldoa9 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa9"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + WCSS_CX: + L9A: pm6150_l9: regulator-pm6150-l9 { + regulator-name = "pm6150_l9"; + qcom,set = ; + regulator-min-microvolt = <624000>; + regulator-max-microvolt = <760000>; + qcom,init-voltage = <624000>; + qcom,init-mode = ; + }; + }; + + 
rpmh-regulator-ldoa10 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa10"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L10A: pm6150_l10: regulator-pm6150-l10 { + regulator-name = "pm6150_l10"; + qcom,set = ; + regulator-min-microvolt = <1720000>; + regulator-max-microvolt = <1832000>; + qcom,init-voltage = <1720000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa11 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa11"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L11A: pm6150_l11: regulator-pm6150-l11 { + regulator-name = "pm6150_l11"; + qcom,set = ; + regulator-min-microvolt = <1616000>; + regulator-max-microvolt = <1984000>; + qcom,init-voltage = <1616000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa12 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa12"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L12A: pm6150_l12: regulator-pm6150-l12 { + regulator-name = "pm6150_l12"; + qcom,set = ; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1952000>; + qcom,init-voltage = <1696000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa13 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa13"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L13A: pm6150_l13: regulator-pm6150-l13 { + regulator-name = "pm6150_l13"; + qcom,set = ; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1904000>; + qcom,init-voltage = <1696000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa14 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa14"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L14A: pm6150_l14: regulator-pm6150-l14 { + regulator-name = "pm6150_l14"; + qcom,set = ; + regulator-min-microvolt = <1720000>; + regulator-max-microvolt = <1856000>; + qcom,init-voltage = <1720000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa15 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa15"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L15A: pm6150_l15: regulator-pm6150-l15 { + regulator-name = "pm6150_l15"; + qcom,set = ; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1904000>; + qcom,init-voltage = <1696000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa16 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa16"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L16A: pm6150_l16: regulator-pm6150-l16 { + regulator-name = "pm6150_l16"; + qcom,set = ; + regulator-min-microvolt = <2424000>; + regulator-max-microvolt = <2976000>; + qcom,init-voltage = <2424000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa17 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa17"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L17A: pm6150_l17: regulator-pm6150-l17 { + 
regulator-name = "pm6150_l17"; + qcom,set = ; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3232000>; + qcom,init-voltage = <3000000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa18 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa18"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L18A: pm6150_l18: regulator-pm6150-l18 { + regulator-name = "pm6150_l18"; + qcom,set = ; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3008000>; + qcom,init-voltage = <3000000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoa19 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa19"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L19A: pm6150_l19: regulator-pm6150-l19 { + regulator-name = "pm6150_l19"; + qcom,set = ; + regulator-min-microvolt = <2944000>; + regulator-max-microvolt = <3304000>; + qcom,init-voltage = <2944000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc1"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L1C: pm6150l_l1: regulator-pm6150l-l1 { + regulator-name = "pm6150l_l1"; + qcom,set = ; + regulator-min-microvolt = <1616000>; + regulator-max-microvolt = <1984000>; + qcom,init-voltage = <1616000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc2 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc2"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L2C: pm6150l_l2: regulator-pm6150l-l2 { + regulator-name = "pm6150l_l2"; + qcom,set = ; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1352000>; + qcom,init-voltage = <1200000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc3 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc3"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L3C: pm6150l_l3: regulator-pm6150l-l3 { + regulator-name = "pm6150l_l3"; + qcom,set = ; + regulator-min-microvolt = <1144000>; + regulator-max-microvolt = <1256000>; + qcom,init-voltage = <1144000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc4 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc4"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L4C: pm6150l_l4: regulator-pm6150l-l4 { + regulator-name = "pm6150l_l4"; + qcom,set = ; + regulator-min-microvolt = <1648000>; + regulator-max-microvolt = <2950000>; + qcom,init-voltage = <1648000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc5 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc5"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L5C: pm6150l_l5: regulator-pm6150l-l5 { + regulator-name = "pm6150l_l5"; + qcom,set = ; + regulator-min-microvolt = <1648000>; + regulator-max-microvolt = <2950000>; + qcom,init-voltage = <1648000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc6 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + 
qcom,resource-name = "ldoc6"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L6C: pm6150l_l6: regulator-pm6150l-l6 { + regulator-name = "pm6150l_l6"; + qcom,set = ; + regulator-min-microvolt = <1648000>; + regulator-max-microvolt = <3100000>; + qcom,init-voltage = <1648000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc7 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc7"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L7C: pm6150l_l7: regulator-pm6150l-l7 { + regulator-name = "pm6150l_l7"; + qcom,set = ; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3312000>; + qcom,init-voltage = <3000000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc8 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc8"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L8C: pm6150l_l8: regulator-pm6150l-l8 { + regulator-name = "pm6150l_l8"; + qcom,set = ; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1900000>; + qcom,init-voltage = <1800000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc9 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc9"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L9C: pm6150l_l9: regulator-pm6150l-l9 { + regulator-name = "pm6150l_l9"; + qcom,set = ; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <3312000>; + qcom,init-voltage = <2950000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc10 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc10"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L10C: pm6150l_l10: regulator-pm6150l-l10 { + regulator-name = "pm6150l_l10"; + qcom,set = ; + regulator-min-microvolt = <3200000>; + regulator-max-microvolt = <3312000>; + qcom,init-voltage = <3200000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-ldoc11 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoc11"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1>; + L11C: pm6150l_l11: regulator-pm6150l-l11 { + regulator-name = "pm6150l_l11"; + qcom,set = ; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <3400000>; + qcom,init-voltage = <2950000>; + qcom,init-mode = ; + }; + }; + + rpmh-regulator-bobc1 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "bobc1"; + qcom,regulator-type = "pmic5-bob"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 1000000 2000000>; + qcom,send-defaults; + + BOB: pm6150l_bob: regulator-pm6150l-bob { + regulator-name = "pm6150l_bob"; + qcom,set = ; + regulator-min-microvolt = <3296000>; + regulator-max-microvolt = <3960000>; + qcom,init-voltage = <3296000>; + qcom,init-mode = ; + }; + + BOB_AO: pm6150l_bob_ao: regulator-pm6150l-bob-ao { + regulator-name = "pm6150l_bob_ao"; + qcom,set = ; + regulator-min-microvolt = <3296000>; + regulator-max-microvolt = <3960000>; + qcom,init-voltage = <3296000>; + qcom,init-mode = ; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi 
b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi index a683d878cc32bc5bc3a0ec129f3d5584f838d720..f9e417fcca9468ae5a51996e188478c9b620b89e 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-rumi.dtsi @@ -23,3 +23,45 @@ status = "disabled"; }; }; + +&soc { + /delete-node/ rpmh-regulator-gfxlvl; + /delete-node/ rpmh-regulator-mxlvl; + /delete-node/ rpmh-regulator-cxlvl; + /delete-node/ rpmh-regulator-smpc1; + /delete-node/ rpmh-regulator-modemlvl; + /delete-node/ rpmh-regulator-smpc8; + /delete-node/ rpmh-regulator-ldoa1; + /delete-node/ rpmh-regulator-ldoa2; + /delete-node/ rpmh-regulator-ldoa3; + /delete-node/ rpmh-regulator-ldoa4; + /delete-node/ rpmh-regulator-ldoa5; + /delete-node/ rpmh-regulator-ldoa6; + /delete-node/ rpmh-regulator-lmxlvl; + /delete-node/ rpmh-regulator-lcxlvl; + /delete-node/ rpmh-regulator-ldoa9; + /delete-node/ rpmh-regulator-ldoa10; + /delete-node/ rpmh-regulator-ldoa11; + /delete-node/ rpmh-regulator-ldoa12; + /delete-node/ rpmh-regulator-ldoa13; + /delete-node/ rpmh-regulator-ldoa14; + /delete-node/ rpmh-regulator-ldoa15; + /delete-node/ rpmh-regulator-ldoa16; + /delete-node/ rpmh-regulator-ldoa17; + /delete-node/ rpmh-regulator-ldoa18; + /delete-node/ rpmh-regulator-ldoa19; + /delete-node/ rpmh-regulator-ldoc1; + /delete-node/ rpmh-regulator-ldoc2; + /delete-node/ rpmh-regulator-ldoc3; + /delete-node/ rpmh-regulator-ldoc4; + /delete-node/ rpmh-regulator-ldoc5; + /delete-node/ rpmh-regulator-ldoc6; + /delete-node/ rpmh-regulator-ldoc7; + /delete-node/ rpmh-regulator-ldoc8; + /delete-node/ rpmh-regulator-ldoc9; + /delete-node/ rpmh-regulator-ldoc10; + /delete-node/ rpmh-regulator-ldoc11; + /delete-node/ rpmh-regulator-bobc1; +}; + +#include "sdmmagpie-stub-regulator.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-stub-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-stub-regulator.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..368685e6d1be43dd139b36ffcfc74f691b24136a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-stub-regulator.dtsi @@ -0,0 +1,352 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +/* TODO: Update volatge range once PGA is locked */ +/* Stub regulators */ + +/ { + /* PM6150 S2 = VDD_GFX supply */ + VDD_GFX_LEVEL: + pm6150_s2_level: regulator-pm6150-s2-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_s2_level"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + /* pm6150 S3 = VDD_MX supply */ + VDD_MX_LEVEL: + S3A_LEVEL: pm6150_s3_level: regulator-pm6150-s3 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_s3_level"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + VDD_MX_LEVEL_AO: + S3A_LEVEL_AO: pm6150_s3_level_ao: regulator-pm6150-s3-level-ao { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_s3_level_ao"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + S1C: pm6150l_s1: regulator-pm6150l-s1 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_s1"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = <1128000>; + regulator-max-microvolt = <1128000>; + }; + + /* pm6150l S2 + S3 = 2 phase VDD_CX supply */ + VDD_CX_LEVEL: + pm6150l_s2_level: regulator-pm6150l-s2-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_s2_level"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + VDD_CX_LEVEL_AO: + pm6150l_s2_level_ao: regulator-pm6150l-s2-level-ao { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_s2_level_ao"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + /* pm6150l S7 = VDD_MSS supply */ + VDD_MSS_LEVEL: + S7C_LEVEL: pm6150l_s7_level: regulator-pm6150l-s7 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_s7_level"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + S8C: pm6150l_s8: regulator-pm6150l-s8 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_s8"; + qcom,hpm-min-load = <100000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1400000>; + }; + + L1A: pm6150_l1: regulator-pm6150-l1 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l1"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1174000>; + regulator-max-microvolt = <1252000>; + }; + + L2A: pm6150_l2: regulator-pm6150-l2 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l2"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <944000>; + regulator-max-microvolt = <1050000>; + }; + + L3A: pm6150_l3: regulator-pm6150-l3 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l3"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <968000>; + regulator-max-microvolt = <1060000>; + }; + + L4A: pm6150_l4: regulator-pm6150-l4 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l4"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <824000>; + regulator-max-microvolt = <920000>; + }; + + L5A: pm6150_l5: regulator-pm6150-l5 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l5"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <2600000>; + regulator-max-microvolt = <2800000>; + }; + + L6A: pm6150_l6: regulator-pm6150-l6 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l6"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1096000>; + regulator-max-microvolt = <1304000>; + }; + 
+ /* pm6150 L7 = LPI_MX supply */ + LPI_MX_LEVEL: + L7A_LEVEL: pm6150_l7_level: regulator-pm6150-l7-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l7_level"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + /* pm6150 L8 = LPI_CX supply */ + LPI_CX_LEVEL: + L8A_LEVEL: pm6150_l8_level: regulator-pm6150-l8-level { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l8_level"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = ; + regulator-max-microvolt = ; + }; + + /* pm6150 L9 = WCSS_CX supply */ + WCSS_CX: + L9A: pm6150_l9: regulator-pm6150-l9 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l9"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <624000>; + regulator-max-microvolt = <760000>; + }; + + L10A: pm6150_l10: regulator-pm6150-l10 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l10"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1720000>; + regulator-max-microvolt = <1832000>; + }; + + L11A: pm6150_l11: regulator-pm6150-l11 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l11"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1616000>; + regulator-max-microvolt = <1984000>; + }; + + L12A: pm6150_l12: regulator-pm6150-l12 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l12"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1952000>; + }; + + L13A: pm6150_l13: regulator-pm6150-l13 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l13"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1904000>; + }; + + L14A: pm6150_l14: regulator-pm6150-l14 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l14"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1720000>; + regulator-max-microvolt = <1856000>; + }; + + L15A: pm6150_l15: regulator-pm6150-l15 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l15"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1696000>; + regulator-max-microvolt = <1904000>; + }; + + L16A: pm6150_l16: regulator-pm6150-l16 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l16"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <2424000>; + regulator-max-microvolt = <2976000>; + }; + + L17A: pm6150_l17: regulator-pm6150-l17 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l17"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3232000>; + }; + + L18A: pm6150_l18: regulator-pm6150-l18 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l18"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3008000>; + }; + + L19A: pm6150_l19: regulator-pm6150-l19 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150_l19"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <2944000>; + regulator-max-microvolt = <3304000>; + }; + + L1C: pm6150l_l1: regulator-pm6150l-l1 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l1"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1616000>; + regulator-max-microvolt = <1984000>; + }; + + L2C: pm6150l_l2: regulator-pm6150l-l2 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l2"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1200000>; + 
regulator-max-microvolt = <1352000>; + }; + + L3C: pm6150l_l3: regulator-pm6150l-l3 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l3"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1144000>; + regulator-max-microvolt = <1256000>; + }; + + L4C: pm6150l_l4: regulator-pm6150l-l4 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l4"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1648000>; + regulator-max-microvolt = <2950000>; + }; + + L5C: pm6150l_l5: regulator-pm6150l-l5 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l5"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1648000>; + regulator-max-microvolt = <2950000>; + }; + + L6C: pm6150l_l6: regulator-pm6150l-l6 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l6"; + qcom,hpm-min-load = <5000>; + regulator-min-microvolt = <1648000>; + regulator-max-microvolt = <3100000>; + }; + + L7C: pm6150l_l7: regulator-pm6150l-l7 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l7"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3312000>; + }; + + L8C: pm6150l_l8: regulator-pm6150l-l8 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l8"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1900000>; + }; + + L9C: pm6150l_l9: regulator-pm6150l-l9 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l9"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <3312000>; + }; + + L10C: pm6150l_l10: regulator-pm6150l-l10 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l10"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <3200000>; + regulator-max-microvolt = <3312000>; + }; + + L11C: pm6150l_l11: regulator-pm6150l-l11 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_l11"; + qcom,hpm-min-load = <10000>; + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <3400000>; + }; + + BOB: pm6150l_bob: regulator-pm6150l-bob { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_bob"; + regulator-min-microvolt = <3296000>; + regulator-max-microvolt = <3960000>; + }; + + BOB_AO: pm6150l_bob_ao: regulator-pm6150l-bob-ao { + compatible = "qcom,stub-regulator"; + regulator-name = "pm6150l_bob_ao"; + regulator-min-microvolt = <3296000>; + regulator-max-microvolt = <3960000>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..f3720892fa8a20f48e1906367f883529c3aa5992 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdmmagpie-thermal.dtsi @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&thermal_zones { + aoss-0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&tsens0 0>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc0-cpu0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&tsens0 1>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc0-cpu1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&tsens0 2>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc0-cpu2-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "user_space"; + thermal-sensors = <&tsens0 3>; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc0-cpu3-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 4>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc0-cpu4-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 5>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc0-cpu5-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 6>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + cpuss-0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 7>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + cpuss-1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 8>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc1-cpu0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 9>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc1-cpu1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 10>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc1-cpu2-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 11>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + apc1-cpu3-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 12>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + gpuss-0-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 13>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis 
= <1000>; + type = "passive"; + }; + }; + }; + + gpuss-1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens0 14>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + aoss-1-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 0>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + cwlan-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 1>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + video-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 2>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + ddr-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 3>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + q6-hvx-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 4>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + camera-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 5>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + cmpss-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 6>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + mdm-core-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 7>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + npu-usr { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tsens1 8>; + thermal-governor = "user_space"; + trips { + active-config0 { + temperature = <125000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi index cb536c68516c1e363513a20b1dfd3291107f815e..6136e5dcddc5d745ea22ac98c4417c2aed5bb6ac 100644 --- a/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmmagpie.dtsi @@ -19,12 +19,24 @@ #include #include #include +#include +#include / { model = "Qualcomm Technologies, Inc. 
SDMMAGPIE"; compatible = "qcom,sdmmagpie"; qcom,msm-id = <365 0x0>; - interrupt-parent = <&intc>; + interrupt-parent = <&pdc>; + + aliases { + spi0 = &qupv3_se0_spi; + spi1 = &qupv3_se4_spi; + i2c0 = &qupv3_se2_i2c; + i2c1 = &qupv3_se7_i2c; + i2c2 = &qupv3_se9_i2c; + serial0 = &qupv3_se8_2uart; + hsuart0 = &qupv3_se3_4uart; + }; cpus { #address-cells = <2>; @@ -336,6 +348,161 @@ }; }; }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + hyp_region: hyp_region@85700000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x85700000 0 0x600000>; + }; + + xbl_aop_mem: xbl_aop_mem@85e00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85e00000 0x0 0x1ff000>; + }; + + sec_apps_mem: sec_apps_region@85fff000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85fff000 0x0 0x1000>; + }; + + smem_region: smem@86000000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x86000000 0x0 0x200000>; + }; + + removed_region: removed_region@86200000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x86200000 0 0x2d00000>; + }; + + pil_camera_mem: camera_region@8ab00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x8ab00000 0 0x500000>; + }; + + pil_modem_mem: modem_region@8b000000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x8b000000 0 0x7e00000>; + }; + + pil_video_mem: pil_video_region@92e00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x92e00000 0 0x500000>; + }; + + wlan_msa_mem: wlan_msa_region@93300000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x93300000 0 0x100000>; + }; + + pil_cdsp_mem: cdsp_regions@93400000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x93400000 0 0x800000>; + }; + + pil_adsp_mem: pil_adsp_region@93c00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x93c00000 0 0x1e00000>; + }; + + pil_ipa_fw_mem: ips_fw_region@95a00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x95a00000 0 0x10000>; + }; + + pil_ipa_gsi_mem: ipa_gsi_region@95a10000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x95a10000 0 0x5000>; + }; + + pil_gpu_mem: gpu_region@95a15000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x95a15000 0 0x2000>; + }; + + npu_mem: npu_region@95a80000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x95a80000 0 0x80000>; + }; + + qseecom_mem: qseecom_region@9e400000 { + compatible = "shared-dma-pool"; + no-map; + reg = <0 0x9e400000 0 0x1400000>; + }; + + adsp_mem: adsp_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0 0x00000000 0 0xffffffff>; + reusable; + alignment = <0 0x400000>; + size = <0 0xc00000>; + }; + + qseecom_ta_mem: qseecom_ta_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0 0x00000000 0 0xffffffff>; + reusable; + alignment = <0 0x400000>; + size = <0 0x1000000>; + }; + + sp_mem: sp_region { /* SPSS-HLOS ION shared mem */ + compatible = "shared-dma-pool"; + alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */ + reusable; + alignment = <0 0x400000>; + size = <0 0x800000>; + }; + + secure_display_memory: secure_display_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0 0x00000000 0 0xffffffff>; + reusable; + alignment = <0 0x400000>; + size = <0 0x5c00000>; + }; + + cont_splash_memory: cont_splash_region@9d400000 { + reg = <0x0 0x9d400000 0x0 0x02400000>; + label = "cont_splash_region"; + }; + + dump_mem: mem_dump_region { + compatible = "shared-dma-pool"; + reusable; + size = <0 0x2400000>; + }; + + /* global 
autoconfigured region for contiguous allocations */ + linux,cma { + compatible = "shared-dma-pool"; + alloc-ranges = <0 0x00000000 0 0xffffffff>; + reusable; + alignment = <0 0x400000>; + size = <0 0x2000000>; + linux,cma-default; + }; + }; }; &soc { @@ -356,6 +523,14 @@ interrupt-parent = <&intc>; }; + pdc: interrupt-controller@b220000 { + compatible = "qcom,pdc-sdmmagpie"; + reg = <0xb220000 0x400>; + #interrupt-cells = <3>; + interrupt-parent = <&intc>; + interrupt-controller; + }; + timer { compatible = "arm,armv8-timer"; interrupts = , @@ -430,6 +605,12 @@ #clock-cells = <1>; }; + clock_aop: qcom,aopclk { + compatible = "qcom,dummycc"; + clock-output-names = "aop_clocks"; + #clock-cells = <1>; + }; + clock_gcc: qcom,gcc { compatible = "qcom,dummycc"; clock-output-names = "gcc_clocks"; @@ -523,6 +704,78 @@ qcom,rtb-size = <0x100000>; }; + gpi_dma0: qcom,gpi-dma@0x800000 { + #dma-cells = <5>; + compatible = "qcom,gpi-dma"; + reg = <0x800000 0x60000>; + reg-names = "gpi-top"; + interrupts = <0 244 0>, <0 245 0>, <0 246 0>, <0 247 0>, + <0 248 0>, <0 249 0>, <0 250 0>, <0 251 0>; + qcom,max-num-gpii = <8>; + qcom,gpii-mask = <0x0f>; + qcom,ev-factor = <2>; + iommus = <&apps_smmu 0x0216 0x0>; + qcom,smmu-cfg = <0x1>; + qcom,iova-range = <0x0 0x100000 0x0 0x100000>; + status = "ok"; + }; + + gpi_dma1: qcom,gpi-dma@0xa00000 { + #dma-cells = <5>; + compatible = "qcom,gpi-dma"; + reg = <0xa00000 0x60000>; + reg-names = "gpi-top"; + interrupts = <0 279 0>, <0 280 0>, <0 281 0>, <0 282 0>, + <0 283 0>, <0 284 0>, <0 293 0>, <0 294 0>; + qcom,max-num-gpii = <8>; + qcom,gpii-mask = <0x0f>; + qcom,ev-factor = <2>; + iommus = <&apps_smmu 0x04d6 0x0>; + qcom,smmu-cfg = <0x1>; + qcom,iova-range = <0x0 0x100000 0x0 0x100000>; + status = "ok"; + }; + + slim_aud: slim@62dc0000 { + cell-index = <1>; + compatible = "qcom,slim-ngd"; + reg = <0x62dc0000 0x2c000>, + <0x62d84000 0x2a000>; + reg-names = "slimbus_physical", "slimbus_bam_physical"; + interrupts = <0 163 0>, <0 164 0>; + interrupt-names = "slimbus_irq", "slimbus_bam_irq"; + qcom,apps-ch-pipes = <0x7c0000>; + qcom,ea-pc = <0x300>; + status = "disabled"; + qcom,iommu-s1-bypass; + + iommu_slim_aud_ctrl_cb: qcom,iommu_slim_ctrl_cb { + compatible = "qcom,iommu-slim-ctrl-cb"; + iommus = <&apps_smmu 0x1be6 0x0>, + <&apps_smmu 0x1bed 0x0>, + <&apps_smmu 0x1bee 0x1>, + <&apps_smmu 0x1bf0 0x1>; + }; + + }; + + slim_qca: slim@62e40000 { + cell-index = <3>; + compatible = "qcom,slim-ngd"; + reg = <0x62e40000 0x2c000>, + <0x62e04000 0x20000>; + reg-names = "slimbus_physical", "slimbus_bam_physical"; + interrupts = <0 291 0>, <0 292 0>; + interrupt-names = "slimbus_irq", "slimbus_bam_irq"; + status = "disabled"; + qcom,iommu-s1-bypass; + + iommu_slim_qca_ctrl_cb: qcom,iommu_slim_ctrl_cb { + compatible = "qcom,iommu-slim-ctrl-cb"; + iommus = <&apps_smmu 0x1bf3 0x0>; + }; + }; + wdog: qcom,wdt@17c10000{ compatible = "qcom,msm-watchdog"; reg = <0x17c10000 0x1000>; @@ -692,6 +945,30 @@ }; }; + thermal_zones: thermal-zones {}; + + tsens0: tsens@c222000 { + compatible = "qcom,tsens24xx"; + reg = <0xc222000 0x8>, + <0xc263000 0x1ff>; + reg-names = "tsens_srot_physical", + "tsens_tm_physical"; + interrupts = <0 506 0>, <0 508 0>; + interrupt-names = "tsens-upper-lower", "tsens-critical"; + #thermal-sensor-cells = <1>; + }; + + tsens1: tsens@c223000 { + compatible = "qcom,tsens24xx"; + reg = <0xc223000 0x8>, + <0xc265000 0x1ff>; + reg-names = "tsens_srot_physical", + "tsens_tm_physical"; + interrupts = <0 507 0>, <0 509 0>; + interrupt-names = "tsens-upper-lower", 
"tsens-critical"; + #thermal-sensor-cells = <1>; + }; + qcom,llcc@9200000 { compatible = "qcom,llcc-core", "syscon", "simple-mfd"; reg = <0x9200000 0x450000>; @@ -716,10 +993,351 @@ compatible = "qcom,llcc-amon"; }; }; + + apps_rsc: mailbox@18220000 { + compatible = "qcom,tcs-drv"; + label = "apps_rsc"; + reg = <0x18220000 0x100>, <0x18220d00 0x3000>; + interrupts = <0 5 0>; + #mbox-cells = <1>; + qcom,drv-id = <2>; + qcom,tcs-config = , + , + , + ; + }; + + disp_rsc: mailbox@af20000 { + compatible = "qcom,tcs-drv"; + label = "display_rsc"; + reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>; + interrupts = <0 129 0>; + #mbox-cells = <1>; + qcom,drv-id = <0>; + qcom,tcs-config = , + , + , + ; + }; + + system_pm { + compatible = "qcom,system-pm"; + mboxes = <&apps_rsc 0>; + }; + + cmd_db: qcom,cmd-db@c3f000c { + compatible = "qcom,cmd-db"; + reg = <0xc3f000c 8>; + }; + + tcsr_mutex_block: syscon@1f40000 { + compatible = "syscon"; + reg = <0x1f40000 0x20000>; + }; + + tcsr_mutex: hwlock { + compatible = "qcom,tcsr-mutex"; + syscon = <&tcsr_mutex_block 0 0x1000>; + #hwlock-cells = <1>; + }; + + smem: qcom,smem { + compatible = "qcom,smem"; + memory-region = <&smem_region>; + hwlocks = <&tcsr_mutex 3>; + }; + + apcs: syscon@17c0000c { + compatible = "syscon"; + reg = <0x17c0000c 0x4>; + }; + + apcs_glb: mailbox@17c00000 { + compatible = "qcom,sm8150-apcs-hmss-global"; + reg = <0x17c00000 0x1000>; + + #mbox-cells = <1>; + }; + + qcom,glink { + compatible = "qcom,glink"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + glink_modem: modem { + qcom,remote-pid = <1>; + transport = "smem"; + mboxes = <&apcs_glb 12>; + mbox-names = "mpss_smem"; + interrupts = ; + + label = "modem"; + qcom,glink-label = "mpss"; + + qcom,modem_qrtr { + qcom,glink-channels = "IPCRTR"; + qcom,intents = <0x800 5 + 0x2000 3 + 0x4400 2>; + }; + + qcom,msm_fastrpc_rpmsg { + compatible = "qcom,msm-fastrpc-rpmsg"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + qcom,intents = <0x64 64>; + }; + + qcom,modem_ds { + qcom,glink-channels = "DS"; + qcom,intents = <0x4000 0x2>; + }; + + qcom,modem_glink_ssr { + qcom,glink-channels = "glink_ssr"; + qcom,notify-edges = <&glink_adsp>, + <&glink_cdsp>; + }; + }; + + glink_adsp: adsp { + qcom,remote-pid = <2>; + transport = "smem"; + mboxes = <&apcs_glb 24>; + mbox-names = "adsp_smem"; + interrupts = ; + + label = "adsp"; + qcom,glink-label = "lpass"; + + qcom,adsp_qrtr { + qcom,glink-channels = "IPCRTR"; + qcom,intents = <0x800 5 + 0x2000 3 + 0x4400 2>; + }; + + qcom,apr_tal_rpmsg { + qcom,glink-channels = "apr_audio_svc"; + qcom,intents = <0x200 20>; + }; + + qcom,msm_fastrpc_rpmsg { + compatible = "qcom,msm-fastrpc-rpmsg"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + qcom,intents = <0x64 64>; + }; + + qcom,adsp_glink_ssr { + qcom,glink-channels = "glink_ssr"; + qcom,notify-edges = <&glink_modem>, + <&glink_cdsp>; + }; + }; + + glink_cdsp: cdsp { + qcom,remote-pid = <5>; + transport = "smem"; + mboxes = <&apcs_glb 4>; + mbox-names = "cdsp_smem"; + interrupts = ; + + label = "cdsp"; + qcom,glink-label = "cdsp"; + + qcom,cdsp_qrtr { + qcom,glink-channels = "IPCRTR"; + qcom,intents = <0x800 5 + 0x2000 3 + 0x4400 2>; + }; + + qcom,msm_fastrpc_rpmsg { + compatible = "qcom,msm-fastrpc-rpmsg"; + qcom,glink-channels = "fastrpcglink-apps-dsp"; + qcom,intents = <0x64 64>; + }; + + qcom,cdsp_glink_ssr { + qcom,glink-channels = "glink_ssr"; + qcom,notify-edges = <&glink_modem>, + <&glink_adsp>; + }; + }; + + glink_spi_xprt_wdsp: wdsp { + qcom,remote-pid = <10>; + transport = "spi"; + 
tx-descriptors = <0x12000 0x12004>; + rx-descriptors = <0x1200c 0x12010>; + + qcom,wdsp_ctrl { + qcom,glink-channels = "g_glink_ctrl"; + qcom,intents = <0x400 1>; + }; + + qcom,wdsp_ild { + qcom,glink-channels = + "g_glink_persistent_data_ild"; + }; + + qcom,wdsp_nild { + qcom,glink-channels = + "g_glink_persistent_data_nild"; + }; + + qcom,wdsp_data { + qcom,glink-channels = "g_glink_audio_data"; + qcom,intents = <0x1000 2>; + }; + }; + }; + + qcom,glinkpkt { + compatible = "qcom,glinkpkt"; + + qcom,glinkpkt-at-mdm0 { + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DS"; + qcom,glinkpkt-dev-name = "at_mdm0"; + }; + + qcom,glinkpkt-apr-apps2 { + qcom,glinkpkt-edge = "adsp"; + qcom,glinkpkt-ch-name = "apr_apps2"; + qcom,glinkpkt-dev-name = "apr_apps2"; + }; + + qcom,glinkpkt-data40-cntl { + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA40_CNTL"; + qcom,glinkpkt-dev-name = "smdcntl8"; + }; + + qcom,glinkpkt-data1 { + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA1"; + qcom,glinkpkt-dev-name = "smd7"; + }; + + qcom,glinkpkt-data4 { + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA4"; + qcom,glinkpkt-dev-name = "smd8"; + }; + + qcom,glinkpkt-data11 { + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA11"; + qcom,glinkpkt-dev-name = "smd11"; + }; + }; + + qmp_npu0: qcom,qmp-npu-low@9818000 { + compatible = "qcom,qmp-mbox"; + reg = <0x9818000 0x8000>, <0x17c00010 0x4>; + reg-names = "msgram", "irq-reg-base"; + qcom,irq-mask = <0x20>; + interrupts = ; + + label = "npu_qmp_low"; + priority = <0>; + mbox-desc-offset = <0x0>; + #mbox-cells = <1>; + }; + + qmp_npu1: qcom,qmp-npu-high@9818000 { + compatible = "qcom,qmp-mbox"; + reg = <0x9818000 0x8000>, <0x17c00010 0x4>; + reg-names = "msgram", "irq-reg-base"; + qcom,irq-mask = <0x40>; + interrupts = ; + + label = "npu_qmp_high"; + priority = <1>; + mbox-desc-offset = <0x2000>; + #mbox-cells = <1>; + }; + + qmp_aop: qcom,qmp-aop@c300000 { + compatible = "qcom,qmp-mbox"; + reg = <0xc300000 0x1000>, <0x17c0000C 0x4>; + reg-names = "msgram", "irq-reg-base"; + qcom,irq-mask = <0x1>; + interrupts = ; + + label = "aop"; + qcom,early-boot; + priority = <0>; + mbox-desc-offset = <0x0>; + #mbox-cells = <1>; + }; + + qcom,smp2p-modem { + compatible = "qcom,smp2p"; + qcom,smem = <435>, <428>; + interrupts = ; + qcom,ipc = <&apcs 0 14>; + qcom,local-pid = <0>; + qcom,remote-pid = <1>; + + modem_smp2p_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + modem_smp2p_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + qcom,smp2p-adsp { + compatible = "qcom,smp2p"; + qcom,smem = <443>, <429>; + interrupts = ; + qcom,ipc = <&apcs 0 26>; + qcom,local-pid = <0>; + qcom,remote-pid = <2>; + + adsp_smp2p_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + adsp_smp2p_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + qcom,smp2p-cdsp { + compatible = "qcom,smp2p"; + qcom,smem = <94>, <432>; + interrupts = ; + qcom,ipc = <&apcs 0 6>; + qcom,local-pid = <0>; + qcom,remote-pid = <5>; + + cdsp_smp2p_out: master-kernel { + qcom,entry-name = "master-kernel"; + #qcom,smem-state-cells = <1>; + }; + + cdsp_smp2p_in: slave-kernel { + qcom,entry-name = "slave-kernel"; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; }; #include "sdmmagpie-pinctrl.dtsi" #include "sdmmagpie-gdsc.dtsi" 
+#include "sdmmagpie-qupv3.dtsi" +#include "sdmmagpie-thermal.dtsi" &pcie_0_gdsc { status = "ok"; @@ -816,3 +1434,8 @@ &npu_core_gdsc { status = "ok"; }; + +#include "sdmmagpie-ion.dtsi" +#include "msm-arm-smmu-sdmmagpie.dtsi" +#include "sdmmagpie-pm.dtsi" +#include "sdmmagpie-regulator.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sdmshrike-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdmshrike-audio-overlay.dtsi index d8a0f4e06c40251aa3bab48ae70225a6e0799f19..4e0d024e849473a54bdd4ae7371ec312d88aabda 100644 --- a/arch/arm64/boot/dts/qcom/sdmshrike-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sdmshrike-audio-overlay.dtsi @@ -125,6 +125,7 @@ qcom,codec-ext-clk-src = <2>; qcom,codec-lpass-ext-clk-freq = <19200000>; qcom,codec-lpass-clk-id = <278>; + qcom,use-pinctrl = <1>; pinctrl-names = "active", "sleep"; pinctrl-0 = <&quin_mi2s_mclk_active>; pinctrl-1 = <&quin_mi2s_mclk_sleep>; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dts b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dts new file mode 100644 index 0000000000000000000000000000000000000000..56d0e61a65d90886af65cefefbd2611a78365b9c --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdxprairie-cdp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDXPRAIRIE CDP"; + compatible = "qcom,sdxprairie-cdp", + "qcom,sdxprairie", "qcom,cdp"; + qcom,board-id = <1 0x102>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi similarity index 83% rename from arch/arm64/boot/dts/qcom/qcs405-mtp.dtsi rename to arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi index 406c57cfa407f79cb00e3015fc1aa9959f07158d..3f6f520f6007106340e8bb4313576b6e7e37e68f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi @@ -1,5 +1,4 @@ -/* - * Copyright (c) 2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -10,4 +9,5 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -#include "qcs405.dtsi" + +#include "sdxprairie.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-ion.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-ion.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..f1fcf928746e627b6b53d50bd99a752df9379178 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-ion.dtsi @@ -0,0 +1,23 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +&soc { + qcom,ion { + compatible = "qcom,msm-ion"; + #address-cells = <1>; + #size-cells = <0>; + + system_heap: qcom,ion-heap@25 { + reg = <25>; + qcom,ion-heap-type = "SYSTEM"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dts b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dts new file mode 100644 index 0000000000000000000000000000000000000000..b2d5be0382cc59fca010cd53483ca8cfc2c3b770 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sdxprairie-mtp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDXPRAIRIE MTP"; + compatible = "qcom,sdxprairie-mtp", + "qcom,sdxprairie", "qcom,mtp"; + qcom,board-id = <8 0x102>; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi similarity index 79% rename from arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi rename to arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi index 2922b606d35a6c0155d22e3bbd6f8b294980c036..3f6f520f6007106340e8bb4313576b6e7e37e68f 100644 --- a/arch/arm64/boot/dts/qcom/qcs405-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi @@ -1,5 +1,4 @@ -/* - * Copyright (c) 2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -11,6 +10,4 @@ * GNU General Public License for more details. */ -&smb1351_otg_supply { - qcom,charging-disabled; -}; +#include "sdxprairie.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..ab6e1c7a99cb8994d0bbe55a7380733f499910e0 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-pinctrl.dtsi @@ -0,0 +1,49 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + tlmm: pinctrl@f100000 { + compatible = "qcom,sdxprairie-pinctrl"; + reg = <0xf100000 0x300000>, + <0xb204900 0x280>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + interrupt-parent = <&intc>; + #interrupt-cells = <2>; + + uart3_console_active: uart3_console_active { + mux { + pins = "gpio8", "gpio9"; + function = "blsp_uart3"; + }; + config { + pins = "gpio8", "gpio9"; + drive-strength = <2>; + bias-disable; + }; + }; + + uart3_console_sleep: uart3_console_sleep { + mux { + pins = "gpio8", "gpio9"; + function = "blsp_uart3"; + }; + config { + pins = "gpio8", "gpio9"; + drive-strength = <2>; + bias-disable; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dts b/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dts new file mode 100644 index 0000000000000000000000000000000000000000..9f94297a7c44355ee2867a78f0d1c888670374fd --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dts @@ -0,0 +1,23 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + + +#include "sdxprairie-rumi.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDXPRAIRIE RUMI"; + compatible = "qcom,sdxprairie-rumi", + "qcom,sdxprairie", "qcom,rumi"; + qcom,board-id = <15 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..3d05e32ac5cae7ed9f95922fda5d144f07289fa1 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi @@ -0,0 +1,27 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sdxprairie.dtsi" + +&soc { + timer { + clock-frequency = <48000>; + }; + + timer@17820000 { + clock-frequency = <48000>; + }; +}; + +&ipa_hw { + qcom,ipa-hw-mode = <1>; /* IPA hw type = Virtual */ +}; diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..5c9007a353cc817c0321cc80d85b2d89050ee9d7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi @@ -0,0 +1,231 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + + +#include "skeleton.dtsi" +#include +#include + +/ { + model = "Qualcomm Technologies, Inc. SDXPRAIRIE"; + compatible = "qcom,sdxprairie"; + qcom,msm-id = <357 0x0>; + interrupt-parent = <&intc>; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + peripheral2_mem: peripheral2_region@8fd00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x8fd00000 0x300000>; + label = "peripheral2_mem"; + }; + + hyp_mem: hyp_region@8fc00000 { + no-map; + reg = <0x8fc00000 0x80000>; + label = "hyp_mem"; + }; + + mpss_adsp_mem: mpss_adsp_region@84000000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x84000000 0xb700000>; + label = "mpss_adsp_mem"; + }; + }; + + cpus { + #size-cells = <0>; + #address-cells = <1>; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x0>; + }; + }; + + soc: soc { }; +}; + + +&soc { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + intc: interrupt-controller@17800000 { + compatible = "qcom,msm-qgic2"; + interrupt-controller; + #interrupt-cells = <3>; + reg = <0x17800000 0x1000>, + <0x17802000 0x1000>; + }; + + timer { + compatible = "arm,armv7-timer"; + interrupts = <1 13 0xf08>, + <1 12 0xf08>, + <1 10 0xf08>, + <1 11 0xf08>; + clock-frequency = <19200000>; + }; + + timer@17820000 { + #address-cells = <1>; + #size-cells = <1>; + ranges; + compatible = "arm,armv7-timer-mem"; + reg = <0x17820000 0x1000>; + clock-frequency = <19200000>; + + frame@17821000 { + frame-number = <0>; + interrupts = <0 7 0x4>, + <0 6 0x4>; + reg = <0x17821000 0x1000>, + <0x17822000 0x1000>; + }; + + frame@17823000 { + frame-number = <1>; + interrupts = <0 8 0x4>; + reg = <0x17823000 0x1000>; + status = "disabled"; + }; + + frame@17824000 { + frame-number = <2>; + interrupts = <0 9 0x4>; + reg = <0x17824000 0x1000>; + status = "disabled"; + }; + + frame@17825000 { + frame-number = <3>; + interrupts = <0 10 0x4>; + reg = <0x17825000 0x1000>; + status = "disabled"; + }; + + frame@17826000 { + frame-number = <4>; + interrupts = <0 11 0x4>; + reg = <0x17826000 0x1000>; + status = "disabled"; + }; + + frame@17827000 { + frame-number = <5>; + interrupts = <0 12 0x4>; + reg = <0x17827000 0x1000>; + status = "disabled"; + }; + + frame@17828000 { + frame-number = <6>; + interrupts = <0 13 0x4>; + reg = <0x17828000 0x1000>; + status = "disabled"; + }; + + frame@17829000 { + frame-number = <7>; + interrupts = <0 14 0x4>; + reg = <0x17829000 0x1000>; + status = "disabled"; + }; + }; + + clock_gcc: qcom,gcc { + compatible = "qcom,dummycc"; + clock-output-names = "gcc_clocks"; + #clock-cells = <1>; + }; + + serial_uart: serial@831000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x831000 0x200>; + interrupts = <0 26 0>; + clocks = <&clock_gcc GCC_BLSP1_UART3_APPS_CLK>, + <&clock_gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&uart3_console_active>; + pinctrl-1 = <&uart3_console_sleep>; + status = "ok"; + }; + + qcom,msm_gsi { + compatible = "qcom,msm_gsi"; + }; + + ipa_hw: qcom,ipa@01e00000 { + compatible = "qcom,ipa"; + reg = <0x1e00000 0xc0000>, + <0x1e04000 0x23000>; + reg-names = "ipa-base", "gsi-base"; + interrupts = + <0 241 IRQ_TYPE_NONE>, + <0 47 IRQ_TYPE_NONE>; + interrupt-names = "ipa-irq", "gsi-irq"; + qcom,ipa-hw-ver = <17>; /* IPA core version = IPAv4.5 */ + qcom,ipa-hw-mode = <0>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,mhi-event-ring-id-limits = <9 
10>; /* start and end */ + qcom,modem-cfg-emb-pipe-flt; + qcom,use-ipa-pm; + qcom,bandwidth-vote-for-ipa; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <5>; + qcom,msm-bus,num-paths = <4>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <90 512 0 0>, + <90 585 0 0>, + <1 676 0 0>, + <143 777 0 0>, + /* SVS2 */ + <90 512 900000 1800000>, + <90 585 300000 600000>, + <1 676 90000 179000>, /*gcc_config_noc_clk_src */ + <143 777 0 120>, /* IB defined for IPA2X_clk in MHz*/ + /* SVS */ + <90 512 1530000 3060000>, + <90 585 400000 800000>, + <1 676 100000 199000>, + <143 777 0 250>, /* IB defined for IPA2X_clk in MHz*/ + /* NOMINAL */ + <90 512 2592000 5184000>, + <90 585 800000 1600000>, + <1 676 200000 399000>, + <143 777 0 440>, /* IB defined for IPA2X_clk in MHz*/ + /* TURBO */ + <90 512 2592000 5184000>, + <90 585 960000 1920000>, + <1 676 266000 531000>, + <143 777 0 500>; /* IB defined for IPA clk in MHz*/ + qcom,bus-vector-names = "MIN", "SVS2", "SVS", "NOMINAL", + "TURBO"; + qcom,throughput-threshold = <310 600 1000>; + qcom,scaling-exceptions = <>; + }; +}; + +#include "sdxprairie-pinctrl.dtsi" +#include "sdxprairie-ion.dtsi" +#include "msm-arm-smmu-sdxprairie.dtsi" diff --git a/arch/arm64/boot/dts/qcom/skeleton.dtsi b/arch/arm64/boot/dts/qcom/skeleton.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..34eda68d9ea21dfd4a421298157b7b173ecf2731 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/skeleton.dtsi @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file is deprecated, and will be removed once existing users have been + * updated. New dts{,i} files should *not* include skeleton.dtsi, and should + * instead explicitly provide the below nodes only as required. + * + * Skeleton device tree; the bare minimum needed to boot; just include and + * add a compatible value. The bootloader will typically populate the memory + * node. + */ + +/ { + #address-cells = <1>; + #size-cells = <1>; + chosen { }; + aliases { }; + memory { device_type = "memory"; reg = <0 0>; }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi b/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..565c4ead011c361e09b7c06f1c3743ad21e027b7 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm6150-bus.dtsi @@ -0,0 +1,1612 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include + +&soc { + ad_hoc_bus: ad-hoc-bus { + compatible = "qcom,msm-bus-device"; + reg = <0x1700000 0x40000>, + <0x1500000 0x40000>, + <0x9160000 0x40000>, + <0x9680000 0x60000>, + <0x1380000 0x40000>, + <0x1740000 0x40000>, + <0x1620000 0x40000>, + <0x1620000 0x40000>, + <0x1620000 0x40000>; + + reg-names = "aggre1_noc-base", "config_noc-base", + "dc_noc-base", "gem_noc-base", + "mc_virt-base", "mmss_noc-base", + "system_noc-base", "ipa_virt-base", + "camnoc_virt-base"; + + mbox-names = "apps_rsc", "disp_rsc"; + mboxes = <&apps_rsc 0 &disp_rsc 0>; + + /*RSCs*/ + rsc_apps: rsc-apps { + cell-id = ; + label = "apps_rsc"; + qcom,rsc-dev; + qcom,req_state = <2>; + }; + + rsc_disp: rsc-disp { + cell-id = ; + label = "disp_rsc"; + qcom,rsc-dev; + qcom,req_state = <3>; + }; + + /*BCMs*/ + bcm_acv: bcm-acv { + cell-id = ; + label = "ACV"; + qcom,bcm-name = "ACV"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_alc: bcm-alc { + cell-id = ; + label = "ALC"; + qcom,bcm-name = "ALC"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mc0: bcm-mc0 { + cell-id = ; + label = "MC0"; + qcom,bcm-name = "MC0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh0: bcm-sh0 { + cell-id = ; + label = "SH0"; + qcom,bcm-name = "SH0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm0: bcm-mm0 { + cell-id = ; + label = "MM0"; + qcom,bcm-name = "MM0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm1: bcm-mm1 { + cell-id = ; + label = "MM1"; + qcom,bcm-name = "MM1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh2: bcm-sh2 { + cell-id = ; + label = "SH2"; + qcom,bcm-name = "SH2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm2: bcm-mm2 { + cell-id = ; + label = "MM2"; + qcom,bcm-name = "MM2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sh3: bcm-sh3 { + cell-id = ; + label = "SH3"; + qcom,bcm-name = "SH3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mm3: bcm-mm3 { + cell-id = ; + label = "MM3"; + qcom,bcm-name = "MM3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn0: bcm-sn0 { + cell-id = ; + label = "SN0"; + qcom,bcm-name = "SN0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_ce0: bcm-ce0 { + cell-id = ; + label = "CE0"; + qcom,bcm-name = "CE0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_ip0: bcm-ip0 { + cell-id = ; + label = "IP0"; + qcom,bcm-name = "IP0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_cn0: bcm-cn0 { + cell-id = ; + label = "CN0"; + qcom,bcm-name = "CN0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_qup0: bcm-qup0 { + cell-id = ; + label = "QUP0"; + qcom,bcm-name = "QUP0"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn1: bcm-sn1 { + cell-id = ; + label = "SN1"; + qcom,bcm-name = "SN1"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn2: bcm-sn2 { + cell-id = ; + label = "SN2"; + qcom,bcm-name = "SN2"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn3: bcm-sn3 { + cell-id = ; + label = "SN3"; + qcom,bcm-name = "SN3"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn4: bcm-sn4 { + cell-id = ; + label = "SN4"; + qcom,bcm-name = "SN4"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn5: bcm-sn5 { + cell-id = ; + label = "SN5"; + qcom,bcm-name = "SN5"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn8: bcm-sn8 { + cell-id = ; + label = "SN8"; + qcom,bcm-name = "SN8"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn9: bcm-sn9 { + cell-id = ; + label = "SN9"; + qcom,bcm-name = 
"SN9"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn12: bcm-sn12 { + cell-id = ; + label = "SN12"; + qcom,bcm-name = "SN12"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn13: bcm-sn13 { + cell-id = ; + label = "SN13"; + qcom,bcm-name = "SN13"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn14: bcm-sn14 { + cell-id = ; + label = "SN14"; + qcom,bcm-name = "SN14"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_sn15: bcm-sn15 { + cell-id = ; + label = "SN15"; + qcom,bcm-name = "SN15"; + qcom,rscs = <&rsc_apps>; + qcom,bcm-dev; + }; + + bcm_mc0_display: bcm-mc0_display { + cell-id = ; + label = "MC0_DISPLAY"; + qcom,bcm-name = "MC0"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_sh0_display: bcm-sh0_display { + cell-id = ; + label = "SH0_DISPLAY"; + qcom,bcm-name = "SH0"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm0_display: bcm-mm0_display { + cell-id = ; + label = "MM0_DISPLAY"; + qcom,bcm-name = "MM0"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm1_display: bcm-mm1_display { + cell-id = ; + label = "MM1_DISPLAY"; + qcom,bcm-name = "MM1"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm2_display: bcm-mm2_display { + cell-id = ; + label = "MM2_DISPLAY"; + qcom,bcm-name = "MM2"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + bcm_mm3_display: bcm-mm3_display { + cell-id = ; + label = "MM3_DISPLAY"; + qcom,bcm-name = "MM3"; + qcom,rscs = <&rsc_disp>; + qcom,bcm-dev; + }; + + /*Buses*/ + fab_aggre1_noc: fab-aggre1_noc { + cell-id = ; + label = "fab-aggre1_noc"; + qcom,fab-dev; + qcom,base-name = "aggre1_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <16384>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_camnoc_virt: fab-camnoc_virt { + cell-id = ; + label = "fab-camnoc_virt"; + qcom,fab-dev; + qcom,base-name = "camnoc_virt-base"; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_config_noc: fab-config_noc { + cell-id = ; + label = "fab-config_noc"; + qcom,fab-dev; + qcom,base-name = "config_noc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_dc_noc: fab-dc_noc { + cell-id = ; + label = "fab-dc_noc"; + qcom,fab-dev; + qcom,base-name = "dc_noc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_gem_noc: fab-gem_noc { + cell-id = ; + label = "fab-gem_noc"; + qcom,fab-dev; + qcom,base-name = "gem_noc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_ipa_virt: fab-ipa_virt { + cell-id = ; + label = "fab-ipa_virt"; + qcom,fab-dev; + qcom,base-name = "ipa_virt-base"; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mc_virt: fab-mc_virt { + cell-id = ; + label = "fab-mc_virt"; + qcom,fab-dev; + qcom,base-name = "mc_virt-base"; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mmss_noc: fab-mmss_noc { + cell-id = ; + label = "fab-mmss_noc"; + qcom,fab-dev; + qcom,base-name = "mmss_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <36864>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_system_noc: fab-system_noc { + cell-id = ; + label = "fab-system_noc"; + qcom,fab-dev; + qcom,base-name = "system_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <45056>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_gem_noc_display: fab-gem_noc_display { + cell-id = ; + label = "fab-gem_noc_display"; + qcom,fab-dev; + qcom,base-name = "gem_noc-base"; + 
qcom,qos-off = <128>; + qcom,base-offset = <176128>; + qcom,sbm-offset = <0>; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + fab_mc_virt_display: fab-mc_virt_display { + cell-id = ; + label = "fab-mc_virt_display"; + qcom,fab-dev; + qcom,base-name = "mc_virt-base"; + qcom,bypass-qos-prg; + clocks = <>; + }; + + fab_mmss_noc_display: fab-mmss_noc_display { + cell-id = ; + label = "fab-mmss_noc_display"; + qcom,fab-dev; + qcom,base-name = "mmss_noc-base"; + qcom,bypass-qos-prg; + qcom,bus-type = <1>; + clocks = <>; + }; + + /*Masters*/ + + mas_qhm_a1noc_cfg: mas-qhm-a1noc-cfg { + cell-id = ; + label = "mas-qhm-a1noc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_aggre1_noc>; + qcom,bus-dev = <&fab_aggre1_noc>; + }; + + mas_qhm_qdss_bam: mas-qhm-qdss-bam { + cell-id = ; + label = "mas-qhm-qdss-bam"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <8>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qhm_qspi: mas-qhm-qspi { + cell-id = ; + label = "mas-qhm-qspi"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <19>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qhm_qup1: mas-qhm-qup1 { + cell-id = ; + label = "mas-qhm-qup1"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,qport = <14>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_qup0>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qnm_cnoc: mas-qnm-cnoc { + cell-id = ; + label = "mas-qnm-cnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + qcom,forwarding; + }; + + mas_qxm_crypto: mas-qxm-crypto { + cell-id = ; + label = "mas-qxm-crypto"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_ce0>; + qcom,ap-owned; + qcom,prio = <2>; + qcom,forwarding; + }; + + mas_qxm_ipa: mas-qxm-ipa { + cell-id = ; + label = "mas-qxm-ipa"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <2>; + qcom,connections = <&slv_qns_lpass_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + qcom,forwarding; + }; + + mas_xm_pcie: mas-xm-pcie { + cell-id = ; + label = "mas-xm-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <15>; + qcom,connections = <&slv_qns_pcie_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_xm_qdss_etr: mas-xm-qdss-etr { + cell-id = ; + label = "mas-xm-qdss-etr"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <7>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_xm_sdc2: mas-xm-sdc2 { + cell-id = ; + label = "mas-xm-sdc2"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <18>; + qcom,connections = <&slv_qns_a1noc_snoc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; + }; + + mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp { + cell-id = ; + label = "mas-qxm-camnoc-hf0-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_camnoc_uncomp>; + qcom,bus-dev = <&fab_camnoc_virt>; + qcom,bcms = <&bcm_mm1>; + }; + + 
mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp { + cell-id = ; + label = "mas-qxm-camnoc-hf1-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_camnoc_uncomp>; + qcom,bus-dev = <&fab_camnoc_virt>; + qcom,bcms = <&bcm_mm1>; + }; + + mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp { + cell-id = ; + label = "mas-qxm-camnoc-sf-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_camnoc_uncomp>; + qcom,bus-dev = <&fab_camnoc_virt>; + qcom,bcms = <&bcm_mm1>; + }; + + mas_qhm_spdm: mas-qhm-spdm { + cell-id = ; + label = "mas-qhm-spdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_cnoc_a2noc>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + mas_qnm_snoc: mas-qnm-snoc { + cell-id = ; + label = "mas-qnm-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_tlmm_south + &slv_qhs_camera_cfg &slv_qhs_snoc_cfg + &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg + &slv_qhs_glm &slv_qhs_qdss_cfg + &slv_qhs_display_cfg &slv_qhs_tcsr + &slv_qhs_ddrss_cfg &slv_qhs_sdc2 + &slv_qhs_gpuss_cfg &slv_qhs_venus_cfg + &slv_qhs_ipa &slv_qhs_clk_ctl + &slv_qhs_aop &slv_srvc_cnoc + &slv_qhs_ahb2phy_west &slv_qhs_cpr_cx + &slv_qhs_a1_noc_cfg &slv_qhs_aoss + &slv_qhs_prng &slv_qhs_vsense_ctrl_cfg + &slv_qhs_spdm &slv_qhs_crypto0_cfg + &slv_qhs_pimem_cfg &slv_qhs_cpr_mx + &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + mas_xm_qdss_dap: mas-xm-qdss-dap { + cell-id = ; + label = "mas-xm-qdss-dap"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_tlmm_south + &slv_qhs_camera_cfg &slv_qhs_snoc_cfg + &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg + &slv_qhs_glm &slv_qhs_qdss_cfg + &slv_qhs_display_cfg &slv_qhs_tcsr + &slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc + &slv_qhs_sdc2 &slv_qhs_gpuss_cfg + &slv_qhs_venus_cfg &slv_qhs_ipa + &slv_qhs_clk_ctl &slv_qhs_aop + &slv_srvc_cnoc &slv_qhs_ahb2phy_west + &slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg + &slv_qhs_aoss &slv_qhs_prng + &slv_qhs_vsense_ctrl_cfg &slv_qhs_spdm + &slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg + &slv_qhs_cpr_mx &slv_qhs_imem_cfg>; + qcom,bus-dev = <&fab_config_noc>; + }; + + mas_qhm_cnoc: mas-qhm-cnoc { + cell-id = ; + label = "mas-qhm-cnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qhs_llcc + &slv_qhs_dc_noc_gemnoc>; + qcom,bus-dev = <&fab_dc_noc>; + }; + + mas_acm_apps: mas-acm-apps { + cell-id = ; + label = "mas-acm-apps"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,qport = <96 98>; + qcom,connections = <&slv_qns_llcc &slv_qns_gem_noc_snoc + &slv_qns_sys_pcie>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,bcms = <&bcm_sh2>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_acm_gpu_tcu: mas-acm-gpu-tcu { + cell-id = ; + label = "mas-acm-gpu-tcu"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <352>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <6>; + }; + + mas_acm_sys_tcu: mas-acm-sys-tcu { + cell-id = ; + label = "mas-acm-sys-tcu"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <384>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <6>; + }; + + mas_qhm_gemnoc_cfg: mas-qhm-gemnoc-cfg { + cell-id = ; + label = "mas-qhm-gemnoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_gemnoc + 
&slv_qhs_mdsp_ms_mpu_cfg>; + qcom,bus-dev = <&fab_gem_noc>; + }; + + mas_qnm_gpu: mas-qnm-gpu { + cell-id = ; + label = "mas-qnm-gpu"; + qcom,buswidth = <32>; + qcom,agg-ports = <2>; + qcom,qport = <288 289>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qnm_mnoc_hf: mas-qnm-mnoc-hf { + cell-id = ; + label = "mas-qnm-mnoc-hf"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <128>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qnm_mnoc_sf: mas-qnm-mnoc-sf { + cell-id = ; + label = "mas-qnm-mnoc-sf"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <320>; + qcom,connections = <&slv_qns_llcc + &slv_qns_gem_noc_snoc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qnm_snoc_gc: mas-qnm-snoc-gc { + cell-id = ; + label = "mas-qnm-snoc-gc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <192>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qnm_snoc_sf: mas-qnm-snoc-sf { + cell-id = ; + label = "mas-qnm-snoc-sf"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,qport = <160>; + qcom,connections = <&slv_qns_llcc>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_ipa_core_master: mas-ipa-core-master { + cell-id = ; + label = "mas-ipa-core-master"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_ipa_core_slave>; + qcom,bus-dev = <&fab_ipa_virt>; + }; + + mas_llcc_mc: mas-llcc-mc { + cell-id = ; + label = "mas-llcc-mc"; + qcom,buswidth = <4>; + qcom,agg-ports = <4>; + qcom,connections = <&slv_ebi>; + qcom,bus-dev = <&fab_mc_virt>; + }; + + mas_qhm_mnoc_cfg: mas-qhm-mnoc-cfg { + cell-id = ; + label = "mas-qhm-mnoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_mnoc>; + qcom,bus-dev = <&fab_mmss_noc>; + }; + + mas_qxm_camnoc_hf0: mas-qxm-camnoc-hf0 { + cell-id = ; + label = "mas-qxm-camnoc-hf0"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 { + cell-id = ; + label = "mas-qxm-camnoc-hf1"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <2>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qxm_camnoc_sf: mas-qxm-camnoc-sf { + cell-id = ; + label = "mas-qxm-camnoc-sf"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <0>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm2>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qxm_mdp0: mas-qxm-mdp0 { + cell-id = ; + label = "mas-qxm-mdp0"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qns_mem_noc_hf>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qxm_rot: mas-qxm-rot { + cell-id = ; + label = "mas-qxm-rot"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <5>; + 
qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qxm_venus0: mas-qxm-venus0 { + cell-id = ; + label = "mas-qxm-venus0"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <6>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qxm_venus_arm9: mas-qxm-venus-arm9 { + cell-id = ; + label = "mas-qxm-venus-arm9"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <8>; + qcom,connections = <&slv_qns2_mem_noc>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + }; + + mas_qhm_snoc_cfg: mas-qhm-snoc-cfg { + cell-id = ; + label = "mas-qhm-snoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_srvc_snoc>; + qcom,bus-dev = <&fab_system_noc>; + }; + + mas_qnm_aggre1_noc: mas-qnm-aggre1-noc { + cell-id = ; + label = "mas-qnm-aggre1-noc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_gemnoc_sf &slv_qxs_pimem + &slv_xs_pcie &slv_qxs_imem + &slv_qhs_apss &slv_qns_cnoc + &slv_xs_sys_tcu_cfg &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn9>; + }; + + mas_qnm_gemnoc: mas-qnm-gemnoc { + cell-id = ; + label = "mas-qnm-gemnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qxs_pimem &slv_qxs_imem + &slv_qhs_apss &slv_qns_cnoc + &slv_xs_sys_tcu_cfg &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn15>; + }; + + mas_qnm_gemnoc_pcie: mas-qnm-gemnoc-pcie { + cell-id = ; + label = "mas-qnm-gemnoc-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_xs_pcie>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn8>; + }; + + mas_qnm_lpass_anoc: mas-qnm-lpass-anoc { + cell-id = ; + label = "mas-qnm-lpass-anoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_gemnoc_sf + &slv_qxs_pimem &slv_xs_pcie + &slv_qxs_imem &slv_qhs_apss + &slv_qns_cnoc &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn13>; + }; + + mas_qnm_pcie_anoc: mas-qnm-pcie-anoc { + cell-id = ; + label = "mas-qnm-pcie-anoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,connections = <&slv_qns_gemnoc_sf &slv_qxs_imem + &slv_qhs_apss &slv_qns_cnoc + &slv_xs_qdss_stm>; + qcom,bus-dev = <&fab_system_noc>; + }; + + mas_qxm_pimem: mas-qxm-pimem { + cell-id = ; + label = "mas-qxm-pimem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <1>; + qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn12>; + qcom,ap-owned; + qcom,prio = <2>; + qcom,forwarding; + }; + + mas_xm_gic: mas-xm-gic { + cell-id = ; + label = "mas-xm-gic"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,qport = <2>; + qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn12>; + qcom,ap-owned; + qcom,prio = <2>; + qcom,forwarding; + }; + + mas_alc: mas-alc { + cell-id = ; + label = "mas-alc"; + qcom,buswidth = <1>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mc_virt>; + qcom,bcms = <&bcm_alc>; + }; + + mas_qnm_mnoc_hf_display: mas-qnm-mnoc-hf_display { + cell-id = ; + label = "mas-qnm-mnoc-hf_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <128>; + qcom,connections = 
<&slv_qns_llcc_display>; + qcom,bus-dev = <&fab_gem_noc_display>; + }; + + mas_qnm_mnoc_sf_display: mas-qnm-mnoc-sf_display { + cell-id = ; + label = "mas-qnm-mnoc-sf_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <320>; + qcom,connections = <&slv_qns_llcc_display>; + qcom,bus-dev = <&fab_gem_noc_display>; + }; + + mas_llcc_mc_display: mas-llcc-mc_display { + cell-id = ; + label = "mas-llcc-mc_display"; + qcom,buswidth = <4>; + qcom,agg-ports = <4>; + qcom,connections = <&slv_ebi_display>; + qcom,bus-dev = <&fab_mc_virt_display>; + }; + + mas_qxm_mdp0_display: mas-qxm-mdp0_display { + cell-id = ; + label = "mas-qxm-mdp0_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <3>; + qcom,connections = <&slv_qns_mem_noc_hf_display>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,bcms = <&bcm_mm1_display>; + }; + + mas_qxm_rot_display: mas-qxm-rot_display { + cell-id = ; + label = "mas-qxm-rot_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,qport = <5>; + qcom,connections = <&slv_qns2_mem_noc_display>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,bcms = <&bcm_mm3_display>; + }; + + /*Internal nodes*/ + + /*Slaves*/ + + slv_qns_a1noc_snoc:slv-qns-a1noc-snoc { + cell-id = ; + label = "slv-qns-a1noc-snoc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,connections = <&mas_qnm_aggre1_noc>; + }; + + slv_srvc_aggre1_noc:slv-srvc-aggre1-noc { + cell-id = ; + label = "slv-srvc-aggre1-noc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,bcms = <&bcm_sn8>; + }; + + slv_qns_lpass_snoc:slv-qns-lpass-snoc { + cell-id = ; + label = "slv-qns-lpass-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,connections = <&mas_qnm_lpass_anoc>; + }; + + slv_qns_pcie_snoc:slv-qns-pcie-snoc { + cell-id = ; + label = "slv-qns-pcie-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,connections = <&mas_qnm_pcie_anoc>; + qcom,bcms = <&bcm_sn14>; + }; + + slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp { + cell-id = ; + label = "slv-qns-camnoc-uncomp"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_camnoc_virt>; + }; + + slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg { + cell-id = ; + label = "slv-qhs-a1-noc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_a1noc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ahb2phy_west:slv-qhs-ahb2phy-west { + cell-id = ; + label = "slv-qhs-ahb2phy-west"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_aop:slv-qhs-aop { + cell-id = ; + label = "slv-qhs-aop"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_aoss:slv-qhs-aoss { + cell-id = ; + label = "slv-qhs-aoss"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_camera_cfg:slv-qhs-camera-cfg { + cell-id = ; + label = "slv-qhs-camera-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_clk_ctl:slv-qhs-clk-ctl { + cell-id = ; + label = "slv-qhs-clk-ctl"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_cpr_cx:slv-qhs-cpr-cx { + cell-id = 
; + label = "slv-qhs-cpr-cx"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_cpr_mx:slv-qhs-cpr-mx { + cell-id = ; + label = "slv-qhs-cpr-mx"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_crypto0_cfg:slv-qhs-crypto0-cfg { + cell-id = ; + label = "slv-qhs-crypto0-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ddrss_cfg:slv-qhs-ddrss-cfg { + cell-id = ; + label = "slv-qhs-ddrss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_cnoc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_display_cfg:slv-qhs-display-cfg { + cell-id = ; + label = "slv-qhs-display-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_glm:slv-qhs-glm { + cell-id = ; + label = "slv-qhs-glm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_gpuss_cfg:slv-qhs-gpuss-cfg { + cell-id = ; + label = "slv-qhs-gpuss-cfg"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_imem_cfg:slv-qhs-imem-cfg { + cell-id = ; + label = "slv-qhs-imem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ipa:slv-qhs-ipa { + cell-id = ; + label = "slv-qhs-ipa"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_mnoc_cfg:slv-qhs-mnoc-cfg { + cell-id = ; + label = "slv-qhs-mnoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_mnoc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_pimem_cfg:slv-qhs-pimem-cfg { + cell-id = ; + label = "slv-qhs-pimem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_prng:slv-qhs-prng { + cell-id = ; + label = "slv-qhs-prng"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_qdss_cfg:slv-qhs-qdss-cfg { + cell-id = ; + label = "slv-qhs-qdss-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_sdc2:slv-qhs-sdc2 { + cell-id = ; + label = "slv-qhs-sdc2"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_snoc_cfg:slv-qhs-snoc-cfg { + cell-id = ; + label = "slv-qhs-snoc-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qhm_snoc_cfg>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_spdm:slv-qhs-spdm { + cell-id = ; + label = "slv-qhs-spdm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tcsr:slv-qhs-tcsr { + cell-id = ; + label = "slv-qhs-tcsr"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_tlmm_south:slv-qhs-tlmm-south { + cell-id = ; + label = "slv-qhs-tlmm-south"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + 
qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_ufs_mem_cfg:slv-qhs-ufs-mem-cfg { + cell-id = ; + label = "slv-qhs-ufs-mem-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_venus_cfg:slv-qhs-venus-cfg { + cell-id = ; + label = "slv-qhs-venus-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_vsense_ctrl_cfg:slv-qhs-vsense-ctrl-cfg { + cell-id = ; + label = "slv-qhs-vsense-ctrl-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qns_cnoc_a2noc:slv-qns-cnoc-a2noc { + cell-id = ; + label = "slv-qns-cnoc-a2noc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,connections = <&mas_qnm_cnoc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_srvc_cnoc:slv-srvc-cnoc { + cell-id = ; + label = "slv-srvc-cnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_config_noc>; + qcom,bcms = <&bcm_cn0>; + }; + + slv_qhs_dc_noc_gemnoc:slv-qhs-dc-noc-gemnoc { + cell-id = ; + label = "slv-qhs-dc-noc-gemnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_dc_noc>; + qcom,connections = <&mas_qhm_gemnoc_cfg>; + }; + + slv_qhs_llcc:slv-qhs-llcc { + cell-id = ; + label = "slv-qhs-llcc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_dc_noc>; + }; + + slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg { + cell-id = ; + label = "slv-qhs-mdsp-ms-mpu-cfg"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + }; + + slv_qns_gem_noc_snoc:slv-qns-gem-noc-snoc { + cell-id = ; + label = "slv-qns-gem-noc-snoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,connections = <&mas_qnm_gemnoc>; + qcom,bcms = <&bcm_sh3>; + }; + + slv_qns_llcc:slv-qns-llcc { + cell-id = ; + label = "slv-qns-llcc"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,connections = <&mas_llcc_mc>; + qcom,bcms = <&bcm_sh0>; + }; + + slv_qns_sys_pcie:slv-qns-sys-pcie { + cell-id = ; + label = "slv-qns-sys-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + qcom,connections = <&mas_qnm_gemnoc_pcie>; + }; + + slv_srvc_gemnoc:slv-srvc-gemnoc { + cell-id = ; + label = "slv-srvc-gemnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc>; + }; + + slv_ipa_core_slave:slv-ipa-core-slave { + cell-id = ; + label = "slv-ipa-core-slave"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_ipa_virt>; + qcom,bcms = <&bcm_ip0>; + }; + + slv_ebi:slv-ebi { + cell-id = ; + label = "slv-ebi"; + qcom,buswidth = <4>; + qcom,agg-ports = <4>; + qcom,bus-dev = <&fab_mc_virt>; + qcom,bcms = <&bcm_mc0>, <&bcm_acv>; + }; + + slv_qns2_mem_noc:slv-qns2-mem-noc { + cell-id = ; + label = "slv-qns2-mem-noc"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,connections = <&mas_qnm_mnoc_sf>; + qcom,bcms = <&bcm_mm2>; + }; + + slv_qns_mem_noc_hf:slv-qns-mem-noc-hf { + cell-id = ; + label = "slv-qns-mem-noc-hf"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc>; + qcom,connections = <&mas_qnm_mnoc_hf>; + qcom,bcms = <&bcm_mm0>; + }; + + slv_srvc_mnoc:slv-srvc-mnoc { + cell-id = ; + label = "slv-srvc-mnoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc>; + }; + + 
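+	/* System NOC slaves (qcom,bus-dev = <&fab_system_noc>) */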
slv_qhs_apss:slv-qhs-apss { + cell-id = ; + label = "slv-qhs-apss"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + }; + + slv_qns_cnoc:slv-qns-cnoc { + cell-id = ; + label = "slv-qns-cnoc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc>; + qcom,bcms = <&bcm_sn3>; + }; + + slv_qns_gemnoc_sf:slv-qns-gemnoc-sf { + cell-id = ; + label = "slv-qns-gemnoc-sf"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc_sf>; + qcom,bcms = <&bcm_sn0>; + }; + + slv_qns_memnoc_gc:slv-qns-memnoc-gc { + cell-id = ; + label = "slv-qns-memnoc-gc"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,connections = <&mas_qnm_snoc_gc>; + qcom,bcms = <&bcm_sn2>; + }; + + slv_qxs_imem:slv-qxs-imem { + cell-id = ; + label = "slv-qxs-imem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn1>; + }; + + slv_qxs_pimem:slv-qxs-pimem { + cell-id = ; + label = "slv-qxs-pimem"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn4>; + }; + + slv_srvc_snoc:slv-srvc-snoc { + cell-id = ; + label = "slv-srvc-snoc"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + }; + + slv_xs_pcie:slv-xs-pcie { + cell-id = ; + label = "slv-xs-pcie"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn8>; + }; + + slv_xs_qdss_stm:slv-xs-qdss-stm { + cell-id = ; + label = "slv-xs-qdss-stm"; + qcom,buswidth = <4>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + qcom,bcms = <&bcm_sn5>; + }; + + slv_xs_sys_tcu_cfg:slv-xs-sys-tcu-cfg { + cell-id = ; + label = "slv-xs-sys-tcu-cfg"; + qcom,buswidth = <8>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_system_noc>; + }; + + slv_qns_llcc_display:slv-qns-llcc_display { + cell-id = ; + label = "slv-qns-llcc_display"; + qcom,buswidth = <16>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_gem_noc_display>; + qcom,connections = <&mas_llcc_mc_display>; + qcom,bcms = <&bcm_sh0_display>; + }; + + slv_ebi_display:slv-ebi_display { + cell-id = ; + label = "slv-ebi_display"; + qcom,buswidth = <4>; + qcom,agg-ports = <4>; + qcom,bus-dev = <&fab_mc_virt_display>; + qcom,bcms = <&bcm_mc0_display>; + }; + + slv_qns2_mem_noc_display:slv-qns2-mem-noc_display { + cell-id = ; + label = "slv-qns2-mem-noc_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,connections = <&mas_qnm_mnoc_sf_display>; + qcom,bcms = <&bcm_mm2_display>; + }; + + slv_qns_mem_noc_hf_display:slv-qns-mem-noc-hf_display { + cell-id = ; + label = "slv-qns-mem-noc-hf_display"; + qcom,buswidth = <32>; + qcom,agg-ports = <1>; + qcom,bus-dev = <&fab_mmss_noc_display>; + qcom,connections = <&mas_qnm_mnoc_hf_display>; + qcom,bcms = <&bcm_mm0_display>; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi index d4db086685f50460ce0a7eea24728c0a13a77350..836f84bc261b62c9262562152c65375ede04b7ee 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-cdp.dtsi @@ -20,3 +20,12 @@ &pm6150_charger { qcom,batteryless-platform; }; + +&pm6150l_wled { + qcom,string-cfg= <7>; + status = "ok"; +}; + +&pm6150l_lcdb { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-gdsc.dtsi 
b/arch/arm64/boot/dts/qcom/sm6150-gdsc.dtsi index 5548c3c0c9ea33cd16054a3f2dd0d6b2f43d261e..0c9ffdc43afea171ee50625cb9371b5af20aacf9 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-gdsc.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-gdsc.dtsi @@ -14,7 +14,7 @@ &soc { /* GDSCs in Global CC */ emac_gdsc: qcom,gdsc@106004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "emac_gdsc"; reg = <0x106004 0x4>; qcom,poll-cfg-gdscr; @@ -22,7 +22,7 @@ }; pcie_0_gdsc: qcom,gdsc@16b004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "pcie_0_gdsc"; reg = <0x16b004 0x4>; qcom,poll-cfg-gdscr; @@ -30,7 +30,7 @@ }; ufs_phy_gdsc: qcom,gdsc@177004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "ufs_phy_gdsc"; reg = <0x177004 0x4>; qcom,poll-cfg-gdscr; @@ -38,7 +38,7 @@ }; usb20_sec_gdsc: qcom,gdsc@1a6004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "usb20_sec_gdsc"; reg = <0x1a6004 0x4>; qcom,poll-cfg-gdscr; @@ -46,7 +46,7 @@ }; usb30_prim_gdsc: qcom,gdsc@10f004 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "usb30_prim_gdsc"; reg = <0x10f004 0x4>; qcom,poll-cfg-gdscr; @@ -54,7 +54,7 @@ }; hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc: qcom,gdsc@17d040 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc"; reg = <0x17d040 0x4>; qcom,no-status-check-on-disable; @@ -63,7 +63,7 @@ }; hlos1_vote_aggre_noc_mmu_tbu1_gdsc: qcom,gdsc@17d044 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc"; reg = <0x17d044 0x4>; qcom,no-status-check-on-disable; @@ -72,7 +72,7 @@ }; hlos1_vote_aggre_noc_mmu_tbu2_gdsc: qcom,gdsc@17d048 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc"; reg = <0x17d048 0x4>; qcom,no-status-check-on-disable; @@ -81,7 +81,7 @@ }; hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc: qcom,gdsc@17d04c { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc"; reg = <0x17d04c 0x4>; qcom,no-status-check-on-disable; @@ -90,7 +90,7 @@ }; hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc"; reg = <0x17d050 0x4>; qcom,no-status-check-on-disable; @@ -99,7 +99,7 @@ }; hlos1_vote_mmnoc_mmu_tbu_sf_gdsc: qcom,gdsc@17d054 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc"; reg = <0x17d054 0x4>; qcom,no-status-check-on-disable; @@ -108,7 +108,7 @@ }; hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@17d058 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc"; reg = <0x17d058 0x4>; qcom,no-status-check-on-disable; @@ -116,24 +116,6 @@ status = "disabled"; }; - hlos1_vote_turing_mmu_tbu0_gdsc: qcom,gdsc@17d05c { - compatible = "regulator-fixed"; - regulator-name = "hlos1_vote_turing_mmu_tbu0_gdsc"; - reg = <0x17d05c 0x4>; - qcom,no-status-check-on-disable; - qcom,gds-timeout = <500>; - status = "disabled"; - }; - - hlos1_vote_turing_mmu_tbu1_gdsc: qcom,gdsc@17d060 { - compatible = "regulator-fixed"; - regulator-name = "hlos1_vote_turing_mmu_tbu1_gdsc"; - reg = <0x17d060 0x4>; - qcom,no-status-check-on-disable; - qcom,gds-timeout = <500>; - status = "disabled"; - }; - /* GDSCs in 
Camera CC */ bps_gdsc: qcom,gdsc@ad06004 { compatible = "regulator-fixed"; @@ -194,7 +176,7 @@ }; gpu_cx_gdsc: qcom,gdsc@509106c { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "gpu_cx_gdsc"; reg = <0x509106c 0x4>; hw-ctrl-addr = <&gpu_cx_hw_ctrl>; @@ -205,7 +187,7 @@ }; gpu_gx_gdsc: qcom,gdsc@509100c { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "gpu_gx_gdsc"; reg = <0x509100c 0x4>; qcom,poll-cfg-gdscr; @@ -214,7 +196,7 @@ /* GDSCs in Video CC */ vcodec0_gdsc: qcom,gdsc@ab00874 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "vcodec0_gdsc"; reg = <0xab00874 0x4>; qcom,poll-cfg-gdscr; @@ -222,7 +204,7 @@ }; venus_gdsc: qcom,gdsc@ab00814 { - compatible = "regulator-fixed"; + compatible = "qcom,gdsc"; regulator-name = "venus_gdsc"; reg = <0xab00814 0x4>; qcom,poll-cfg-gdscr; diff --git a/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..b93dcdce1044466a08c039c8efd1ae917435a07b --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm6150-gpu.dtsi @@ -0,0 +1,216 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + msm_gpu: qcom,kgsl-3d0@5000000 { + label = "kgsl-3d0"; + compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d"; + status = "ok"; + reg = <0x5000000 0x40000>; + reg-names = "kgsl_3d0_reg_memory"; + interrupts = <0 300 0>; + interrupt-names = "kgsl_3d0_irq"; + qcom,id = <0>; + + qcom,chipid = <0x06000800>; + + qcom,initial-pwrlevel = <5>; + + /* */ + qcom,idle-timeout = <80>; + qcom,no-nap; + + qcom,highest-bank-bit = <14>; + qcom,ubwc-mode = <2>; + qcom,min-access-length = <32>; + + /* size in bytes */ + qcom,snapshot-size = <1048576>; + + /* base addr, size */ + qcom,gpu-qdss-stm = <0x06900000 0x40000>; + #cooling-cells = <2>; + + clocks = <&clock_gpucc GPU_CC_GX_GFX3D_CLK>, + <&clock_gpucc GPU_CC_CXO_CLK>, + <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, + <&clock_gpucc GPU_CC_AHB_CLK>, + <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, + <&clock_gpucc GPU_CC_CX_SNOC_DVM_CLK>, + <&clock_gpucc GPU_CC_CX_GMU_CLK>; + + clock-names = "core_clk", "rbbmtimer_clk", "mem_clk", + "iface_clk", "mem_iface_clk", + "alt_mem_iface_clk", "gmu_clk"; + + /* Bus Scale Settings */ + qcom,msm-bus,name = "grp3d"; + qcom,msm-bus,num-cases = <12>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <26 512 0 0>, + <26 512 0 400000>, /* 1 bus=100 (Low SVS) */ + <26 512 0 800000>, /* 2 bus=200 (Low SVS) */ + <26 512 0 1200000>, /* 3 bus=300 (Low SVS) */ + <26 512 0 1804000>, /* 4 bus=451.2 (Low SVS) */ + <26 512 0 2188000>, /* 5 bus=547.2 (Low SVS) */ + <26 512 0 2726000>, /* 6 bus=681.6 (SVS) */ + <26 512 0 3072000>, /* 7 bus=768 (SVS) */ + <26 512 0 4070000>, /* 8 bus=1017.6 (SVS L1) */ + <26 512 0 5414000>, /* 9 bus=1353.6 (NOM) */ + <26 512 0 6220000>, /* 10 bus=1555.2 (NOM) */ + <26 512 0 7219000>; /* 11 bus=1804.8 (TURBO) */ + + /* GDSC regulator names */ + regulator-names = "vddcx", "vdd"; + /* GDSC oxili regulators */ + vddcx-supply = 
<&gpu_cx_gdsc>; + vdd-supply = <&gpu_gx_gdsc>; + + /* CPU latency parameter */ + qcom,pm-qos-active-latency = <67>; + qcom,pm-qos-wakeup-latency = <67>; + + /* Enable context aware freq. scaling */ + qcom,enable-ca-jump; + /* Context aware jump busy penalty in us */ + qcom,ca-busy-penalty = <12000>; + /* Context aware jump target power level */ + qcom,ca-target-pwrlevel = <3>; + + /* GPU Mempools */ + qcom,gpu-mempools { + #address-cells = <1>; + #size-cells = <0>; + compatible = "qcom,gpu-mempools"; + + /* 4K Page Pool configuration */ + qcom,gpu-mempool@0 { + reg = <0>; + qcom,mempool-page-size = <4096>; + qcom,mempool-allocate; + }; + /* 8K Page Pool configuration */ + qcom,gpu-mempool@1 { + reg = <1>; + qcom,mempool-page-size = <8192>; + qcom,mempool-allocate; + }; + /* 64K Page Pool configuration */ + qcom,gpu-mempool@2 { + reg = <2>; + qcom,mempool-page-size = <65536>; + qcom,mempool-reserved = <256>; + }; + /* 1M Page Pool configuration */ + qcom,gpu-mempool@3 { + reg = <3>; + qcom,mempool-page-size = <1048576>; + qcom,mempool-reserved = <32>; + }; + }; + + qcom,gpu-pwrlevels { + #address-cells = <1>; + #size-cells = <0>; + + compatible = "qcom,gpu-pwrlevels"; + + /* TURBO */ + qcom,gpu-pwrlevel@0 { + reg = <0>; + qcom,gpu-freq = <845000000>; + qcom,bus-freq = <11>; + qcom,bus-min = <10>; + qcom,bus-max = <11>; + }; + + /* NOM L1 */ + qcom,gpu-pwrlevel@1 { + reg = <1>; + qcom,gpu-freq = <706000000>; + qcom,bus-freq = <10>; + qcom,bus-min = <9>; + qcom,bus-max = <11>; + }; + + /* NOM */ + qcom,gpu-pwrlevel@2 { + reg = <2>; + qcom,gpu-freq = <645000000>; + qcom,bus-freq = <9>; + qcom,bus-min = <8>; + qcom,bus-max = <10>; + }; + + /* SVS L1 */ + qcom,gpu-pwrlevel@3 { + reg = <3>; + qcom,gpu-freq = <513000000>; + qcom,bus-freq = <8>; + qcom,bus-min = <7>; + qcom,bus-max = <9>; + }; + + /* SVS */ + qcom,gpu-pwrlevel@4 { + reg = <4>; + qcom,gpu-freq = <400000000>; + qcom,bus-freq = <7>; + qcom,bus-min = <5>; + qcom,bus-max = <8>; + }; + + /* Low SVS */ + qcom,gpu-pwrlevel@5 { + reg = <5>; + qcom,gpu-freq = <290000000>; + qcom,bus-freq = <4>; + qcom,bus-min = <4>; + qcom,bus-max = <5>; + }; + + /* XO */ + qcom,gpu-pwrlevel@6 { + reg = <6>; + qcom,gpu-freq = <0>; + qcom,bus-freq = <0>; + qcom,bus-min = <0>; + qcom,bus-max = <0>; + }; + }; + }; + + kgsl_msm_iommu: qcom,kgsl-iommu@0x050a0000 { + compatible = "qcom,kgsl-smmu-v2"; + + reg = <0x050a0000 0x10000>; + qcom,protect = <0xa0000 0x10000>; + + clocks =<&clock_gcc GCC_GPU_CFG_AHB_CLK>, + <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, + <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>, + <&clock_gpucc GPU_CC_CX_SNOC_DVM_CLK>; + + clock-names = "iface_clk", "mem_clk", "mem_iface_clk", + "alt_mem_iface_clk"; + + qcom,retention; + + gfx3d_user: gfx3d_user { + compatible = "qcom,smmu-kgsl-cb"; + label = "gfx3d_user"; + iommus = <&kgsl_smmu 0x0 0x401>; + qcom,gpu-offset = <0xa8000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts index 4a3b42b71f7006b9f41d7471fe4f0f88e6772493..ae891836ed6b78cb8d7f9201bfdfc6534dc0dded 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-idp-overlay.dts @@ -20,5 +20,6 @@ / { model = "Qualcomm Technologies, Inc. 
SM6150 IDP"; compatible = "qcom,sm6150-idp", "qcom,sm6150", "qcom,idp"; + qcom,msm-id = <355 0x0>; qcom,board-id = <34 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi index 83cbf89a00a70608c4fffd2f17e3cde39995f84f..f9f7bc31ec86da2de869b9848855955b8ee3da2f 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-idp.dtsi @@ -16,3 +16,12 @@ &qupv3_se0_2uart { status = "ok"; }; + +&pm6150l_wled { + qcom,string-cfg= <7>; + status = "ok"; +}; + +&pm6150l_lcdb { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-ion.dtsi b/arch/arm64/boot/dts/qcom/sm6150-ion.dtsi index a8bfab8603ec6535fd3177cccda2bff14f5736f6..f89f20ef5c376060fde6dc2381e1b0d1dfb80f74 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-ion.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-ion.dtsi @@ -21,12 +21,6 @@ qcom,ion-heap-type = "SYSTEM"; }; - qcom,ion-heap@22 { /* ADSP HEAP */ - reg = <22>; - memory-region = <&adsp_mem>; - qcom,ion-heap-type = "DMA"; - }; - qcom,ion-heap@27 { /* QSEECOM HEAP */ reg = <27>; memory-region = <&qseecom_mem>; diff --git a/arch/arm64/boot/dts/qcom/sm6150-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm6150-mtp.dtsi index 83cbf89a00a70608c4fffd2f17e3cde39995f84f..f9f7bc31ec86da2de869b9848855955b8ee3da2f 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-mtp.dtsi @@ -16,3 +16,12 @@ &qupv3_se0_2uart { status = "ok"; }; + +&pm6150l_wled { + qcom,string-cfg= <7>; + status = "ok"; +}; + +&pm6150l_lcdb { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi index 1126ddac9394d8e0fd9b52cd9d99fdea51e094ba..5e62775e076181e7eb2749f499d1089a0efa9215 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-pinctrl.dtsi @@ -20,6 +20,52 @@ interrupt-controller; #interrupt-cells = <2>; + ufs_dev_reset_assert: ufs_dev_reset_assert { + config { + pins = "ufs_reset"; + bias-pull-down; /* default: pull down */ + /* + * UFS_RESET driver strengths are having + * different values/steps compared to typical + * GPIO drive strengths. + * + * Following table clarifies: + * + * HDRV value | UFS_RESET | Typical GPIO + * (dec) | (mA) | (mA) + * 0 | 0.8 | 2 + * 1 | 1.55 | 4 + * 2 | 2.35 | 6 + * 3 | 3.1 | 8 + * 4 | 3.9 | 10 + * 5 | 4.65 | 12 + * 6 | 5.4 | 14 + * 7 | 6.15 | 16 + * + * POR value for UFS_RESET HDRV is 3 which means + * 3.1mA and we want to use that. Hence just + * specify 8mA to "drive-strength" binding and + * that should result into writing 3 to HDRV + * field. 
+ */ + drive-strength = <8>; /* default: 3.1 mA */ + output-low; /* active low reset */ + }; + }; + + ufs_dev_reset_deassert: ufs_dev_reset_deassert { + config { + pins = "ufs_reset"; + bias-pull-down; /* default: pull down */ + /* + * default: 3.1 mA + * check comments under ufs_dev_reset_assert + */ + drive-strength = <8>; + output-high; /* active low reset */ + }; + }; + /* QUPv3_0 South SE mappings */ /* SE 0 pin mappings */ qupv3_se0_2uart_pins: qupv3_se0_2uart_pins { diff --git a/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi b/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi index 303f0634f01b620c3ebdcabfde971222d674544e..e15beb75a9712b09bbf3a873fdaffb2bb1ac5e88 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-pm.dtsi @@ -165,6 +165,7 @@ compatible = "qcom,rpm-stats"; reg = <0xc300000 0x1000>, <0xc3f0004 0x4>; reg-names = "phys_addr_base", "offset_addr"; + qcom,num-records = <3>; }; qcom,rpmh-master-stats@b221200 { diff --git a/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi b/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi index 184c771ea70e4e07d7cc375f40477968d5e5b503..ee3ac333f7066a50138b3ccd6e8909853fc82078 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-regulator.dtsi @@ -43,6 +43,14 @@ qcom,init-voltage-level = ; }; + + mx_cdev: mx-cdev-lvl { + compatible = "qcom,regulator-cooling-device"; + regulator-cdev-supply = <&VDD_MX_LEVEL>; + regulator-levels = ; + #cooling-cells = <2>; + }; }; /* pm6150 S1 - VDD_CX supply */ @@ -78,6 +86,13 @@ ; qcom,min-dropout-voltage-level = <(-1)>; }; + + cx_cdev: regulator-cdev { + compatible = "qcom,rpmh-reg-cdev"; + mboxes = <&qmp_aop 0>; + qcom,reg-resource-name = "cx"; + #cooling-cells = <2>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts b/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts index 7b5115ecdb68ae2562d0648efd11cf15e0cf5c72..47b69d4655115b69ebc0d10dd0b1dbb652754ccd 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm6150-rumi-overlay.dts @@ -20,5 +20,6 @@ / { model = "Qualcomm Technologies, Inc. 
SM6150 RUMI"; compatible = "qcom,sm6150-rumi", "qcom,sm6150", "qcom,rumi"; + qcom,msm-id = <355 0x0>; qcom,board-id = <15 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-rumi.dtsi b/arch/arm64/boot/dts/qcom/sm6150-rumi.dtsi index 51ec825cb9ef77c2157d33f14877f0eccf01226e..3d8b2bddefea3392520e8c6f7ccf92e9cd4294ed 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-rumi.dtsi @@ -72,6 +72,40 @@ status = "disabled"; }; +&ufsphy_mem { + compatible = "qcom,ufs-phy-qrbtc-sdm845"; + + vdda-phy-supply = <&pm6150_l4>; /* 0.88v */ + vdda-pll-supply = <&pm6150_l11>; /* 1.2v */ + vdda-phy-max-microamp = <51400>; + vdda-pll-max-microamp = <14200>; + + status = "ok"; +}; + +&ufshc_mem { + limit-tx-hs-gear = <1>; + limit-rx-hs-gear = <1>; + scsi-cmd-timeout = <300000>; + + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm6150l_l11>; + vccq2-supply = <&pm6150_l12>; + vcc-max-microamp = <600000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm6150l_l3>; + qcom,vddp-ref-clk-max-microamp = <100>; + qcom,vddp-ref-clk-min-uV = <1232000>; + qcom,vddp-ref-clk-max-uV = <1260000>; + + qcom,disable-lpm; + rpm-level = <0>; + spm-level = <0>; + status = "ok"; +}; + &spmi_bus { status = "disabled"; }; diff --git a/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi b/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi index 37bae76fa1e49f3fb519693bf86c9dc3b7a4024a..95a8366d4b00fdbf00ad96bb008ddb4ea7bc1480 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150-thermal.dtsi @@ -13,6 +13,59 @@ #include +&soc { + qmi-tmd-devices { + compatible = "qcom,qmi-cooling-devices"; + + modem { + qcom,instance-id = <0x0>; + + modem_pa: modem_pa { + qcom,qmi-dev-name = "pa"; + #cooling-cells = <2>; + }; + + modem_proc: modem_proc { + qcom,qmi-dev-name = "modem"; + #cooling-cells = <2>; + }; + + modem_current: modem_current { + qcom,qmi-dev-name = "modem_current"; + #cooling-cells = <2>; + }; + + modem_skin: modem_skin { + qcom,qmi-dev-name = "modem_skin"; + #cooling-cells = <2>; + }; + + modem_vdd: modem_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + adsp { + qcom,instance-id = <0x1>; + + adsp_vdd: adsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + cdsp { + qcom,instance-id = <0x43>; + + cdsp_vdd: cdsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + }; +}; + &thermal_zones { aoss-usr { polling-delay-passive = <0>; @@ -84,7 +137,7 @@ }; }; - apc1-cpu0-usr { + cpu-1-0-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 5>; @@ -98,7 +151,7 @@ }; }; - apc1-cpu1-usr { + cpu-1-1-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 6>; @@ -112,7 +165,7 @@ }; }; - apc1-cpu2-usr { + cpu-1-2-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 7>; @@ -126,7 +179,7 @@ }; }; - apc1-cpu3-usr { + cpu-1-3-usr { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&tsens0 8>; diff --git a/arch/arm64/boot/dts/qcom/sm6150.dtsi b/arch/arm64/boot/dts/qcom/sm6150.dtsi index e384dd622a7c28560476a95321a9a83a2d342dc1..eb18da4b791624b20f75129ba863d7da79e2f414 100644 --- a/arch/arm64/boot/dts/qcom/sm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6150.dtsi @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ #include #include #include +#include / { model = 
"Qualcomm Technologies, Inc. SM6150"; @@ -30,6 +32,7 @@ interrupt-parent = <&pdc>; aliases { + ufshc1 = &ufshc_mem; /* Embedded UFS slot */ serial0 = &qupv3_se0_2uart; sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ sdhc2 = &sdhc_2; /* SDC2 SD Card slot */ @@ -53,7 +56,7 @@ next-level-cache = <&L2_0>; L2_0: l2-cache { compatible = "arm,arch-cache"; - cache-size = <0x20000>; + cache-size = <0x10000>; cache-level = <2>; next-level-cache = <&L3_0>; @@ -66,16 +69,16 @@ L1_I_0: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x8800>; }; L1_D_0: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x9000>; }; - L1_TLB_0: l1-tlb { - qcom,dump-size = <0x3000>; + L2_TLB_0: l2-tlb { + qcom,dump-size = <0x5000>; }; }; @@ -85,26 +88,26 @@ reg = <0x0 0x100>; enable-method = "psci"; cache-size = <0x8000>; - next-level-cache = <&L2_1>; - L2_1: l2-cache { + next-level-cache = <&L2_100>; + L2_100: l2-cache { compatible = "arm,arch-cache"; - cache-size = <0x20000>; + cache-size = <0x10000>; cache-level = <2>; next-level-cache = <&L3_0>; }; - L1_I_1: l1-icache { + L1_I_100: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x8800>; }; - L1_D_1: l1-dcache { + L1_D_100: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x9000>; }; - L1_TLB_1: l1-tlb { - qcom,dump-size = <0x3000>; + L2_TLB_100: l1-tlb { + qcom,dump-size = <0x5000>; }; }; @@ -115,26 +118,26 @@ reg = <0x0 0x200>; enable-method = "psci"; cache-size = <0x8000>; - next-level-cache = <&L2_2>; - L2_2: l2-cache { + next-level-cache = <&L2_200>; + L2_200: l2-cache { compatible = "arm,arch-cache"; - cache-size = <0x20000>; + cache-size = <0x10000>; cache-level = <2>; next-level-cache = <&L3_0>; }; - L1_I_2: l1-icache { + L1_I_200: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x8800>; }; - L1_D_2: l1-dcache { + L1_D_200: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x9000>; }; - L1_TLB_2: l1-tlb { - qcom,dump-size = <0x3000>; + L2_TLB_200: l1-tlb { + qcom,dump-size = <0x5000>; }; }; @@ -144,26 +147,26 @@ reg = <0x0 0x300>; enable-method = "psci"; cache-size = <0x8000>; - next-level-cache = <&L2_3>; - L2_3: l2-cache { + next-level-cache = <&L2_300>; + L2_300: l2-cache { compatible = "arm,arch-cache"; - cache-size = <0x20000>; + cache-size = <0x10000>; cache-level = <2>; next-level-cache = <&L3_0>; }; - L1_I_3: l1-icache { + L1_I_300: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x8800>; }; - L1_D_3: l1-dcache { + L1_D_300: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x9000>; }; - L1_TLB_3: l1-tlb { - qcom,dump-size = <0x3000>; + L2_TLB_300: l1-tlb { + qcom,dump-size = <0x5000>; }; }; @@ -173,26 +176,26 @@ reg = <0x0 0x400>; enable-method = "psci"; cache-size = <0x8000>; - next-level-cache = <&L2_4>; - L2_4: l2-cache { + next-level-cache = <&L2_400>; + L2_400: l2-cache { compatible = "arm,arch-cache"; - cache-size = <0x20000>; + cache-size = <0x10000>; cache-level = <2>; next-level-cache = <&L3_0>; }; - L1_I_4: l1-icache { + L1_I_400: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x8800>; }; - L1_D_4: l1-dcache { + L1_D_400: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x9000>; }; - L1_TLB_4: l1-tlb { - qcom,dump-size = 
<0x3000>; + L2_TLB_400: l1-tlb { + qcom,dump-size = <0x5000>; }; }; @@ -202,26 +205,26 @@ reg = <0x0 0x500>; enable-method = "psci"; cache-size = <0x8000>; - next-level-cache = <&L2_5>; - L2_5: l2-cache { + next-level-cache = <&L2_500>; + L2_500: l2-cache { compatible = "arm,arch-cache"; - cache-size = <0x20000>; + cache-size = <0x10000>; cache-level = <2>; next-level-cache = <&L3_0>; }; - L1_I_5: l1-icache { + L1_I_500: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x8800>; }; - L1_D_5: l1-dcache { + L1_D_500: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0xa000>; + qcom,dump-size = <0x9000>; }; - L1_TLB_5: l1-tlb { - qcom,dump-size = <0x3000>; + L2_TLB_500: l1-tlb { + qcom,dump-size = <0x5000>; }; }; @@ -231,26 +234,35 @@ reg = <0x0 0x600>; enable-method = "psci"; cache-size = <0x10000>; - next-level-cache = <&L2_6>; - L2_6: l2-cache { + next-level-cache = <&L2_600>; + L2_600: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x40000>; cache-level = <2>; next-level-cache = <&L3_0>; + qcom,dump-size = <0x48000>; }; - L1_I_100: l1-icache { + L1_I_600: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0x14000>; + qcom,dump-size = <0x11000>; }; - L1_D_100: l1-dcache { + L1_D_600: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0x14000>; + qcom,dump-size = <0x12000>; }; - L1_TLB_100: l1-tlb { - qcom,dump-size = <0x3c00>; + L1_ITLB_600: l1-itlb { + qcom,dump-size = <0x300>; + }; + + L1_DTLB_600: l1-dtlb { + qcom,dump-size = <0x480>; + }; + + L2_TLB_600: l2-tlb { + qcom,dump-size = <0x7800>; }; }; @@ -260,26 +272,35 @@ reg = <0x0 0x700>; enable-method = "psci"; cache-size = <0x10000>; - next-level-cache = <&L2_7>; - L2_7: l2-cache { + next-level-cache = <&L2_700>; + L2_700: l2-cache { compatible = "arm,arch-cache"; cache-size = <0x40000>; cache-level = <2>; next-level-cache = <&L3_0>; + qcom,dump-size = <0x48000>; }; - L1_I_200: l1-icache { + L1_I_700: l1-icache { compatible = "arm,arch-cache"; - qcom,dump-size = <0x14000>; + qcom,dump-size = <0x11000>; }; - L1_D_200: l1-dcache { + L1_D_700: l1-dcache { compatible = "arm,arch-cache"; - qcom,dump-size = <0x14000>; + qcom,dump-size = <0x12000>; }; - L1_TLB_200: l1-tlb { - qcom,dump-size = <0x3c00>; + L1_ITLB_700: l1-itlb { + qcom,dump-size = <0x300>; + }; + + L1_DTLB_700: l1-dtlb { + qcom,dump-size = <0x480>; + }; + + L2_TLB_700: l2-tlb { + qcom,dump-size = <0x7800>; }; }; @@ -596,8 +617,9 @@ }; clock_rpmh: qcom,rpmhclk { - compatible = "qcom,dummycc"; - clock-output-names = "rpm_clocks"; + compatible = "qcom,rpmh-clk-sm6150"; + mboxes = <&apps_rsc 0>; + mbox-names = "apps"; #clock-cells = <1>; }; @@ -608,15 +630,20 @@ }; clock_gcc: qcom,gcc { - compatible = "qcom,dummycc"; - clock-output-names = "gcc_clocks"; + compatible = "qcom,gcc-sm6150", "syscon"; + reg = <0x100000 0x1f0000>; + reg-names = "cc_base"; + vdd_cx-supply = <&pm6150_s1_level>; + vdd_cx_ao-supply = <&pm6150_s1_level_ao>; #clock-cells = <1>; #reset-cells = <1>; }; clock_videocc: qcom,videocc { - compatible = "qcom,dummycc"; - clock-output-names = "videocc_clocks"; + compatible = "qcom,videocc-sm6150", "syscon"; + reg = <0xab00000 0x10000>; + reg-names = "cc_base"; + vdd_cx-supply = <&pm6150_s1_level>; #clock-cells = <1>; #reset-cells = <1>; }; @@ -636,8 +663,11 @@ }; clock_gpucc: qcom,gpupcc { - compatible = "qcom,dummycc"; - clock-output-names = "gpucc_clocks"; + compatible = "qcom,gpucc-sm6150", "syscon"; + reg = <0x5090000 0x9000>; + reg-names = "cc_base"; + vdd_cx-supply = 
<&pm6150_s1_level>; + vdd_mx-supply = <&pm6150_s3_level>; #clock-cells = <1>; #reset-cells = <1>; }; @@ -685,6 +715,11 @@ compatible = "qcom,msm-imem-pil"; reg = <0x94c 200>; }; + + diag_dload@c8 { + compatible = "qcom,msm-imem-diag-dload"; + reg = <0xc8 200>; + }; }; restart@c264000 { @@ -794,38 +829,38 @@ qcom,dump-id = <0x60>; }; - qcom,l1_i_cache1 { - qcom,dump-node = <&L1_I_1>; + qcom,l1_i_cache100 { + qcom,dump-node = <&L1_I_100>; qcom,dump-id = <0x61>; }; - qcom,l1_i_cache2 { - qcom,dump-node = <&L1_I_2>; + qcom,l1_i_cache200 { + qcom,dump-node = <&L1_I_200>; qcom,dump-id = <0x62>; }; - qcom,l1_i_cache3 { - qcom,dump-node = <&L1_I_3>; + qcom,l1_i_cache300 { + qcom,dump-node = <&L1_I_300>; qcom,dump-id = <0x63>; }; - qcom,l1_i_cache4 { - qcom,dump-node = <&L1_I_4>; + qcom,l1_i_cache400 { + qcom,dump-node = <&L1_I_400>; qcom,dump-id = <0x64>; }; - qcom,l1_i_cache5 { - qcom,dump-node = <&L1_I_5>; + qcom,l1_i_cache500 { + qcom,dump-node = <&L1_I_500>; qcom,dump-id = <0x65>; }; - qcom,l1_i_cache100 { - qcom,dump-node = <&L1_I_100>; + qcom,l1_i_cache600 { + qcom,dump-node = <&L1_I_600>; qcom,dump-id = <0x66>; }; - qcom,l1_i_cache200 { - qcom,dump-node = <&L1_I_200>; + qcom,l1_i_cache700 { + qcom,dump-node = <&L1_I_700>; qcom,dump-id = <0x67>; }; @@ -834,79 +869,109 @@ qcom,dump-id = <0x80>; }; - qcom,l1_d_cache1 { - qcom,dump-node = <&L1_D_1>; + qcom,l1_d_cache100 { + qcom,dump-node = <&L1_D_100>; qcom,dump-id = <0x81>; }; - qcom,l1_d_cache2 { - qcom,dump-node = <&L1_D_2>; + qcom,l1_d_cache200 { + qcom,dump-node = <&L1_D_200>; qcom,dump-id = <0x82>; }; - qcom,l1_d_cache3 { - qcom,dump-node = <&L1_D_3>; + qcom,l1_d_cache300 { + qcom,dump-node = <&L1_D_300>; qcom,dump-id = <0x83>; }; - qcom,l1_d_cache4 { - qcom,dump-node = <&L1_D_4>; + qcom,l1_d_cache400 { + qcom,dump-node = <&L1_D_400>; qcom,dump-id = <0x84>; }; - qcom,l1_d_cache5 { - qcom,dump-node = <&L1_D_5>; + qcom,l1_d_cache500 { + qcom,dump-node = <&L1_D_500>; qcom,dump-id = <0x85>; }; - qcom,l1_d_cache100 { - qcom,dump-node = <&L1_D_100>; + qcom,l1_d_cache600 { + qcom,dump-node = <&L1_D_600>; qcom,dump-id = <0x86>; }; - qcom,l1_d_cache200 { - qcom,dump-node = <&L1_D_200>; + qcom,l1_d_cache700 { + qcom,dump-node = <&L1_D_700>; qcom,dump-id = <0x87>; }; - qcom,l1_tlb_dump0 { - qcom,dump-node = <&L1_TLB_0>; - qcom,dump-id = <0x20>; + qcom,l1_i_tlb_dump600 { + qcom,dump-node = <&L1_ITLB_600>; + qcom,dump-id = <0x26>; }; - qcom,l1_tlb_dump1 { - qcom,dump-node = <&L1_TLB_1>; - qcom,dump-id = <0x21>; + qcom,l1_i_tlb_dump700 { + qcom,dump-node = <&L1_ITLB_700>; + qcom,dump-id = <0x27>; }; - qcom,l1_tlb_dump2 { - qcom,dump-node = <&L1_TLB_2>; - qcom,dump-id = <0x22>; + qcom,l1_d_tlb_dump600 { + qcom,dump-node = <&L1_DTLB_600>; + qcom,dump-id = <0x46>; }; - qcom,l1_tlb_dump3 { - qcom,dump-node = <&L1_TLB_3>; - qcom,dump-id = <0x23>; + qcom,l1_d_tlb_dump700 { + qcom,dump-node = <&L1_DTLB_700>; + qcom,dump-id = <0x47>; }; - qcom,l1_tlb_dump4 { - qcom,dump-node = <&L1_TLB_4>; - qcom,dump-id = <0x24>; + qcom,l2_cache_dump600 { + qcom,dump-node = <&L2_600>; + qcom,dump-id = <0xc6>; }; - qcom,l1_tlb_dump5 { - qcom,dump-node = <&L1_TLB_5>; - qcom,dump-id = <0x25>; + qcom,l2_cache_dump700 { + qcom,dump-node = <&L2_700>; + qcom,dump-id = <0xc7>; }; - qcom,l1_tlb_dump100 { - qcom,dump-node = <&L1_TLB_100>; - qcom,dump-id = <0x26>; + qcom,l2_tlb_dump0 { + qcom,dump-node = <&L2_TLB_0>; + qcom,dump-id = <0x120>; }; - qcom,l1_tlb_dump200 { - qcom,dump-node = <&L1_TLB_200>; - qcom,dump-id = <0x27>; + qcom,l2_tlb_dump100 { + qcom,dump-node = <&L2_TLB_100>; + 
qcom,dump-id = <0x121>; + }; + + qcom,l2_tlb_dump200 { + qcom,dump-node = <&L2_TLB_200>; + qcom,dump-id = <0x122>; + }; + + qcom,l2_tlb_dump300 { + qcom,dump-node = <&L2_TLB_300>; + qcom,dump-id = <0x123>; + }; + + qcom,l2_tlb_dump400 { + qcom,dump-node = <&L2_TLB_400>; + qcom,dump-id = <0x124>; + }; + + qcom,l2_tlb_dump500 { + qcom,dump-node = <&L2_TLB_500>; + qcom,dump-id = <0x125>; + }; + + qcom,l2_tlb_dump600 { + qcom,dump-node = <&L2_TLB_600>; + qcom,dump-id = <0x126>; + }; + + qcom,l2_tlb_dump700 { + qcom,dump-node = <&L2_TLB_700>; + qcom,dump-id = <0x127>; }; }; @@ -1030,6 +1095,78 @@ status = "disabled"; }; + ufsphy_mem: ufsphy_mem@1d87000 { + reg = <0x1d87000 0xdb8>; /* PHY regs */ + reg-names = "phy_mem"; + #phy-cells = <0>; + + lanes-per-direction = <1>; + + clock-names = "ref_clk_src", + "ref_clk", + "ref_aux_clk"; + clocks = <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>, + <&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>; + + status = "disabled"; + }; + + ufshc_mem: ufshc@1d84000 { + compatible = "qcom,ufshc"; + reg = <0x1d84000 0x3000>; + interrupts = <0 265 0>; + phys = <&ufsphy_mem>; + phy-names = "ufsphy"; + + lanes-per-direction = <1>; + dev-ref-clk-freq = <0>; /* 19.2 MHz */ + spm-level = <5>; + + clock-names = + "core_clk", + "bus_aggr_clk", + "iface_clk", + "core_clk_unipro", + "core_clk_ice", + "ref_clk", + "tx_lane0_sync_clk", + "rx_lane0_sync_clk"; + clocks = + <&clock_gcc GCC_UFS_PHY_AXI_CLK>, + <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>, + <&clock_gcc GCC_UFS_PHY_AHB_CLK>, + <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>, + <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, + <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>; + freq-table-hz = + <37500000 300000000>, + <0 0>, + <0 0>, + <37500000 300000000>, + <75000000 300000000>, + <0 0>, + <0 0>, + <0 0>; + + /* PM QoS */ + qcom,pm-qos-cpu-groups = <0x3f 0xC0>; + qcom,pm-qos-cpu-group-latency-us = <67 67>; + qcom,pm-qos-default-cpu = <0>; + + pinctrl-names = "dev-reset-assert", "dev-reset-deassert"; + pinctrl-0 = <&ufs_dev_reset_assert>; + pinctrl-1 = <&ufs_dev_reset_deassert>; + + resets = <&clock_gcc GCC_UFS_PHY_BCR>; + reset-names = "core_reset"; + non-removable; + + status = "disabled"; + }; + spmi_bus: qcom,spmi@c440000 { compatible = "qcom,spmi-pmic-arb"; reg = <0xc440000 0x2300>, @@ -1126,6 +1263,7 @@ qcom,msm-adsprpc-mem { compatible = "qcom,msm-adsprpc-mem-region"; memory-region = <&adsp_mem>; + restrict-access; }; qcom,msm_fastrpc { @@ -1476,6 +1614,204 @@ #thermal-sensor-cells = <1>; }; + qcom,lpass@62400000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x62400000 0x00100>; + + vdd_cx-supply = <&L8A_LEVEL>; + qcom,vdd_cx-uV-uA = ; + qcom,proxy-reg-names = "vdd_cx"; + + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <1>; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <423>; + qcom,sysmon-id = <1>; + qcom,ssctl-instance-id = <0x14>; + qcom,firmware-name = "adsp"; + memory-region = <&pil_adsp_mem>; + + /* Inputs from lpass */ + interrupts-extended = <&pdc 0 162 1>, + <&adsp_smp2p_in 0 0>, + <&adsp_smp2p_in 2 0>, + <&adsp_smp2p_in 1 0>, + <&adsp_smp2p_in 3 0>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,proxy-unvote", + "qcom,err-ready", + "qcom,stop-ack"; + + /* Outputs to lpass */ + qcom,smem-states = <&adsp_smp2p_out 0>; + qcom,smem-state-names = "qcom,force-stop"; + + mboxes = <&qmp_aop 0>; + mbox-names = "adsp-pil"; + }; + + pil_modem: qcom,mss@4080000 { + 
compatible = "qcom,pil-tz-generic"; + reg = <0x4080000 0x100>; + + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + vdd_cx-supply = <&VDD_CX_LEVEL>; + qcom,vdd_cx-uV-uA = ; + vdd_mss-supply = <&VDD_MSS_LEVEL>; + qcom,vdd_mss-uV-uA = ; + qcom,proxy-reg-names = "vdd_cx", "vdd_mss"; + + qcom,firmware-name = "modem"; + memory-region = <&pil_modem_mem>; + qcom,proxy-timeout-ms = <10000>; + qcom,sysmon-id = <0>; + qcom,ssctl-instance-id = <0x12>; + qcom,pas-id = <4>; + qcom,smem-id = <421>; + qcom,complete-ramdump; + + /* Inputs from mss */ + interrupts-extended = <&pdc 0 266 1>, + <&modem_smp2p_in 0 0>, + <&modem_smp2p_in 2 0>, + <&modem_smp2p_in 1 0>, + <&modem_smp2p_in 3 0>, + <&modem_smp2p_in 7 0>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,proxy-unvote", + "qcom,err-ready", + "qcom,stop-ack", + "qcom,shutdown-ack"; + + /* Outputs to mss */ + qcom,smem-states = <&modem_smp2p_out 0>; + qcom,smem-state-names = "qcom,force-stop"; + + mboxes = <&qmp_aop 0>; + mbox-names = "mss-pil"; + }; + + qcom,turing@8300000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x8300000 0x100000>; + + vdd_cx-supply = <&VDD_CX_LEVEL>; + qcom,proxy-reg-names = "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <18>; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <601>; + qcom,sysmon-id = <7>; + qcom,ssctl-instance-id = <0x17>; + qcom,firmware-name = "cdsp"; + memory-region = <&pil_cdsp_mem>; + + /* Inputs from turing */ + interrupts-extended = <&pdc 0 578 1>, + <&cdsp_smp2p_in 0 0>, + <&cdsp_smp2p_in 2 0>, + <&cdsp_smp2p_in 1 0>, + <&cdsp_smp2p_in 3 0>; + + interrupt-names = "qcom,wdog", + "qcom,err-fatal", + "qcom,proxy-unvote", + "qcom,err-ready", + "qcom,stop-ack"; + + /* Outputs to turing */ + qcom,smem-states = <&cdsp_smp2p_out 0>; + qcom,smem-state-names = "qcom,force-stop"; + + mboxes = <&qmp_aop 0>; + mbox-names = "cdsp-pil"; + }; + + qcom,msm_gsi { + compatible = "qcom,msm_gsi"; + }; + + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa3"; + qcom,rmnet-ipa-ssr; + qcom,ipa-loaduC; + qcom,ipa-advertise-sg-support; + qcom,ipa-napi-enable; + }; + + ipa_hw: qcom,ipa@1e00000 { + compatible = "qcom,ipa"; + reg = <0x1e00000 0x34000>, + <0x1e04000 0x2c000>; + reg-names = "ipa-base", "gsi-base"; + interrupts = <0 311 0>, <0 432 0>; + interrupt-names = "ipa-irq", "gsi-irq"; + qcom,ipa-hw-ver = <16>; /* IPA core version = IPAv4.2 */ + qcom,ipa-hw-mode = <1>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,modem-cfg-emb-pipe-flt; + qcom,ipa-wdi2; + qcom,ipa-fltrt-not-hashable; + qcom,use-64-bit-dma-mask; + qcom,arm-smmu; + qcom,bandwidth-vote-for-ipa; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <5>; + qcom,msm-bus,num-paths = <4>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + , + , + , + , + /* SVS2 */ + , + , + , + , + /* SVS */ + , + , + , + , + /* NOMINAL */ + , + , + , + , + /* TURBO */ + , + , + , + ; + qcom,bus-vector-names = + "MIN", "SVS2", "SVS", "NOMINAL", "TURBO"; + + }; + + qcom,ipa_fws { + compatible = "qcom,pil-tz-generic"; + qcom,pas-id = <0xf>; + qcom,firmware-name = "ipa_fws"; + qcom,pil-force-shutdown; + memory-region = <&pil_ipa_fw_mem>; + }; }; #include "pm6150.dtsi" @@ -1486,6 +1822,7 @@ #include "sm6150-gdsc.dtsi" #include "sm6150-qupv3.dtsi" #include "sm6150-thermal.dtsi" +#include "sm6150-gpu.dtsi" &emac_gdsc { status = "ok"; @@ -1535,14 +1872,6 @@ status = "ok"; }; -&hlos1_vote_turing_mmu_tbu0_gdsc { - 
status = "ok"; -}; - -&hlos1_vote_turing_mmu_tbu1_gdsc { - status = "ok"; -}; - &bps_gdsc { status = "ok"; }; @@ -1572,6 +1901,10 @@ }; &gpu_gx_gdsc { + clock-names = "core_root_clk"; + clocks = <&clock_gpucc GPU_CC_GX_GFX3D_CLK_SRC>; + qcom,force-enable-root-clk; + parent-supply = <&pm6150_s1_level>; status = "ok"; }; @@ -1586,3 +1919,4 @@ #include "sm6150-ion.dtsi" #include "msm-arm-smmu-sm6150.dtsi" #include "sm6150-coresight.dtsi" +#include "sm6150-bus.dtsi" diff --git a/arch/arm64/boot/dts/qcom/sm8150-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm8150-audio-overlay.dtsi index 145b7e47c43ef89b470fef922016e878abaeb01a..ae00fc4d03b4129328b57ae530b6c6f026d6dbec 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-audio-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-audio-overlay.dtsi @@ -46,7 +46,7 @@ asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx"; - qcom,wsa-max-devs = <2>; + qcom,wsa-max-devs = <1>; qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>, <&wsa881x_0213>, <&wsa881x_0214>; qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", @@ -120,6 +120,7 @@ qcom,codec-ext-clk-src = <2>; qcom,codec-lpass-ext-clk-freq = <19200000>; qcom,codec-lpass-clk-id = <278>; + qcom,use-pinctrl = <1>; pinctrl-names = "active", "sleep"; pinctrl-0 = <&quin_mi2s_mclk_active>; pinctrl-1 = <&quin_mi2s_mclk_sleep>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi b/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi deleted file mode 100644 index 5b1c5faa8cf4c120848d7c9dc7824ba9e9e84970..0000000000000000000000000000000000000000 --- a/arch/arm64/boot/dts/qcom/sm8150-auto.dtsi +++ /dev/null @@ -1,315 +0,0 @@ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "sm8150.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. 
SM8150 AUTO"; - qcom,msm-name = "SM8150 AUTO"; - qcom,msm-id = <362 0x10000>; -}; - -/* Remove regulator nodes specific to SM8150 */ -&soc { - /delete-node/ regulator-pm8150-s4; - /delete-node/ rpmh-regulator-msslvl; - /delete-node/ rpmh-regulator-smpa2; - /delete-node/ rpmh-regulator-ebilvl; - /delete-node/ rpmh-regulator-smpa5; - /delete-node/ rpmh-regulator-smpa6; - /delete-node/ rpmh-regulator-ldoa1; - /delete-node/ rpmh-regulator-ldoa2; - /delete-node/ rpmh-regulator-ldoa3; - /delete-node/ rpmh-regulator-lmxlvl; - /delete-node/ rpmh-regulator-ldoa5; - /delete-node/ rpmh-regulator-ldoa6; - /delete-node/ rpmh-regulator-ldoa7; - /delete-node/ rpmh-regulator-lcxlvl; - /delete-node/ rpmh-regulator-ldoa9; - /delete-node/ rpmh-regulator-ldoa10; - /delete-node/ rpmh-regulator-ldoa11; - /delete-node/ rpmh-regulator-ldoa12; - /delete-node/ rpmh-regulator-ldoa13; - /delete-node/ rpmh-regulator-ldoa14; - /delete-node/ rpmh-regulator-ldoa15; - /delete-node/ rpmh-regulator-ldoa16; - /delete-node/ rpmh-regulator-ldoa17; - /delete-node/ rpmh-regulator-smpc1; - /delete-node/ rpmh-regulator-gfxlvl; - /delete-node/ rpmh-regulator-mxlvl; - /delete-node/ rpmh-regulator-mmcxlvl; - /delete-node/ rpmh-regulator-cxlvl; - /delete-node/ rpmh-regulator-smpc8; - /delete-node/ rpmh-regulator-ldoc1; - /delete-node/ rpmh-regulator-ldoc2; - /delete-node/ rpmh-regulator-ldoc3; - /delete-node/ rpmh-regulator-ldoc4; - /delete-node/ rpmh-regulator-ldoc5; - /delete-node/ rpmh-regulator-ldoc6; - /delete-node/ rpmh-regulator-ldoc7; - /delete-node/ rpmh-regulator-ldoc8; - /delete-node/ rpmh-regulator-ldoc9; - /delete-node/ rpmh-regulator-ldoc10; - /delete-node/ rpmh-regulator-ldoc11; - /delete-node/ rpmh-regulator-bobc1; - /delete-node/ rpmh-regulator-smpf2; - /delete-node/ rpmh-regulator-ldof2; - /delete-node/ rpmh-regulator-ldof5; - /delete-node/ rpmh-regulator-ldof6; -}; - -/* Add regulator nodes specific to SM8150 Auto */ -#include "sm8150-auto-regulator.dtsi" - -&cam_csiphy0 { - mipi-csi-vdd-supply = <&pm8150_2_l8>; -}; - -&cam_csiphy1 { - mipi-csi-vdd-supply = <&pm8150_2_l8>; -}; - -&cam_csiphy2 { - mipi-csi-vdd-supply = <&pm8150_2_l8>; -}; - -&cam_csiphy3 { - mipi-csi-vdd-supply = <&pm8150_2_l8>; -}; - -&pcie0 { - vreg-1.8-supply = <&pm8150_2_l8>; - vreg-0.9-supply = <&pm8150_2_l18>; -}; - -&pcie1 { - vreg-1.8-supply = <&pm8150_2_l8>; - vreg-0.9-supply = <&pm8150_2_l18>; -}; - -&mdss_dsi_phy0 { - vdda-0p9-supply = <&pm8150_2_l18>; -}; - -&mdss_dsi_phy1 { - vdda-0p9-supply = <&pm8150_2_l18>; -}; - -&mdss_dsi0 { - vdda-1p2-supply = <&pm8150_2_l8>; -}; - -&mdss_dsi1 { - vdda-1p2-supply = <&pm8150_2_l8>; -}; - -&sde_dp { - vdda-1p2-supply = <&pm8150_2_l8>; - vdda-0p9-supply = <&pm8150_2_l18>; -}; - -&lmh_dcvs1 { - isens_vref_0p8-supply = <&pm8150_1_l5_ao>; - isens_vref_1p8-supply = <&pm8150_1_l12_ao>; -}; - -&usb2_phy0 { - vdd-supply = <&pm8150_1_l5>; - vdda18-supply = <&pm8150_1_l12>; - vdda33-supply = <&pm8150_1_l2>; -}; - -&usb_qmp_dp_phy { - vdd-supply = <&pm8150_1_l5>; - core-supply = <&pm8150_2_l8>; -}; - -&usb2_phy1 { - vdd-supply = <&pm8150_1_l5>; - vdda18-supply = <&pm8150_1_l12>; - vdda33-supply = <&pm8150_1_l2>; - status = "ok"; -}; - -&usb_qmp_phy { - vdd-supply = <&pm8150_1_l5>; - core-supply = <&pm8150_2_l8>; - status = "ok"; -}; - -&icnss { - vdd-cx-mx-supply = <&pm8150_1_l1>; - vdd-1.8-xo-supply = <&pm8150_1_l7>; - vdd-1.3-rfa-supply = <&pm8150_2_l1>; - /delete-property/ vdd-3.3-ch0-supply; -}; - -&pil_ssc { - vdd_cx-supply = <&VDD_CX_LEVEL>; - vdd_mx-supply = <&VDD_MX_LEVEL>; -}; - -&pil_modem { - 
vdd_mss-supply = <&pm8150_1_s8_level>; -}; - -&wil6210 { - /delete-property/ vddio-supply; -}; - -&gpu_gx_gdsc { - parent-supply = <&pm8150_2_s3_level>; - vdd_parent-supply = <&pm8150_2_s3_level>; -}; - -&thermal_zones { - aoss0-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-0-0-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-0-1-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-0-2-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-0-3-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpuss-0-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpuss-1-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-0-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-1-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-2-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-3-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-4-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-5-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-6-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cpu-1-7-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - gpuss-0-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - aoss-1-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cwlan-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - video-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - ddr-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - q6-hvx-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - camera-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - cmpss-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - mdm-core-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - npu-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - mdm-vec-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - mdm-scl-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; - gpuss-1-lowf { - cooling-maps { - /delete-node/ mmcx_vdd_cdev; - }; - }; -}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-cdp.dtsi index 2fcd47b44606bf0d6d1e0baa86927ad8fe273044..bdce6a16fb3af14fb9167b4474ab3e0d51b26da6 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-cdp.dtsi @@ -32,6 +32,17 @@ status = "ok"; }; + led_flash_front: qcom,camera-flash@2 { + cell-index = <2>; + reg = <0x02 0x00>; + compatible = "qcom,camera-flash"; + wled-flash-support; + flash-source = <&wled_flash>; + torch-source = <&wled_torch>; + switch-source = <&wled_switch>; + status = "ok"; + }; + led_flash_iris: qcom,camera-flash@3 { cell-index = <3>; reg = <0x03 0x00>; @@ -317,6 +328,7 @@ sensor-position-yaw = <0>; eeprom-src = <&eeprom_front>; actuator-src = <&actuator_front>; + led-flash-src = <&led_flash_front>; cam_vio-supply = <&pm8150l_l1>; cam_bob-supply = <&pm8150l_bob>; cam_vana-supply = <&pm8009_l6>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi index 2fcd47b44606bf0d6d1e0baa86927ad8fe273044..bdce6a16fb3af14fb9167b4474ab3e0d51b26da6 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi 
+++ b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-mtp.dtsi @@ -32,6 +32,17 @@ status = "ok"; }; + led_flash_front: qcom,camera-flash@2 { + cell-index = <2>; + reg = <0x02 0x00>; + compatible = "qcom,camera-flash"; + wled-flash-support; + flash-source = <&wled_flash>; + torch-source = <&wled_torch>; + switch-source = <&wled_switch>; + status = "ok"; + }; + led_flash_iris: qcom,camera-flash@3 { cell-index = <3>; reg = <0x03 0x00>; @@ -317,6 +328,7 @@ sensor-position-yaw = <0>; eeprom-src = <&eeprom_front>; actuator-src = <&actuator_front>; + led-flash-src = <&led_flash_front>; cam_vio-supply = <&pm8150l_l1>; cam_bob-supply = <&pm8150l_bob>; cam_vana-supply = <&pm8009_l6>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-qrd.dtsi index a6dc55336f11266681c03578ec25ae4d2eb43e27..05b305c851226938d46ecda99c5188e622e79cc0 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-camera-sensor-qrd.dtsi @@ -11,6 +11,8 @@ * GNU General Public License for more details. */ +#include + &soc { led_flash_rear: qcom,camera-flash@0 { cell-index = <0>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-cdp-overlay.dts index 20c9e2ddf327026fc2b84da9dc904034106f3aa6..a466bace1e2b8dc7a6ea7984a96cdb20ddd9f534 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-cdp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-cdp-overlay.dts @@ -25,3 +25,7 @@ compatible = "qcom,sm8150-cdp", "qcom,sm8150", "qcom,cdp"; qcom,board-id = <1 0>; }; + +&dsi_sharp_4k_dsc_cmd_display { + qcom,dsi-display-active; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi index c566fa8f34c22527d9244a0384f3c8fa73701ea2..dfec76bdf274c1a878f5862dba8ce4c7be660e50 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-cdp.dtsi @@ -254,10 +254,6 @@ }; }; -&dsi_sharp_4k_dsc_cmd_display { - qcom,dsi-display-active; -}; - &ufsphy_mem { compatible = "qcom,ufs-phy-qmp-v4"; diff --git a/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi b/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi index 5da67a724097827ff04a75642d9360d74c447f30..1c678a247b6b9a4c8f423882453587b09aced3c7 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-coresight.dtsi @@ -325,7 +325,7 @@ arm,buffer-size = <0x400000>; coresight-name = "coresight-tmc-etr"; - coresight-ctis = <&cti0>; + coresight-ctis = <&cti0 &cti0>; coresight-csr = <&csr>; clocks = <&clock_aop QDSS_CLK>; @@ -350,7 +350,7 @@ reg-names = "tmc-base"; coresight-name = "coresight-tmc-etf"; - coresight-ctis = <&cti0>; + coresight-ctis = <&cti0 &cti0>; coresight-csr = <&csr>; arm,default-sink; @@ -1099,6 +1099,28 @@ <&tpda_dl_north_out_funnel_dl_north>; }; }; + + port@2 { + reg = <1>; + funnel_dl_north_in_tpdm_wcss: endpoint { + slave-mode; + remote-endpoint = + <&tpdm_wcss_out_funnel_dl_north>; + }; + }; + }; + }; + + tpdm_wcss: tpdm@699c000 { + compatible = "qcom,coresight-dummy"; + + coresight-name = "coresight-tpdm-wcss"; + qcom,dummy-source; + + port { + tpdm_wcss_out_funnel_dl_north: endpoint { + remote-endpoint = <&funnel_dl_north_in_tpdm_wcss>; + }; }; }; @@ -1730,18 +1752,42 @@ coresight-name = "coresight-tpdm-npu"; - clocks = <&clock_npucc NPU_CC_NPU_CORE_APB_CLK>, - <&clock_npucc NPU_CC_NPU_CORE_ATB_CLK>, - <&clock_npucc NPU_CC_NPU_CORE_CTI_CLK>; - - com,tpdm-clks = "npu_cc_npu_core_apb_clk", - "npu_cc_npu_core_atb_clk", - 
"npu_cc_npu_core_cti_clk"; - com,tpdm-regs = <&npu_core_gdsc>; + clocks = <&clock_aop QDSS_CLK>, + <&clock_gcc GCC_NPU_TRIG_CLK>, + <&clock_gcc GCC_NPU_AT_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_APB_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_ATB_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_CLK>, + <&clock_npucc NPU_CC_NPU_CORE_CLK_SRC>, + <&clock_npucc NPU_CC_NPU_CORE_CTI_CLK>; + + clock-names = "apb_pclk", + "gcc_npu_trig_clk", + "gcc_npu_at_clk", + "npu_core_apb_clk", + "npu_core_atb_clk", + "npu_core_clk", + "npu_core_clk_src", + "npu_core_cti_clk"; + + qcom,tpdm-clks = "apb_pclk", + "gcc_npu_trig_clk", + "gcc_npu_at_clk", + "npu_core_apb_clk", + "npu_core_atb_clk", + "npu_core_clk", + "npu_core_clk_src", + "npu_core_cti_clk"; + + vdd-supply = <&npu_core_gdsc>; + vdd_cx-supply = <&VDD_CX_LEVEL>; + qcom,proxy-reg-names ="vdd", "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + qcom,tpdm-regs = "vdd", "vdd_cx"; port{ tpdm_npu_out_tpda: endpoint { - remote-endpoint = <&tpda_in_tpdm_npu>; + remote-endpoint = <&tpda_in_tpdm_npu>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi index ab10631ce6bf8791411529a51f977437c39d3cbe..7d74d28e9f1023f3a042d6fdf47641c762b49adb 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-gpu.dtsi @@ -93,7 +93,7 @@ qcom,tsens-name = "tsens_tz_sensor12"; #cooling-cells = <2>; - qcom,pm-qos-active-latency = <460>; + qcom,pm-qos-active-latency = <44>; clocks = <&clock_gpucc GPU_CC_CXO_CLK>, <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>, diff --git a/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi index b5055c8ab3ec6132ca24929c252ac6d0566f223b..0f1c82fac01537b94cc36042947ef5310a26b816 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-mhi.dtsi @@ -324,27 +324,25 @@ mhi,ee = <2>; }; - mhi_chan@100 { - reg = <100>; - label = "IP_HW0"; - mhi,num-elements = <512>; + mhi_chan@104 { + reg = <104>; + label = "IP_HW_OFFLOAD_0"; mhi,event-ring = <4>; mhi,chan-dir = <1>; - mhi,data-type = <1>; - mhi,doorbell-mode = <3>; + mhi,data-type = <3>; mhi,ee = <2>; - mhi,db-mode-switch; + mhi,offload-chan; }; - mhi_chan@101 { - reg = <101>; - label = "IP_HW0"; - mhi,num-elements = <512>; + mhi_chan@105 { + reg = <105>; + label = "IP_HW_OFFLOAD_0"; mhi,event-ring = <5>; mhi,chan-dir = <2>; - mhi,data-type = <1>; - mhi,doorbell-mode = <3>; + mhi,data-type = <3>; mhi,ee = <2>; + mhi,offload-chan; + mhi,lpm-notify; }; mhi_event@0 { @@ -381,24 +379,27 @@ }; mhi_event@4 { - mhi,num-elements = <1024>; - mhi,intmod = <5>; + mhi,num-elements = <0>; + mhi,intmod = <0>; mhi,msi = <5>; - mhi,chan = <100>; + mhi,chan = <104>; mhi,priority = <1>; mhi,brstmode = <3>; mhi,hw-ev; + mhi,client-manage; + mhi,offload; }; mhi_event@5 { - mhi,num-elements = <1024>; - mhi,intmod = <5>; + mhi,num-elements = <0>; + mhi,intmod = <0>; mhi,msi = <6>; - mhi,chan = <101>; + mhi,chan = <105>; mhi,priority = <1>; mhi,brstmode = <3>; mhi,hw-ev; mhi,client-manage; + mhi,offload; }; mhi_netdev_0: mhi_rmnet@0 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-mtp-overlay.dts index 4721c8b51c4855d5a3cbfb01dc96d485fe8498ad..877a93e7cfb9dda703c57d48e206e2ad812d2b68 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-mtp-overlay.dts @@ -25,3 +25,7 @@ compatible = "qcom,sm8150-mtp", "qcom,sm8150", "qcom,mtp"; qcom,board-id = <8 0>; }; + +&dsi_sharp_4k_dsc_cmd_display { + qcom,dsi-display-active; +}; diff 
--git a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi index e330b3a35b2236a28d0befff6f2d404e6cd225d5..6cf59bad362617cf64e0ae2f60b5a8b7f139dd83 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-mtp.dtsi @@ -19,6 +19,7 @@ #include "sm8150-camera-sensor-mtp.dtsi" #include "sm8150-thermal-overlay.dtsi" #include "smb1390.dtsi" +#include "smb1355.dtsi" &qupv3_se12_2uart { status = "ok"; @@ -228,10 +229,6 @@ qcom,platform-reset-gpio = <&tlmm 6 0>; }; -&dsi_sharp_4k_dsc_cmd_display { - qcom,dsi-display-active; -}; - &qupv3_se9_i2c { status = "ok"; nq@28 { @@ -296,6 +293,13 @@ qcom,battery-data = <&mtp_batterydata>; qcom,hold-soc-while-full; qcom,linearize-soc; + /* ESR fast calibration */ + qcom,fg-esr-timer-chg-fast = <0 10>; + qcom,fg-esr-timer-dischg-fast = <0 10>; + qcom,fg-esr-timer-chg-slow = <0 96>; + qcom,fg-esr-timer-dischg-slow = <0 96>; + qcom,fg-esr-cal-soc-thresh = <26 230>; + qcom,fg-esr-cal-temp-thresh = <10 40>; }; &sdhc_2 { @@ -557,7 +561,7 @@ }; &pm8150b_charger { - qcom,sec-charger-config = <1>; + qcom,sec-charger-config = <3>; qcom,auto-recharge-soc = <98>; io-channels = <&pm8150b_vadc ADC_USB_IN_V_16>, <&pm8150b_vadc ADC_USB_IN_I>, @@ -565,11 +569,14 @@ io-channel-names = "usb_in_voltage", "usb_in_current", "chg_temp"; + qcom,battery-data = <&mtp_batterydata>; + qcom,step-charging-enable; + qcom,sw-jeita-enable; }; &smb1390 { pinctrl-names = "default"; - pinctrl-0 = <&smb1390_stat_default>; + pinctrl-0 = <&smb_stat_default>; status = "ok"; }; @@ -580,3 +587,13 @@ &usb1 { extcon = <&extcon_usb1>; }; + +&smb1355 { + status = "ok"; +}; + +&smb1355_charger { + io-channels = <&pm8150b_vadc ADC_AMUX_THM2>; + io-channel-names = "charger_temp"; + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi index fd99bfeb4482afadff44cb1b3f554d3c29d46eb1..b17b9a60f5da2de836e347144007c03145e986cc 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-npu.dtsi @@ -24,7 +24,6 @@ cache-slice-names = "npu"; cache-slices = <&llcc 23>; clocks = <&clock_npucc NPU_CC_CAL_DP_CLK>, - <&clock_npucc NPU_CC_CAL_DP_CLK_SRC>, <&clock_npucc NPU_CC_XO_CLK>, <&clock_npucc NPU_CC_ARMWIC_CORE_CLK>, <&clock_npucc NPU_CC_BTO_CORE_CLK>, @@ -35,15 +34,16 @@ <&clock_npucc NPU_CC_NPU_CORE_APB_CLK>, <&clock_npucc NPU_CC_NPU_CORE_ATB_CLK>, <&clock_npucc NPU_CC_NPU_CORE_CLK>, - <&clock_npucc NPU_CC_NPU_CORE_CLK_SRC>, <&clock_npucc NPU_CC_NPU_CORE_CTI_CLK>, <&clock_npucc NPU_CC_NPU_CPC_CLK>, <&clock_npucc NPU_CC_NPU_CPC_TIMER_CLK>, <&clock_npucc NPU_CC_PERF_CNT_CLK>, <&clock_npucc NPU_CC_QTIMER_CORE_CLK>, - <&clock_npucc NPU_CC_SLEEP_CLK>; + <&clock_npucc NPU_CC_SLEEP_CLK>, + <&clock_gcc GCC_NPU_AT_CLK>, + <&clock_gcc GCC_NPU_TRIG_CLK>, + <&clock_aop QDSS_CLK>; clock-names = "cal_dp_clk", - "cal_dp_clk_src", "xo_clk", "armwic_core_clk", "bto_core_clk", @@ -54,13 +54,15 @@ "npu_core_apb_clk", "npu_core_atb_clk", "npu_core_clk", - "npu_core_clk_src", "npu_core_cti_clk", "npu_cpc_clk", "npu_cpc_timer_clk", "perf_cnt_clk", "qtimer_core_clk", - "sleep_clk"; + "sleep_clk", + "at_clk", + "trig_clk", + "qdss_clk"; vdd-supply = <&npu_core_gdsc>; vdd_cx-supply = <&VDD_CX_LEVEL>; qcom,proxy-reg-names ="vdd", "vdd_cx"; @@ -77,7 +79,6 @@ qcom,npu-pwrlevel@0 { reg = <0>; clk-freq = <9600000 - 9600000 19200000 19200000 19200000 @@ -91,15 +92,16 @@ 19200000 19200000 19200000 - 19200000 9600000 19200000 + 0 + 0 + 0 0>; }; qcom,npu-pwrlevel@1 { reg = <1>; clk-freq = 
<300000000 - 300000000 19200000 100000000 19200000 @@ -110,18 +112,19 @@ 19200000 60000000 100000000 - 100000000 37500000 100000000 19200000 300000000 19200000 + 0 + 0 + 0 0>; }; qcom,npu-pwrlevel@2 { reg = <2>; clk-freq = <350000000 - 350000000 19200000 150000000 19200000 @@ -132,18 +135,19 @@ 19200000 120000000 150000000 - 150000000 75000000 150000000 19200000 350000000 19200000 + 0 + 0 + 0 0>; }; qcom,npu-pwrlevel@3 { reg = <3>; clk-freq = <400000000 - 400000000 19200000 200000000 19200000 @@ -154,18 +158,19 @@ 19200000 120000000 200000000 - 200000000 75000000 200000000 19200000 400000000 19200000 + 0 + 0 + 0 0>; }; qcom,npu-pwrlevel@4 { reg = <4>; clk-freq = <600000000 - 600000000 19200000 300000000 19200000 @@ -176,18 +181,19 @@ 19200000 240000000 300000000 - 300000000 150000000 300000000 19200000 600000000 19200000 + 0 + 0 + 0 0>; }; qcom,npu-pwrlevel@5 { reg = <5>; clk-freq = <715000000 - 715000000 19200000 350000000 19200000 @@ -198,12 +204,14 @@ 19200000 240000000 350000000 - 350000000 150000000 350000000 19200000 715000000 19200000 + 0 + 0 + 0 0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi index 9fc716ff912672a975cba05f0756372ddcf5e474..a0fa356d831a6b661d419495565473681cad6a6e 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pinctrl.dtsi @@ -3947,5 +3947,181 @@ bias-pull-up; }; }; + + emac { + emac_mdc: emac_mdc { + mux { + pins = "gpio7"; + function = "rgmii_mdc"; + }; + + config { + pins = "gpio7"; + bias-pull-up; + }; + }; + emac_mdio: emac_mdio { + mux { + pins = "gpio59"; + function = "rgmii_mdio"; + }; + + config { + pins = "gpio59"; + bias-pull-up; + }; + }; + + emac_rgmii_txd0: emac_rgmii_txd0 { + mux { + pins = "gpio122"; + function = "rgmii_txd0"; + }; + + config { + pins = "gpio122"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + emac_rgmii_txd1: emac_rgmii_txd1 { + mux { + pins = "gpio4"; + function = "rgmii_txd1"; + }; + + config { + pins = "gpio4"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + emac_rgmii_txd2: emac_rgmii_txd2 { + mux { + pins = "gpio5"; + function = "rgmii_txd2"; + }; + + config { + pins = "gpio5"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_rgmii_txd3: emac_rgmii_txd3 { + mux { + pins = "gpio6"; + function = "rgmii_txd3"; + }; + + config { + pins = "gpio6"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_rgmii_txc: emac_rgmii_txc { + mux { + pins = "gpio114"; + function = "rgmii_txc"; + }; + + config { + pins = "gpio114"; + bias-pull-up; + drive-strength = <16>; + }; + }; + emac_rgmii_tx_ctl: emac_rgmii_tx_ctl { + mux { + pins = "gpio121"; + function = "rgmii_tx"; + }; + + config { + pins = "gpio121"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + + emac_rgmii_rxd0: emac_rgmii_rxd0 { + mux { + pins = "gpio117"; + function = "rgmii_rxd0"; + }; + + config { + pins = "gpio117"; + bias-disable; /* NO pull */ + drive-strength = <2>; /* 2MA */ + }; + }; + + emac_rgmii_rxd1: emac_rgmii_rxd1 { + mux { + pins = "gpio118"; + function = "rgmii_rxd1"; + }; + + config { + pins = "gpio118"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + + emac_rgmii_rxd2: emac_rgmii_rxd2 { + mux { + pins = "gpio119"; + function = "rgmii_rxd2"; + }; + + config { + pins = "gpio119"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_rgmii_rxd3: emac_rgmii_rxd3 { + mux { + pins = "gpio120"; + function = "rgmii_rxd3"; + }; + + config { + pins = "gpio120"; + bias-disable; /* NO pull 
*/ + drive-strength = <2>; + }; + }; + emac_rgmii_rxc: emac_rgmii_rxc { + mux { + pins = "gpio115"; + function = "rgmii_rxc"; + }; + + config { + pins = "gpio115"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + emac_rgmii_rx_ctl: emac_rgmii_rx_ctl { + mux { + pins = "gpio116"; + function = "rgmii_rx"; + }; + + config { + pins = "gpio116"; + bias-disable; /* NO pull */ + drive-strength = <2>; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi index b01b389bbc0fec3ae29c4d448c946e905f84541d..1a2ac061dadb0282def391b2d6ac6eb896f38258 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pm.dtsi @@ -22,6 +22,7 @@ #address-cells = <1>; #size-cells = <0>; label = "L3"; + qcom,clstr-tmr-add = <1000>; qcom,psci-mode-shift = <4>; qcom,psci-mode-mask = <0xfff>; @@ -35,39 +36,14 @@ qcom,time-overhead = <99>; }; - qcom,pm-cluster-level@1 { /* D4 */ + qcom,pm-cluster-level@1 { /* LLCC off, AOSS sleep */ reg = <1>; - label = "l3-pc"; - qcom,psci-mode = <0x4>; - qcom,latency-us = <4562>; - qcom,ss-power = <408>; - qcom,energy-overhead = <2421840>; - qcom,time-overhead = <5376>; - qcom,min-child-idx = <2>; - qcom,is-reset; - }; - - qcom,pm-cluster-level@2 { /* Cx off */ - reg = <2>; - label = "cx-off"; - qcom,psci-mode = <0x224>; - qcom,latency-us = <5562>; - qcom,ss-power = <308>; - qcom,energy-overhead = <2521840>; - qcom,time-overhead = <6376>; - qcom,min-child-idx = <2>; - qcom,is-reset; - qcom,notify-rpm; - }; - - qcom,pm-cluster-level@3 { /* LLCC off, AOSS sleep */ - reg = <3>; label = "llcc-off"; qcom,psci-mode = <0xC24>; qcom,latency-us = <6562>; qcom,ss-power = <108>; - qcom,energy-overhead = <2621840>; - qcom,time-overhead = <7376>; + qcom,energy-overhead = <4000000>; + qcom,time-overhead = <5000>; qcom,min-child-idx = <2>; qcom,is-reset; qcom,notify-rpm; @@ -78,6 +54,9 @@ #size-cells = <0>; qcom,psci-mode-shift = <0>; qcom,psci-mode-mask = <0xf>; + qcom,ref-stddev = <500>; + qcom,tmr-add = <1000>; + qcom,ref-premature-cnt = <1>; qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>; qcom,pm-cpu-level@0 { /* C1 */ @@ -85,19 +64,19 @@ label = "wfi"; qcom,psci-cpu-mode = <0x1>; qcom,latency-us = <43>; - qcom,ss-power = <454>; - qcom,energy-overhead = <38639>; - qcom,time-overhead = <83>; + qcom,ss-power = <150>; + qcom,energy-overhead = <10000>; + qcom,time-overhead = <100>; }; qcom,pm-cpu-level@1 { /* C3 */ reg = <1>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <612>; - qcom,ss-power = <436>; - qcom,energy-overhead = <418225>; - qcom,time-overhead = <885>; + qcom,latency-us = <461>; + qcom,ss-power = <100>; + qcom,energy-overhead = <400000>; + qcom,time-overhead = <500>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -106,10 +85,10 @@ reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <700>; - qcom,ss-power = <400>; - qcom,energy-overhead = <428225>; - qcom,time-overhead = <1000>; + qcom,latency-us = <531>; + qcom,ss-power = <73>; + qcom,energy-overhead = <500000>; + qcom,time-overhead = <600>; qcom,is-reset; qcom,use-broadcast-timer; }; @@ -136,7 +115,7 @@ reg = <1>; label = "pc"; qcom,psci-cpu-mode = <0x3>; - qcom,latency-us = <612>; + qcom,latency-us = <621>; qcom,ss-power = <436>; qcom,energy-overhead = <418225>; qcom,time-overhead = <885>; @@ -148,7 +127,7 @@ reg = <2>; label = "rail-pc"; qcom,psci-cpu-mode = <0x4>; - qcom,latency-us = <700>; + qcom,latency-us = <1061>; qcom,ss-power = <400>; qcom,energy-overhead = <428225>; qcom,time-overhead = 
<1000>; @@ -163,6 +142,7 @@ compatible = "qcom,rpm-stats"; reg = <0xc300000 0x1000>, <0xc3f0004 0x4>; reg-names = "phys_addr_base", "offset_addr"; + qcom,num-records = <3>; }; qcom,rpmh-master-stats@b221200 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi index 6d137062270aa618b75495fa1fc1e898c7ccec0c..c1a7aaed9805ee1a3f856666f016dc0fb7df841b 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-pmic-overlay.dtsi @@ -88,8 +88,8 @@ }; &pm8150b_gpios { - smb1390_stat { - smb1390_stat_default: smb1390_stat_default { + smb_stat { + smb_stat_default: smb_stat_default { pins = "gpio6"; function = "normal"; input-enable; diff --git a/arch/arm64/boot/dts/qcom/sm6150-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-qrd-dvt-overlay.dts similarity index 68% rename from arch/arm64/boot/dts/qcom/sm6150-mtp-overlay.dts rename to arch/arm64/boot/dts/qcom/sm8150-qrd-dvt-overlay.dts index cdb2c0e4e55613de94d3f7c0e950ea541b559116..6dff873d97553aad2b5320785e04ceb253e0ce3c 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-mtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd-dvt-overlay.dts @@ -13,12 +13,15 @@ /dts-v1/; /plugin/; +#include +#include +#include #include -#include "sm6150-mtp.dtsi" +#include "sm8150-qrd-dvt.dtsi" / { - model = "Qualcomm Technologies, Inc. SM6150 MTP"; - compatible = "qcom,sm6150-mtp", "qcom,sm6150", "qcom,mtp"; - qcom,board-id = <8 0>; + model = "QRD DVT"; + compatible = "qcom,sm8150-qrd", "qcom,sm8150", "qcom,qrd"; + qcom,board-id = <0x01000B 0>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-qrd-dvt.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qrd-dvt.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..a737d8ff24fd2678fa6e0a3055d4276c56a7c2e5 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd-dvt.dtsi @@ -0,0 +1,18 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sm8150-qrd.dtsi" + +&usb_qmp_dp_phy { + vdd-supply = <&pm8150_l18>; + qcom,vdd-voltage-level = <0 912000 912000>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi index f347370cc17a41c722477f49b59def2aa4769896..a7884b954ec1cfb5ec46d2302cd483d5b7a9f63d 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-qrd.dtsi @@ -19,6 +19,7 @@ #include "sm8150-camera-sensor-qrd.dtsi" #include "sm8150-thermal-overlay.dtsi" #include "smb1390.dtsi" +#include "smb1355.dtsi" &vendor { bluetooth: bt_wcn3990 { @@ -170,7 +171,32 @@ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>; qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; qcom,mdss-dsi-bl-min-level = <1>; - qcom,mdss-dsi-bl-max-level = <1023>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-brightness-max-level = <1023>; + qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; + qcom,panel-mode-gpio = <&tlmm 7 0>; + qcom,platform-te-gpio = <&tlmm 8 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; +}; + +&dsi_sw43404_amoled_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-brightness-max-level = <1023>; + qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; + qcom,panel-mode-gpio = <&tlmm 7 0>; + qcom,platform-te-gpio = <&tlmm 8 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; +}; + +&dsi_sw43404_amoled_fhd_plus_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-brightness-max-level = <1023>; qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; qcom,panel-mode-gpio = <&tlmm 7 0>; qcom,platform-te-gpio = <&tlmm 8 0>; @@ -199,6 +225,15 @@ &qupv3_se4_i2c { status = "ok"; + redriver@19 { + compatible = "onnn,redriver"; + reg = <0x19>; + extcon = <&pm8150b_pdphy>, <&pm8150b_pdphy>; + eq = /bits/ 8 <0x5 0x4 0x4 0x5>; + flat-gain = /bits/ 8 <0x3 0x1 0x1 0x3>; + output-comp = /bits/ 8 <0x2 0x2 0x2 0x2>; + loss-match = /bits/ 8 <0x0 0x3 0x3 0x0>; + }; }; &ufsphy_mem { @@ -494,11 +529,13 @@ io-channel-names = "usb_in_voltage", "usb_in_current", "chg_temp"; + qcom,battery-data = <&qrd_batterydata>; + qcom,sw-jeita-enable; }; &smb1390 { pinctrl-names = "default"; - pinctrl-0 = <&smb1390_stat_default>; + pinctrl-0 = <&smb_stat_default>; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-regulator.dtsi b/arch/arm64/boot/dts/qcom/sm8150-regulator.dtsi index 56a5f3435b10605e0033db4e6d4db20ce819c8e2..40a4ab37f02a850e4d4fb08cf1d7d1eff3147da5 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-regulator.dtsi @@ -475,6 +475,24 @@ }; }; + rpmh-regulator-ldoa18 { + compatible = "qcom,rpmh-vrm-regulator"; + mboxes = <&apps_rsc 0>; + qcom,resource-name = "ldoa18"; + qcom,regulator-type = "pmic5-ldo"; + qcom,supported-modes = + ; + qcom,mode-threshold-currents = <0 30000>; + L18A: pm8150_l18: regulator-pm8150-l18 { + regulator-name = "pm8150_l18"; + qcom,set = ; + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <912000>; + qcom,init-voltage = <880000>; + }; + }; + rpmh-regulator-smpc1 { compatible = "qcom,rpmh-vrm-regulator"; mboxes = <&apps_rsc 0>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi 
index d50dd42bdbc4c24e12d3319ba902eebaf014e09b..b863f2de64df147156fc6715f3e85a483b0d058c 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde-display.dtsi @@ -28,6 +28,8 @@ #include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi" #include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi" #include "dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi" +#include "dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi" +#include "dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi" #include &tlmm { @@ -321,6 +323,28 @@ qcom,dsi-panel = <&dsi_nt35695b_truly_fhd_video>; }; + dsi_sw43404_amoled_video_display: qcom,dsi-display@17 { + label = "dsi_sw43404_amoled_video_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sw43404_amoled_video>; + }; + + dsi_sw43404_amoled_fhd_plus_cmd_display: qcom,dsi-display@18 { + label = "dsi_sw43404_amoled_fhd_plus_cmd_display"; + qcom,display-type = "primary"; + + qcom,dsi-ctrl-num = <0>; + qcom,dsi-phy-num = <0>; + qcom,dsi-select-clocks = "src_byte_clk0", "src_pixel_clk0"; + + qcom,dsi-panel = <&dsi_sw43404_amoled_fhd_plus_cmd>; + }; + sde_dsi: qcom,dsi-display { compatible = "qcom,dsi-display"; @@ -362,7 +386,9 @@ &dsi_dual_sim_dsc_375_cmd_display &dsi_sw43404_amoled_cmd_display &dsi_nt35695b_truly_fhd_cmd_display - &dsi_nt35695b_truly_fhd_video_display>; + &dsi_nt35695b_truly_fhd_video_display + &dsi_sw43404_amoled_video_display + &dsi_sw43404_amoled_fhd_plus_cmd_display>; }; sde_wb: qcom,wb-display@0 { @@ -395,11 +421,15 @@ }; &mdss_mdp { - connectors = <&sde_wb &sde_dp &sde_dsi>; + connectors = <&sde_rscc &sde_wb &sde_dp &sde_dsi>; }; /* PHY TIMINGS REVISION P */ &dsi_dual_nt35597_truly_video { + qcom,mdss-dsi-min-refresh-rate = <53>; + qcom,mdss-dsi-max-refresh-rate = <60>; + qcom,mdss-dsi-pan-enable-dynamic-fps; + qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp"; qcom,esd-check-enabled; qcom,mdss-dsi-panel-status-check-mode = "reg_read"; qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; @@ -438,6 +468,13 @@ }; &dsi_nt35597_truly_dsc_cmd { + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x9c>; + qcom,mdss-dsi-panel-on-check-value = <0x9c>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 @@ -451,6 +488,13 @@ }; &dsi_nt35597_truly_dsc_video { + qcom,esd-check-enabled; + qcom,mdss-dsi-panel-status-check-mode = "reg_read"; + qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a]; + qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-panel-status-value = <0x9c>; + qcom,mdss-dsi-panel-on-check-value = <0x9c>; + qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 @@ -467,9 +511,9 @@ qcom,esd-check-enabled; qcom,mdss-dsi-panel-status-check-mode = "reg_read"; qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0c]; - qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; - qcom,mdss-dsi-panel-status-value = <0x7>; - qcom,mdss-dsi-panel-on-check-value = <0x7>; + qcom,mdss-dsi-panel-status-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-panel-status-value = <0x77>; + 
qcom,mdss-dsi-panel-on-check-value = <0x77>; qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ @@ -486,8 +530,8 @@ qcom,mdss-dsi-panel-status-check-mode = "reg_read"; qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0c]; qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode"; - qcom,mdss-dsi-panel-status-value = <0x7>; - qcom,mdss-dsi-panel-on-check-value = <0x7>; + qcom,mdss-dsi-panel-status-value = <0x77>; + qcom,mdss-dsi-panel-on-check-value = <0x77>; qcom,mdss-dsi-panel-status-read-length = <1>; qcom,mdss-dsi-display-timings { timing@0{ @@ -663,11 +707,34 @@ }; &dsi_sw43404_amoled_cmd { + qcom,mdss-dsi-display-timings { + timing@0 { + qcom,mdss-dsi-panel-phy-timings = [00 16 05 05 20 1f 06 + 06 03 03 04 00 13 15]; + qcom,display-topology = <2 2 1>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_sw43404_amoled_fhd_plus_cmd { + qcom,mdss-dsi-display-timings { + timing@0 { + qcom,mdss-dsi-panel-phy-timings = [00 12 04 04 1e 1e 04 + 05 02 03 04 00 11 14]; + qcom,display-topology = <2 2 1>; + qcom,default-topology-index = <0>; + }; + }; +}; + +&dsi_sw43404_amoled_video { qcom,mdss-dsi-display-timings { timing@0 { qcom,mdss-dsi-panel-phy-timings = [00 1a 07 06 22 21 07 07 04 03 04 00 16 16]; - qcom,display-topology = <2 1 1>; + /*qcom,mdss-dsi-panel-clockrate = <700000000>;*/ + qcom,display-topology = <2 2 1>; qcom,default-topology-index = <0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi index d6d0388f2258e3ded0bf801289dc3cb51813465c..ea5861d6196319e33d6ed2ab2d73c0d803d18755 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde-pll.dtsi @@ -79,8 +79,6 @@ reg-names = "pll_base", "phy_base", "ln_tx0_base", "ln_tx1_base", "gdsc_base"; - gdsc-supply = <&mdss_core_gdsc>; - clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>, <&clock_rpmh RPMH_CXO_CLK>, <&clock_gcc GCC_DISP_AHB_CLK>, @@ -89,21 +87,6 @@ clock-names = "iface_clk", "ref_clk_src", "gcc_iface", "ref_clk", "pipe_clk"; clock-rate = <0>; - - qcom,platform-supply-entries { - #address-cells = <1>; - #size-cells = <0>; - - qcom,platform-supply-entry@0 { - reg = <0>; - qcom,supply-name = "gdsc"; - qcom,supply-min-voltage = <0>; - qcom,supply-max-voltage = <0>; - qcom,supply-enable-load = <0>; - qcom,supply-disable-load = <0>; - }; - - }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi index 78780b926d67c5fd86751276b941b79a377d76e2..8626a0517ef10858573cc230bb26b118c7b8eadb 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sde.dtsi @@ -24,17 +24,20 @@ clocks = <&clock_gcc GCC_DISP_AHB_CLK>, <&clock_gcc GCC_DISP_HF_AXI_CLK>, + <&clock_gcc GCC_DISP_SF_AXI_CLK>, <&clock_dispcc DISP_CC_MDSS_AHB_CLK>, <&clock_dispcc DISP_CC_MDSS_MDP_CLK>, <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>, - <&clock_dispcc DISP_CC_MDSS_MDP_LUT_CLK>; - clock-names = "gcc_iface", "gcc_bus", + <&clock_dispcc DISP_CC_MDSS_MDP_LUT_CLK>, + <&clock_dispcc DISP_CC_MDSS_ROT_CLK>; + clock-names = "gcc_iface", "gcc_bus", "gcc_nrt_bus", "iface_clk", "core_clk", "vsync_clk", - "lut_clk"; - clock-rate = <0 0 0 300000000 19200000 300000000>; - clock-max-rate = <0 0 0 460000000 19200000 460000000>; + "lut_clk", "rot_clk"; + clock-rate = <0 0 0 0 300000000 19200000 300000000>; + clock-max-rate = <0 0 0 0 460000000 19200000 460000000>; sde-vdd-supply = <&mdss_core_gdsc>; + mmcx-supply = <&VDD_MMCX_LEVEL>; /* interrupt config */ 
interrupts = <0 83 0>; @@ -193,7 +196,7 @@ qcom,sde-cdp-setting = <1 1>, <1 0>; qcom,sde-qos-cpu-mask = <0x3>; - qcom,sde-qos-cpu-dma-latency = <300>; + qcom,sde-qos-cpu-dma-latency = <44>; /* offsets are relative to "mdp_phys + qcom,sde-off */ @@ -246,7 +249,7 @@ qcom,platform-supply-entry@0 { reg = <0>; - qcom,supply-name = "sde-vdd"; + qcom,supply-name = "mmcx"; qcom,supply-min-voltage = <0>; qcom,supply-max-voltage = <0>; qcom,supply-enable-load = <0>; @@ -261,33 +264,13 @@ /* data and reg bus scale settings */ qcom,sde-data-bus { - qcom,msm-bus,name = "mdss_sde_mnoc"; + qcom,msm-bus,name = "mdss_sde"; qcom,msm-bus,num-cases = <3>; qcom,msm-bus,num-paths = <2>; qcom,msm-bus,vectors-KBps = - <22 773 0 0>, <23 773 0 0>, - <22 773 0 6400000>, <23 773 0 6400000>, - <22 773 0 6400000>, <23 773 0 6400000>; - }; - - qcom,sde-llcc-bus { - qcom,msm-bus,name = "mdss_sde_llcc"; - qcom,msm-bus,num-cases = <3>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = - <132 770 0 0>, - <132 770 0 6400000>, - <132 770 0 6400000>; - }; - - qcom,sde-ebi-bus { - qcom,msm-bus,name = "mdss_sde_ebi"; - qcom,msm-bus,num-cases = <3>; - qcom,msm-bus,num-paths = <1>; - qcom,msm-bus,vectors-KBps = - <129 512 0 0>, - <129 512 0 6400000>, - <129 512 0 6400000>; + <22 512 0 0>, <23 512 0 0>, + <22 512 0 6400000>, <23 512 0 6400000>, + <22 512 0 6400000>, <23 512 0 6400000>; }; qcom,sde-reg-bus { @@ -309,7 +292,6 @@ <0xaf30000 0x3fd4>; reg-names = "drv", "wrapper"; qcom,sde-rsc-version = <2>; - status = "disabled"; vdd-supply = <&mdss_core_gdsc>; clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>, @@ -356,20 +338,6 @@ <20000 20512 0 6400000>, <20000 20512 0 6400000>; }; - - qcom,platform-supply-entries { - #address-cells = <1>; - #size-cells = <0>; - - qcom,platform-supply-entry@0 { - reg = <0>; - qcom,supply-name = "mmcx"; - qcom,supply-min-voltage = <0>; - qcom,supply-max-voltage = <0>; - qcom,supply-enable-load = <0>; - qcom,supply-disable-load = <0>; - }; - }; }; mdss_rotator: qcom,mdss_rotator@ae00000 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts index 4e590ea853d38e72c97ea2a2c4f20c5982a482d8..7280e572ceef5f5a55b3de98c0796dab91a078f5 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-2.5k-panel-overlay.dts @@ -26,5 +26,35 @@ / { model = "SDX50M 2.5k panel MTP"; compatible = "qcom,sm8150-mtp", "qcom,sm8150", "qcom,mtp"; - qcom,board-id = <0x01010008 0x1>; + qcom,board-id = <0x01010108 0x1>; +}; + +&dsi_sharp_4k_dsc_cmd_display { + /delete-property/ qcom,dsi-display-active; +}; + +&dsi_dual_nt35597_truly_video { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-mode-gpio = <&tlmm 7 0>; + qcom,platform-te-gpio = <&tlmm 8 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; +}; + +&dsi_dual_nt35597_truly_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-mode-gpio = <&tlmm 7 0>; + qcom,platform-te-gpio = <&tlmm 8 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; +}; + +&dsi_dual_nt35597_truly_cmd_display { + qcom,dsi-display-active; }; diff 
--git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-overlay.dts index 84ae64fe701f202bd1b46d89a69b11b214852856..834f7c095d97a1489ad7c868c1d834583269c885 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m-mtp-overlay.dts @@ -26,5 +26,5 @@ / { model = "SDX50M MTP"; compatible = "qcom,sm8150-mtp", "qcom,sm8150", "qcom,mtp"; - qcom,board-id = <8 1>; + qcom,board-id = <0x01010008 0x1>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi index e715a75c3addd8e0f5ca5648c939e2ef4dcb640c..140ae927b8e3cee23a07c5f542e2b475cd07d697 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi @@ -47,7 +47,6 @@ esoc-0 = <&mdm3>; qcom,smmu-cfg = <0x1d>; qcom,addr-win = <0x0 0x20000000 0x0 0x3fffffff>; - mhi,fw-name = "sdx50m/sbl1.mbn"; }; &tlmm { diff --git a/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi index 212304295be9a77c826a35fa128e4a5f7e09bf03..64d4be3cf4c070e352e65c40de6933edf8f25250 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-smp2p.dtsi @@ -64,6 +64,17 @@ interrupt-controller; #interrupt-cells = <2>; }; + + smp2p_rdbg2_out: qcom,smp2p-rdbg2-out { + qcom,entry-name = "rdbg"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_rdbg2_in: qcom,smp2p-rdbg2-in { + qcom,entry-name = "rdbg"; + interrupt-controller; + #interrupt-cells = <2>; + }; }; qcom,smp2p-dsps@1799000c { @@ -109,6 +120,17 @@ interrupt-controller; #interrupt-cells = <2>; }; + + smp2p_rdbg5_out: qcom,smp2p-rdbg5-out { + qcom,entry-name = "rdbg"; + #qcom,smem-state-cells = <1>; + }; + + smp2p_rdbg5_in: qcom,smp2p-rdbg5-in { + qcom,entry-name = "rdbg"; + interrupt-controller; + #interrupt-cells = <2>; + }; }; /* wlan - inbound entry from mss/WLAN PD */ diff --git a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi index 36261b8c6982c5c1bb47ef3563873c7dd025f9d0..05afdee85db2f631287ccd653149bb9c85d69d88 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi @@ -84,7 +84,6 @@ snps,disable-clk-gating; snps,has-lpm-erratum; snps,hird-threshold = /bits/ 8 <0x10>; - snps,usb3_lpm_capable; snps,ssp-u3-u0-quirk; snps,usb3-u1u2-disable; usb-core-id = <0>; diff --git a/arch/arm64/boot/dts/qcom/sm8150-v2-qrd-dvt.dts b/arch/arm64/boot/dts/qcom/sm8150-v2-qrd-dvt.dts new file mode 100644 index 0000000000000000000000000000000000000000..297bc1c14358a94415327e00eb6f3dc7a4a6691e --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sm8150-v2-qrd-dvt.dts @@ -0,0 +1,22 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "sm8150-v2.dtsi" +#include "sm8150-qrd-dvt.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
SM8150 V2 QRD DVT"; + compatible = "qcom,sm8150-qrd", "qcom,sm8150", "qcom,qrd"; + qcom,board-id = <0x01000B 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi index 52d5767c073fe921404c2e8ca6ba6751d0bdf762..269928567d90ab6c83734e85f4a136a095e07046 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150-v2.dtsi @@ -19,6 +19,10 @@ qcom,msm-id = <339 0x20000>; }; +/* Remove smmu nodes specific to SM8150 */ +/delete-node/ &apps_smmu; +/delete-node/ &kgsl_smmu; + &clock_gcc { compatible = "qcom,gcc-sm8150-v2", "syscon"; }; @@ -38,3 +42,22 @@ &clock_npucc { compatible = "qcom,npucc-sm8150-v2", "syscon"; }; +#include "msm-arm-smmu-sm8150-v2.dtsi" + +&msm_vidc { + qcom,allowed-clock-rates = <240000000 338000000 + 365000000 444000000 533000000>; + + non_secure_cb { + iommus = <&apps_smmu 0x2300 0x60>; + }; + secure_bitstream_cb { + iommus = <&apps_smmu 0x2301 0x4>; + }; + secure_pixel_cb { + iommus = <&apps_smmu 0x2303 0x20>; + }; + secure_non_pixel_cb { + iommus = <&apps_smmu 0x2304 0x60>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index c44d107083e5c2781d6bddd7dd1dbc9526241473..1498634536e5688fbf1409e29ad3133f50faf5de 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -770,6 +770,7 @@ #include "sm8150-sde-pll.dtsi" #include "sm8150-sde.dtsi" +#include "msm-rdbg.dtsi" #include "sm8150-camera.dtsi" @@ -1756,6 +1757,8 @@ qcom,pet-time = <9360>; qcom,ipi-ping; qcom,wakeup-enable; + qcom,scandump-sizes = <0x10100 0x10100 0x10100 0x10100 + 0x18100 0x18100 0x18100 0x18100>; }; qcom,npu@0x9800000 { @@ -2102,6 +2105,33 @@ }; }; + qcom,memshare { + compatible = "qcom,memshare"; + + qcom,client_1 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x0>; + qcom,client-id = <0>; + qcom,allocate-boot-time; + label = "modem"; + }; + + qcom,client_2 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x0>; + qcom,client-id = <2>; + label = "modem"; + }; + + mem_client_3_size: qcom,client_3 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x500000>; + qcom,client-id = <1>; + qcom,allocate-boot-time; + label = "modem"; + }; + }; + qcom,sps { compatible = "qcom,msm_sps_4k"; qcom,pipe-attr-ee; @@ -2129,10 +2159,34 @@ reg = <0x17c0000c 0x4>; }; + ufs_ice: ufsice@1d90000 { + compatible = "qcom,ice"; + reg = <0x1d90000 0x8000>; + qcom,enable-ice-clk; + clock-names = "ufs_core_clk", "bus_clk", + "iface_clk", "ice_core_clk"; + clocks = <&clock_gcc GCC_UFS_PHY_AXI_CLK>, + <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>, + <&clock_gcc GCC_UFS_PHY_AHB_CLK>, + <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>; + qcom,op-freq-hz = <0>, <0>, <0>, <300000000>; + vdd-hba-supply = <&ufs_phy_gdsc>; + qcom,msm-bus,name = "ufs_ice_noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 650 0 0>, /* No vote */ + <1 650 1000 0>; /* Max. 
bandwidth */ + qcom,bus-vector-names = "MIN", + "MAX"; + qcom,instance-type = "ufs"; + }; + ufsphy_mem: ufsphy_mem@1d87000 { reg = <0x1d87000 0xda8>; /* PHY regs */ reg-names = "phy_mem"; #phy-cells = <0>; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <2>; @@ -2152,6 +2206,7 @@ interrupts = <0 265 0>; phys = <&ufsphy_mem>; phy-names = "ufsphy"; + ufs-qcom-crypto = <&ufs_ice>; lanes-per-direction = <2>; dev-ref-clk-freq = <0>; /* 19.2 MHz */ @@ -2235,7 +2290,7 @@ /* PM QoS */ qcom,pm-qos-cpu-groups = <0x0f 0xf0>; - qcom,pm-qos-cpu-group-latency-us = <70 70>; + qcom,pm-qos-cpu-group-latency-us = <44 44>; qcom,pm-qos-default-cpu = <0>; pinctrl-names = "dev-reset-assert", "dev-reset-deassert"; @@ -2260,6 +2315,7 @@ qcom,msm_fastrpc { compatible = "qcom,msm-fastrpc-compute"; + qcom,fastrpc-adsp-audio-pdr; qcom,rpc-latency-us = <611>; qcom,msm_fastrpc_compute_cb1 { @@ -2433,9 +2489,9 @@ /* PM QoS */ qcom,pm-qos-irq-type = "affine_irq"; - qcom,pm-qos-irq-latency = <70 70>; + qcom,pm-qos-irq-latency = <44 44>; qcom,pm-qos-cpu-groups = <0x3f 0xc0>; - qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>; + qcom,pm-qos-legacy-latency-us = <44 44>, <44 44>; status = "disabled"; }; @@ -2643,6 +2699,23 @@ qcom,intents = <0x64 64>; }; + qcom,msm_cdsprm_rpmsg { + compatible = "qcom,msm-cdsprm-rpmsg"; + qcom,glink-channels = "cdsprmglink-apps-dsp"; + qcom,intents = <0x20 12>; + + qcom,cdsp-cdsp-l3-gov { + compatible = "qcom,cdsp-l3"; + qcom,target-dev = <&cdsp_cdsp_l3_lat>; + }; + + qcom,msm_cdsp_rm { + compatible = "qcom,msm-cdsp-rm"; + qcom,qos-latency-us = <44>; + qcom,qos-maxhold-ms = <20>; + }; + }; + qcom,cdsp_glink_ssr { qcom,glink-channels = "glink_ssr"; qcom,notify-edges = <&glink_modem>, @@ -2759,9 +2832,9 @@ qmp_npu0: qcom,qmp-npu-low@9818000 { compatible = "qcom,qmp-mbox"; - reg = <0x9818000 0x8000>, <0x17c00010 0x4>; + reg = <0x9818000 0x8000>, <0x9901008 0x4>; reg-names = "msgram", "irq-reg-base"; - qcom,irq-mask = <0x20>; + qcom,irq-mask = <0x12>; interrupts = ; label = "npu_qmp_low"; @@ -2770,11 +2843,11 @@ #mbox-cells = <1>; }; - qmp_npu1: qcom,qmp-npu-high@981a000 { + qmp_npu1: qcom,qmp-npu-high@9818000 { compatible = "qcom,qmp-mbox"; - reg = <0x9818000 0x8000>, <0x17c00010 0x4>; + reg = <0x9818000 0x8000>, <0x9901008 0x4>; reg-names = "msgram", "irq-reg-base"; - qcom,irq-mask = <0x40>; + qcom,irq-mask = <0x14>; interrupts = ; label = "npu_qmp_high"; @@ -2802,6 +2875,7 @@ compatible = "qcom,qseecom"; reg = <0x87900000 0x2200000>; reg-names = "secapp-region"; + memory-region = <&qseecom_mem>; qcom,hlos-num-ce-hw-instances = <1>; qcom,hlos-ce-hw-instance = <0>; qcom,qsee-ce-hw-instance = <0>; @@ -2814,6 +2888,12 @@ qcom,qsee-reentrancy-support = <2>; }; + qcom_smcinvoke: smcinvoke@87900000 { + compatible = "qcom,smcinvoke"; + reg = <0x87900000 0x2200000>; + reg-names = "secapp-region"; + }; + qcom_rng: qrng@793000 { compatible = "qcom,msm-rng"; reg = <0x793000 0x1000>; @@ -2826,7 +2906,7 @@ <1 618 0 0>, /* No vote */ <1 618 0 300000>; /* 75 MHz */ clocks = <&clock_gcc GCC_PRNG_AHB_CLK>; - clock-names = "iface"; + clock-names = "iface_clk"; }; qcom_cedev: qcedev@1de0000 { @@ -3705,6 +3785,7 @@ }; &gpu_cx_gdsc { + parent-supply = <&VDD_CX_LEVEL>; status = "ok"; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150p-v2.dtsi b/arch/arm64/boot/dts/qcom/sm8150p-v2.dtsi index 4351f650b1ea3de2a23b9fb115e8043d4295e615..d4d2cf37d7458e436a52b17732b570344f8ef57b 100644 --- a/arch/arm64/boot/dts/qcom/sm8150p-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150p-v2.dtsi @@ -15,5 +15,5 @@ / { model = "Qualcomm 
Technologies, Inc. SM8150P v2"; qcom,msm-name = "SM8150P v2"; - qcom,msm-id = <356 0x20000>; + qcom,msm-id = <361 0x20000>; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150p.dtsi b/arch/arm64/boot/dts/qcom/sm8150p.dtsi index 87632a0933e0b14315aad6246f5efc0d5a5d4f7e..2ed1d50cd29612fd2e0fca600b6050957d7a39b7 100644 --- a/arch/arm64/boot/dts/qcom/sm8150p.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150p.dtsi @@ -15,5 +15,5 @@ / { model = "Qualcomm Technologies, Inc. SM8150P v1"; qcom,msm-name = "SM8150P v1"; - qcom,msm-id = <356 0x10000>; + qcom,msm-id = <361 0x10000>; }; diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..70b03901269ae4f71273fdf1738d8ebf980c8b9f --- /dev/null +++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi @@ -0,0 +1,58 @@ +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +&qupv3_se4_i2c { + smb1355: qcom,smb1355@c { + compatible = "qcom,i2c-pmic"; + reg = <0xc>; + #address-cells = <1>; + #size-cells = <0>; + interrupt-parent = <&spmi_bus>; + interrupts = <0x2 0xC5 0x0 IRQ_TYPE_LEVEL_LOW>; + interrupt_names = "smb1355"; + interrupt-controller; + #interrupt-cells = <3>; + qcom,periph-map = <0x10 0x12 0x13 0x16>; + status = "disabled"; + + smb1355_revid: qcom,revid@100 { + compatible = "qcom,qpnp-revid"; + reg = <0x100 0x100>; + }; + + smb1355_charger: qcom,smb1355-charger@1000 { + compatible = "qcom,smb1355"; + qcom,pmic-revid = <&smb1355_revid>; + reg = <0x1000 0x700>; + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&smb1355>; + status = "disabled"; + + qcom,chgr@1000 { + reg = <0x1000 0x100>; + interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "chg-state-change"; + }; + + qcom,chgr-misc@1600 { + reg = <0x1600 0x100>; + interrupts = <0x16 0x1 IRQ_TYPE_EDGE_RISING>, + <0x16 0x6 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog-bark", + "temperature-change"; + }; + }; + }; +}; diff --git a/arch/arm64/configs/qcs405_defconfig b/arch/arm64/configs/qcs405_defconfig deleted file mode 100644 index 90325456690544158ea9ff393f2114355ee4d918..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/qcs405_defconfig +++ /dev/null @@ -1,488 +0,0 @@ -CONFIG_AUDIT=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_DEBUG=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PROFILING=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_QCS405=y -CONFIG_NR_CPUS=4 -CONFIG_PREEMPT=y 
-CONFIG_CLEANCACHE=y -CONFIG_CMA=y -CONFIG_CMA_DEBUGFS=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_PM_DEBUG=y -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_CPU_FREQ=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_DEBUGFS=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y 
-CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_BT=y -CONFIG_BT_RFCOMM=y -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=y -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_HIDP=y -CONFIG_CFG80211=y -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_RFKILL=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_CMA=y -CONFIG_MTD=y -CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_MSM_QPIC_NAND=y -CONFIG_MTD_NAND=y -CONFIG_MTD_UBI=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_UID_SYS_STATS=y -CONFIG_QPNP_MISC=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_KS8851=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y -CONFIG_USB_NET_SMSC75XX=y -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_INPUT_EVDEV=y -CONFIG_INPUT_EVBUG=m -CONFIG_INPUT_KEYRESET=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -CONFIG_JOYSTICK_XPAD=y -CONFIG_INPUT_TABLET=y -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ATMEL_MXT=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_KEYCHORD=y -CONFIG_INPUT_UINPUT=y -CONFIG_INPUT_GPIO=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVMEM is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_SERIAL_MSM_HS=y -CONFIG_HW_RANDOM=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MSM_V2=y -CONFIG_SPI=y -CONFIG_SPI_DEBUG=y -CONFIG_SPI_QUP=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_SLIMBUS=y -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_PINCTRL_QCS405=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_SMB1351_USB_CHARGER=y -CONFIG_THERMAL=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_FAN53555=y -CONFIG_REGULATOR_CPR=y -CONFIG_REGULATOR_MEM_ACC=y -CONFIG_REGULATOR_RPM_SMD=y -CONFIG_REGULATOR_SPM=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SOC_CAMERA=y -CONFIG_SOC_CAMERA_PLATFORM=y -CONFIG_FB=y -CONFIG_FB_MSM=y -CONFIG_FB_MSM_MDSS=y -CONFIG_FB_MSM_MDSS_WRITEBACK=y -CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y 
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_SOC=y -CONFIG_HIDRAW=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_USB_HIDDEV=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_ACM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_STORAGE_DATAFAB=y -CONFIG_USB_STORAGE_FREECOM=y -CONFIG_USB_STORAGE_ISD200=y -CONFIG_USB_STORAGE_USBAT=y -CONFIG_USB_STORAGE_SDDR09=y -CONFIG_USB_STORAGE_SDDR55=y -CONFIG_USB_STORAGE_JUMPSHOT=y -CONFIG_USB_STORAGE_ALAUDA=y -CONFIG_USB_STORAGE_KARMA=y -CONFIG_USB_STORAGE_CYPRESS_ATACB=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_MSM=y -CONFIG_USB_SERIAL=y -CONFIG_USB_EHSET_TEST_FIXTURE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_DEBUG_FILES=y -CONFIG_USB_GADGET_DEBUG_FS=y -CONFIG_USB_GADGET_VBUS_DRAW=500 -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=m -CONFIG_MMC_RING_BUFFER=y -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_SPS_DMA=y -CONFIG_UIO=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_QCOM_CLK_SMD_RPM=y -CONFIG_MDM_GCC_QCS405=y -CONFIG_MDM_DEBUGCC_QCS405=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_MAILBOX=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_ARM_SMMU=y -CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_IOMMU_DEBUG=y -CONFIG_IOMMU_DEBUG_TRACKING=y -CONFIG_IOMMU_TESTS=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_RPM=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD_RPM=y -CONFIG_MSM_SPM=y -CONFIG_MSM_L2_SPM=y -CONFIG_QCOM_SCM=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SERVICE_LOCATOR=y -CONFIG_MSM_SERVICE_NOTIFIER=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_SYSMON_QMI_COMM=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_QCOM_DCC_V2=y -CONFIG_MSM_RPM_SMD=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -CONFIG_MSM_PM=y -CONFIG_IIO=y -CONFIG_QCOM_SPMI_ADC5=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_QTI_MPM=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y -CONFIG_PAGE_OWNER=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_OBJECTS=y -CONFIG_DEBUG_OBJECTS_FREE=y -CONFIG_DEBUG_OBJECTS_TIMERS=y -CONFIG_DEBUG_OBJECTS_WORK=y -CONFIG_DEBUG_OBJECTS_RCU_HEAD=y -CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHEDSTATS=y 
-CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_ATOMIC_SLEEP=y -CONFIG_DEBUG_LIST=y -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y -CONFIG_UFS_FAULT_INJECTION=y -CONFIG_FAULT_INJECTION_DEBUG_FS=y -CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_IPC_LOGGING=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_LKDTM=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -CONFIG_CORESIGHT_SOURCE_ETM4X=y -CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_DUMMY=y -CONFIG_CORESIGHT_REMOTE_ETM=y -CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_SECURITY_NETWORK=y -CONFIG_LSM_MMAP_MIN_ADDR=4096 -CONFIG_HARDENED_USERCOPY=y -CONFIG_SECURITY_SELINUX=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y diff --git a/arch/arm64/configs/qcs405_defconfig b/arch/arm64/configs/qcs405_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..372fd3e54d888c039678b077724b27f28e68fa29 --- /dev/null +++ b/arch/arm64/configs/qcs405_defconfig @@ -0,0 +1 @@ +vendor/qcs405_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sa8155-perf_defconfig b/arch/arm64/configs/sa8155-perf_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..8fb0cc34e9acb07e55393536d8145670168b41ed --- /dev/null +++ b/arch/arm64/configs/sa8155-perf_defconfig @@ -0,0 +1 @@ +vendor/sa8155-perf_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sa8155_defconfig b/arch/arm64/configs/sa8155_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..005726151a08f9f0d914e8fd263611cb2ade3b59 --- /dev/null +++ b/arch/arm64/configs/sa8155_defconfig @@ -0,0 +1 @@ +vendor/sa8155_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sdmsteppe-perf_defconfig b/arch/arm64/configs/sdmsteppe-perf_defconfig deleted file mode 100644 index e4e007d601ba3ee03e043d290deb6c405f5d11c1..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/sdmsteppe-perf_defconfig +++ /dev/null @@ -1,602 +0,0 @@ -CONFIG_LOCALVERSION="-perf" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_AUDIT=y -# CONFIG_AUDITSYSCALL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_RCU_EXPERT=y -CONFIG_RCU_FAST_NO_HZ=y -CONFIG_RCU_NOCB_CPU=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_BPF=y -CONFIG_SCHED_CORE_CTL=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_SCHED_AUTOGROUP=y -CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y -CONFIG_BLK_DEV_INITRD=y -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_BPF_SYSCALL=y -# CONFIG_MEMBARRIER is not set -CONFIG_EMBEDDED=y -# CONFIG_SLUB_DEBUG is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB_FREELIST_RANDOM=y -CONFIG_SLAB_FREELIST_HARDENED=y -CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_STRONG=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y 
-CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_SM6150=y -CONFIG_ARCH_SDMMAGPIE=y -CONFIG_PCI=y -CONFIG_PCI_MSM=y -CONFIG_SCHED_MC=y -CONFIG_NR_CPUS=8 -CONFIG_PREEMPT=y -CONFIG_HZ_100=y -CONFIG_CMA=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set -CONFIG_ARMV8_DEPRECATED=y -CONFIG_SWP_EMULATION=y -CONFIG_CP15_BARRIER_EMULATION=y -CONFIG_SETEND_EMULATION=y -# CONFIG_ARM64_VHE is not set -CONFIG_RANDOMIZE_BASE=y -# CONFIG_EFI is not set -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_BOOST=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_IP_NF_IPTABLES=y 
-CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_NF_SOCKET_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_SCH_MULTIQ=y -CONFIG_NET_SCH_INGRESS=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_GACT=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_SOCKEV_NLMCAST=y -CONFIG_BT=y -CONFIG_MSM_BT_POWER=y -CONFIG_CFG80211=y -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_RFKILL=y -CONFIG_NFC_NQ=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_CMA=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_QSEECOM=y -CONFIG_UID_SYS_STATS=y -CONFIG_MEMORY_STATE_TIME=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_UEVENT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_BONDING=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_SKY2=y -CONFIG_RMNET=y -CONFIG_SMSC911X=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y -CONFIG_WIL6210=m -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_CLD_LL_CORE=y -CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_UINPUT=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_VT is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVMEM is not set -CONFIG_SERIAL_MSM_GENI=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_DIAG_CHAR=y -CONFIG_MSM_ADSPRPC=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_QCOM_GENI=y -CONFIG_SPI=y -CONFIG_SPI_QCOM_GENI=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_SIMULATOR=y -CONFIG_PM8150_PMIC_SIMULATOR=y -CONFIG_PM8150B_PMIC_SIMULATOR=y -CONFIG_PM8150L_PMIC_SIMULATOR=y -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_PINCTRL_SDMMAGPIE=y -CONFIG_PINCTRL_SM6150=y -CONFIG_GPIO_SYSFS=y -CONFIG_POWER_RESET_QCOM=y -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_POWER_RESET_XGENE=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_QPNP_FG_GEN4=y -CONFIG_QPNP_SMB5=y 
-CONFIG_THERMAL=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_THERMAL_LIMITS_DCVS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_AOP_REG_COOLING_DEVICE=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_QTI_BCL_PMIC5=y -CONFIG_QTI_BCL_SOC_DRIVER=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_QPNP_LCDB=y -CONFIG_REGULATOR_REFGEN=y -CONFIG_REGULATOR_RPMH=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_VIDEO_ADV_DEBUG=y -CONFIG_VIDEO_FIXED_MINOR_RANGES=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SPECTRA_CAMERA=y -CONFIG_MSM_VIDC_V4L2=y -CONFIG_MSM_VIDC_GOVERNORS=y -CONFIG_MSM_SDE_ROTATOR=y -CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y -CONFIG_MSM_NPU=y -CONFIG_DRM=y -CONFIG_DRM_MSM_REGISTER_LOGGING=y -CONFIG_DRM_SDE_EVTLOG_DEBUG=y -CONFIG_DRM_SDE_RSC=y -CONFIG_FB_ARMCLCD=y -CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_USB_AUDIO_QMI=y -CONFIG_SND_SOC=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_HID_PLANTRONICS=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_MSM=y -CONFIG_USB_ISP1760=y -CONFIG_USB_ISP1760_HOST_ROLE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_USB_MSM_SSPHY_QMP=y -CONFIG_MSM_HSUSB_PHY=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_VBUS_DRAW=900 -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_MTP=y -CONFIG_USB_CONFIGFS_F_PTP=y -CONFIG_USB_CONFIGFS_F_ACC=y -CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y -CONFIG_USB_CONFIGFS_UEVENT=y -CONFIG_USB_CONFIGFS_F_MIDI=y -CONFIG_USB_CONFIGFS_F_HID=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_USB_CONFIGFS_F_CDEV=y -CONFIG_USB_CONFIGFS_F_CCID=y -CONFIG_USB_CONFIGFS_F_GSI=y -CONFIG_USB_CONFIGFS_F_QDSS=y -CONFIG_USB_PD_POLICY=y -CONFIG_QPNP_USB_PDPHY=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=y -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_QPNP_FLASH_V2=y -CONFIG_LEDS_QPNP_HAPTICS=y -CONFIG_LEDS_QTI_TRI_LED=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_EDAC=y -CONFIG_EDAC_KRYO_ARM64=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_GPI_DMA=y -CONFIG_UIO=y -CONFIG_UIO_MSM_SHAREDMEM=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QCOM_GENI_SE=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_USB_BAM=y -CONFIG_IPA3=y -CONFIG_IPA_WDI_UNIFIED_API=y -CONFIG_RMNET_IPA3=y -CONFIG_RNDIS_IPA=y -CONFIG_IPA_UT=y -CONFIG_MSM_11AD=m -CONFIG_QCOM_MDSS_PLL=y -CONFIG_SPMI_PMIC_CLKDIV=y -CONFIG_MSM_CLK_AOP_QMP=y -CONFIG_MSM_GCC_SM8150=y 
-CONFIG_MSM_NPUCC_SM8150=y -CONFIG_MSM_VIDEOCC_SM8150=y -CONFIG_MSM_CAMCC_SM8150=y -CONFIG_CLOCK_CPU_OSM=y -CONFIG_MSM_DISPCC_SM8150=y -CONFIG_MSM_DEBUGCC_SM8150=y -CONFIG_MSM_CLK_RPMH=y -CONFIG_MSM_GPUCC_SM8150=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_MSM_QMP=y -CONFIG_IOMMU_IO_PGTABLE_FAST=y -CONFIG_ARM_SMMU=y -CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_IOMMU_DEBUG=y -CONFIG_IOMMU_DEBUG_TRACKING=y -CONFIG_IOMMU_TESTS=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_GLINK_SPSS=y -CONFIG_QCOM_CPUSS_DUMP=y -CONFIG_QCOM_RUN_QUEUE_STATS=y -CONFIG_QCOM_LLCC=y -CONFIG_QCOM_SM6150_LLCC=y -CONFIG_QCOM_SDMMAGPIE_LLCC=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_MEMORY_DUMP_V2=y -CONFIG_QCOM_WATCHDOG_V2=y -CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SERVICE_LOCATOR=y -CONFIG_MSM_SERVICE_NOTIFIER=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_SYSMON_QMI_COMM=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_QCOM_DCC_V2=y -CONFIG_QCOM_SECURE_BUFFER=y -CONFIG_ICNSS=y -CONFIG_ICNSS_QMI=y -CONFIG_QCOM_EUD=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_QCOM_BUS_CONFIG_RPMH=y -CONFIG_QCOM_COMMAND_DB=y -CONFIG_QCOM_EARLY_RANDOM=y -CONFIG_QTI_RPMH_API=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -CONFIG_QTI_RPM_STATS_LOG=y -CONFIG_MSM_CDSP_LOADER=y -CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_PM=y -CONFIG_QCOM_FSA4480_I2C=y -CONFIG_MSM_PERFORMANCE=y -CONFIG_QMP_DEBUGFS_CLIENT=y -CONFIG_DEVFREQ_GOV_PASSIVE=y -CONFIG_QCOM_BIMC_BWMON=y -CONFIG_ARM_MEMLAT_MON=y -CONFIG_QCOMCCI_HWMON=y -CONFIG_QCOM_M4M_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y -CONFIG_DEVFREQ_GOV_MEMLAT=y -CONFIG_DEVFREQ_SIMPLE_DEV=y -CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_EXTCON_USB_GPIO=y -CONFIG_IIO=y -CONFIG_QCOM_SPMI_ADC5=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_QCOM_KGSL=y -CONFIG_ARM_GIC_V3_ACL=y -CONFIG_QCOM_LLCC_PMU=y -CONFIG_RAS=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_NVMEM_SPMI_SDAM=y -CONFIG_SENSORS_SSC=y -CONFIG_ESOC=y -CONFIG_ESOC_DEV=y -CONFIG_ESOC_CLIENT=y -CONFIG_ESOC_MDM_4x=y -CONFIG_ESOC_MDM_DRV=y -CONFIG_ESOC_MDM_DBG_ENG=y -CONFIG_MSM_TZ_LOG=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_SDCARD_FS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHEDSTATS=y -# CONFIG_DEBUG_PREEMPT is not set -CONFIG_IPC_LOGGING=y -CONFIG_DEBUG_ALIGN_RODATA=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -CONFIG_CORESIGHT_SOURCE_ETM4X=y -CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_DUMMY=y -CONFIG_CORESIGHT_REMOTE_ETM=y -CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SMACK=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y -CONFIG_CRYPTO_DEV_QCRYPTO=y -CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y 
-CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y diff --git a/arch/arm64/configs/sdmsteppe-perf_defconfig b/arch/arm64/configs/sdmsteppe-perf_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..7b9298c42bcb0e921b81d92a268030a8c7d5a855 --- /dev/null +++ b/arch/arm64/configs/sdmsteppe-perf_defconfig @@ -0,0 +1 @@ +vendor/sdmsteppe-perf_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sdmsteppe_defconfig b/arch/arm64/configs/sdmsteppe_defconfig deleted file mode 100644 index 696fde9ce5c8ffcba9eb9d2b68fca84640bc16fe..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/sdmsteppe_defconfig +++ /dev/null @@ -1,679 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_AUDIT=y -# CONFIG_AUDITSYSCALL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_RCU_EXPERT=y -CONFIG_RCU_FAST_NO_HZ=y -CONFIG_RCU_NOCB_CPU=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_DEBUG=y -CONFIG_SCHED_CORE_CTL=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_SCHED_AUTOGROUP=y -CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y -CONFIG_BLK_DEV_INITRD=y -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_BPF_SYSCALL=y -# CONFIG_MEMBARRIER is not set -CONFIG_EMBEDDED=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB_FREELIST_RANDOM=y -CONFIG_SLAB_FREELIST_HARDENED=y -CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_STRONG=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y -# CONFIG_IOSCHED_DEADLINE is not set -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_SM6150=y -CONFIG_ARCH_SDMMAGPIE=y -CONFIG_PCI=y -CONFIG_PCI_MSM=y -CONFIG_SCHED_MC=y -CONFIG_NR_CPUS=8 -CONFIG_PREEMPT=y -CONFIG_HZ_100=y -CONFIG_CLEANCACHE=y -CONFIG_CMA=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set -CONFIG_ARMV8_DEPRECATED=y -CONFIG_SWP_EMULATION=y -CONFIG_CP15_BARRIER_EMULATION=y -CONFIG_SETEND_EMULATION=y -# CONFIG_ARM64_VHE is not set -CONFIG_RANDOMIZE_BASE=y -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_PM_DEBUG=y -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_BOOST=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y 
-CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_NF_SOCKET_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_DEBUGFS=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_SCH_MULTIQ=y -CONFIG_NET_SCH_INGRESS=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_GACT=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_DNS_RESOLVER=y -CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y 
-CONFIG_SOCKEV_NLMCAST=y -CONFIG_BT=y -CONFIG_MSM_BT_POWER=y -CONFIG_CFG80211=y -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y -CONFIG_CFG80211_INTERNAL_REGDB=y -# CONFIG_CFG80211_CRDA_SUPPORT is not set -CONFIG_RFKILL=y -CONFIG_NFC_NQ=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_CMA=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_QSEECOM=y -CONFIG_UID_SYS_STATS=y -CONFIG_MEMORY_STATE_TIME=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_UEVENT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_BONDING=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_RMNET=y -CONFIG_PHYLIB=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_WIL6210=m -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_CLD_LL_CORE=y -CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_UINPUT=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_VT is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_MSM_GENI=y -CONFIG_SERIAL_MSM_GENI_CONSOLE=y -CONFIG_SERIAL_DEV_BUS=y -CONFIG_TTY_PRINTK=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_DIAG_CHAR=y -CONFIG_MSM_ADSPRPC=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_QCOM_GENI=y -CONFIG_SPI=y -CONFIG_SPI_QCOM_GENI=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_SIMULATOR=y -CONFIG_PM8150_PMIC_SIMULATOR=y -CONFIG_PM8150B_PMIC_SIMULATOR=y -CONFIG_PM8150L_PMIC_SIMULATOR=y -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_PINCTRL_SDMMAGPIE=y -CONFIG_PINCTRL_SM6150=y -CONFIG_GPIO_SYSFS=y -CONFIG_POWER_RESET_QCOM=y -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_POWER_RESET_XGENE=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_QPNP_FG_GEN4=y -CONFIG_QPNP_SMB5=y -CONFIG_THERMAL=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_THERMAL_LIMITS_DCVS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_AOP_REG_COOLING_DEVICE=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_QTI_BCL_PMIC5=y -CONFIG_QTI_BCL_SOC_DRIVER=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_QPNP_LCDB=y -CONFIG_REGULATOR_REFGEN=y -CONFIG_REGULATOR_RPMH=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_VIDEO_ADV_DEBUG=y -CONFIG_VIDEO_FIXED_MINOR_RANGES=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SPECTRA_CAMERA=y -CONFIG_MSM_VIDC_V4L2=y -CONFIG_MSM_VIDC_GOVERNORS=y -CONFIG_MSM_SDE_ROTATOR=y -CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y -CONFIG_MSM_NPU=y -CONFIG_DRM=y -CONFIG_DRM_MSM_REGISTER_LOGGING=y -CONFIG_DRM_SDE_EVTLOG_DEBUG=y -CONFIG_DRM_SDE_RSC=y -CONFIG_FB_VIRTUAL=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y 
-CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_USB_AUDIO_QMI=y -CONFIG_SND_SOC=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_HID_PLANTRONICS=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_MSM=y -CONFIG_USB_ISP1760=y -CONFIG_USB_ISP1760_HOST_ROLE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_USB_MSM_SSPHY_QMP=y -CONFIG_MSM_HSUSB_PHY=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_VBUS_DRAW=900 -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_MTP=y -CONFIG_USB_CONFIGFS_F_PTP=y -CONFIG_USB_CONFIGFS_F_ACC=y -CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y -CONFIG_USB_CONFIGFS_UEVENT=y -CONFIG_USB_CONFIGFS_F_MIDI=y -CONFIG_USB_CONFIGFS_F_HID=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_USB_CONFIGFS_F_CDEV=y -CONFIG_USB_CONFIGFS_F_CCID=y -CONFIG_USB_CONFIGFS_F_GSI=y -CONFIG_USB_CONFIGFS_F_QDSS=y -CONFIG_USB_PD_POLICY=y -CONFIG_QPNP_USB_PDPHY=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=y -CONFIG_MMC_RING_BUFFER=y -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_QPNP_FLASH_V2=y -CONFIG_LEDS_QPNP_HAPTICS=y -CONFIG_LEDS_QTI_TRI_LED=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_EDAC=y -CONFIG_EDAC_KRYO_ARM64=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y -CONFIG_EDAC_QCOM_LLCC=y -CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y -CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_GPI_DMA=y -CONFIG_QCOM_GPI_DMA_DEBUG=y -CONFIG_UIO=y -CONFIG_UIO_MSM_SHAREDMEM=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QCOM_GENI_SE=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_USB_BAM=y -CONFIG_IPA3=y -CONFIG_IPA_WDI_UNIFIED_API=y -CONFIG_RMNET_IPA3=y -CONFIG_RNDIS_IPA=y -CONFIG_IPA_UT=y -CONFIG_MSM_11AD=m -CONFIG_QCOM_MDSS_PLL=y -CONFIG_SPMI_PMIC_CLKDIV=y -CONFIG_MSM_CLK_AOP_QMP=y -CONFIG_MSM_GCC_SM8150=y -CONFIG_MSM_NPUCC_SM8150=y -CONFIG_MSM_VIDEOCC_SM8150=y -CONFIG_MSM_CAMCC_SM8150=y -CONFIG_CLOCK_CPU_OSM=y -CONFIG_MSM_DISPCC_SM8150=y -CONFIG_MSM_DEBUGCC_SM8150=y -CONFIG_MSM_CLK_RPMH=y -CONFIG_MSM_GPUCC_SM8150=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_MSM_QMP=y -CONFIG_IOMMU_IO_PGTABLE_FAST=y -CONFIG_ARM_SMMU=y -CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_IOMMU_DEBUG=y -CONFIG_IOMMU_DEBUG_TRACKING=y -CONFIG_IOMMU_TESTS=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_GLINK_SPSS=y -CONFIG_QCOM_CPUSS_DUMP=y -CONFIG_QCOM_RUN_QUEUE_STATS=y -CONFIG_QCOM_LLCC=y -CONFIG_QCOM_SM6150_LLCC=y -CONFIG_QCOM_SDMMAGPIE_LLCC=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_MEMORY_DUMP_V2=y -CONFIG_QCOM_WATCHDOG_V2=y -CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_QCOM_WDOG_IPI_ENABLE=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SERVICE_LOCATOR=y -CONFIG_MSM_SERVICE_NOTIFIER=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y 
-CONFIG_MSM_SYSMON_QMI_COMM=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_QCOM_DCC_V2=y -CONFIG_MSM_GLADIATOR_HANG_DETECT=y -CONFIG_QCOM_SECURE_BUFFER=y -CONFIG_ICNSS=y -CONFIG_ICNSS_DEBUG=y -CONFIG_ICNSS_QMI=y -CONFIG_QCOM_EUD=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_QCOM_BUS_CONFIG_RPMH=y -CONFIG_QCOM_COMMAND_DB=y -CONFIG_QCOM_EARLY_RANDOM=y -CONFIG_QTI_RPMH_API=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -CONFIG_QTI_RPM_STATS_LOG=y -CONFIG_MSM_CDSP_LOADER=y -CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_PM=y -CONFIG_QCOM_FSA4480_I2C=y -CONFIG_MSM_PERFORMANCE=y -CONFIG_QMP_DEBUGFS_CLIENT=y -CONFIG_DEVFREQ_GOV_PASSIVE=y -CONFIG_QCOM_BIMC_BWMON=y -CONFIG_ARM_MEMLAT_MON=y -CONFIG_QCOMCCI_HWMON=y -CONFIG_QCOM_M4M_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y -CONFIG_DEVFREQ_GOV_MEMLAT=y -CONFIG_DEVFREQ_SIMPLE_DEV=y -CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_EXTCON_USB_GPIO=y -CONFIG_IIO=y -CONFIG_QCOM_SPMI_ADC5=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_QCOM_KGSL=y -CONFIG_ARM_GIC_V3_ACL=y -CONFIG_PHY_XGENE=y -CONFIG_QCOM_LLCC_PMU=y -CONFIG_RAS=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_NVMEM_SPMI_SDAM=y -CONFIG_SENSORS_SSC=y -CONFIG_ESOC=y -CONFIG_ESOC_DEV=y -CONFIG_ESOC_CLIENT=y -CONFIG_ESOC_DEBUG=y -CONFIG_ESOC_MDM_4x=y -CONFIG_ESOC_MDM_DRV=y -CONFIG_ESOC_MDM_DBG_ENG=y -CONFIG_MSM_TZ_LOG=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_EFIVAR_FS=y -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_SDCARD_FS=y -# CONFIG_NETWORK_FILESYSTEMS is not set -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y -CONFIG_PAGE_OWNER=y -CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y -CONFIG_DEBUG_SECTION_MISMATCH=y -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_PAGEALLOC=y -CONFIG_SLUB_DEBUG_PANIC_ON=y -CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y -CONFIG_PAGE_POISONING=y -CONFIG_DEBUG_OBJECTS=y -CONFIG_DEBUG_OBJECTS_FREE=y -CONFIG_DEBUG_OBJECTS_TIMERS=y -CONFIG_DEBUG_OBJECTS_WORK=y -CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -CONFIG_SLUB_DEBUG_ON=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_PANIC_ON_SCHED_BUG=y -CONFIG_PANIC_ON_RT_THROTTLING=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -# CONFIG_DEBUG_PREEMPT is not set -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_ATOMIC_SLEEP=y -CONFIG_LOCK_TORTURE_TEST=m -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_DEBUG_CREDENTIALS=y -CONFIG_RCU_TORTURE_TEST=m -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y -CONFIG_UFS_FAULT_INJECTION=y -CONFIG_FAULT_INJECTION_DEBUG_FS=y -CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_IPC_LOGGING=y -CONFIG_QCOM_RTB=y -CONFIG_QCOM_RTB_SEPARATE_CPUS=y -CONFIG_FUNCTION_TRACER=y -CONFIG_IRQSOFF_TRACER=y -CONFIG_PREEMPT_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_LKDTM=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_TEST_USER_COPY=m -CONFIG_MEMTEST=y -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_PID_IN_CONTEXTIDR=y -CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -CONFIG_CORESIGHT_SOURCE_ETM4X=y 
-CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_DUMMY=y -CONFIG_CORESIGHT_REMOTE_ETM=y -CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 -CONFIG_CORESIGHT_TGU=y -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_PAGESPAN=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SMACK=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y -CONFIG_CRYPTO_DEV_QCRYPTO=y -CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -CONFIG_XZ_DEC=y diff --git a/arch/arm64/configs/sdmsteppe_defconfig b/arch/arm64/configs/sdmsteppe_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..502031ffe286e948376e7ccfd0df61f2c806dcbb --- /dev/null +++ b/arch/arm64/configs/sdmsteppe_defconfig @@ -0,0 +1 @@ +vendor/sdmsteppe_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sm8150-perf_defconfig b/arch/arm64/configs/sm8150-perf_defconfig deleted file mode 100644 index 9742d70e107ce24267352c61b94b644dd503517e..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/sm8150-perf_defconfig +++ /dev/null @@ -1,633 +0,0 @@ -CONFIG_LOCALVERSION="-perf" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_AUDIT=y -# CONFIG_AUDITSYSCALL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_RCU_EXPERT=y -CONFIG_RCU_FAST_NO_HZ=y -CONFIG_RCU_NOCB_CPU=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_BPF=y -CONFIG_SCHED_CORE_CTL=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_SCHED_AUTOGROUP=y -CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y -CONFIG_BLK_DEV_INITRD=y -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_BPF_SYSCALL=y -# CONFIG_MEMBARRIER is not set -CONFIG_EMBEDDED=y -# CONFIG_SLUB_DEBUG is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB_FREELIST_RANDOM=y -CONFIG_SLAB_FREELIST_HARDENED=y -CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_STRONG=y -CONFIG_REFCOUNT_FULL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_SM8150=y -CONFIG_PCI=y -CONFIG_PCI_MSM=y -CONFIG_SCHED_MC=y -CONFIG_NR_CPUS=8 -CONFIG_PREEMPT=y -CONFIG_HZ_100=y -CONFIG_CMA=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set -CONFIG_ARMV8_DEPRECATED=y -CONFIG_SWP_EMULATION=y -CONFIG_CP15_BARRIER_EMULATION=y -CONFIG_SETEND_EMULATION=y -# CONFIG_ARM64_VHE is not set -CONFIG_RANDOMIZE_BASE=y -# CONFIG_EFI is not set -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 
-CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_BOOST=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_NET_IPVTI=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_VTI=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QTAGUID=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_NF_SOCKET_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y 
-CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_SCH_MULTIQ=y -CONFIG_NET_SCH_INGRESS=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_GACT=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_QRTR_MHI=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_SOCKEV_NLMCAST=y -CONFIG_BT=y -CONFIG_MSM_BT_POWER=y -CONFIG_CFG80211=y -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_RFKILL=y -CONFIG_NFC_NQ=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_CMA=y -CONFIG_MHI_BUS=y -CONFIG_MHI_QCOM=y -CONFIG_MHI_NETDEV=y -CONFIG_MHI_UCI=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_HDCP_QSEECOM=y -CONFIG_QSEECOM=y -CONFIG_UID_SYS_STATS=y -CONFIG_MEMORY_STATE_TIME=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_UEVENT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_BONDING=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_SKY2=y -CONFIG_RMNET=y -CONFIG_SMSC911X=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_USB_USBNET=y -CONFIG_WIL6210=m -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_CLD_LL_CORE=y -CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ST=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_UINPUT=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_VT is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_DEVMEM is not set -CONFIG_SERIAL_MSM_GENI=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_DIAG_CHAR=y -CONFIG_MSM_FASTCVPD=y -CONFIG_MSM_ADSPRPC=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_QCOM_GENI=y -CONFIG_SPI=y -CONFIG_SPI_QCOM_GENI=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_SPMI_SIMULATOR=y -CONFIG_PM8150_PMIC_SIMULATOR=y -CONFIG_PM8150B_PMIC_SIMULATOR=y -CONFIG_PM8150L_PMIC_SIMULATOR=y -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_PINCTRL_SM8150=y -CONFIG_GPIO_SYSFS=y -CONFIG_POWER_RESET_QCOM=y -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_POWER_RESET_XGENE=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_QPNP_FG_GEN4=y -CONFIG_SMB1355_SLAVE_CHARGER=y -CONFIG_QPNP_SMB5=y -CONFIG_SMB1390_CHARGE_PUMP=y -CONFIG_THERMAL=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_THERMAL_LIMITS_DCVS=y -CONFIG_QTI_VIRTUAL_SENSOR=y 
-CONFIG_QTI_AOP_REG_COOLING_DEVICE=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_QTI_BCL_PMIC5=y -CONFIG_QTI_BCL_SOC_DRIVER=y -CONFIG_QTI_ADC_TM=y -CONFIG_MFD_I2C_PMIC=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_PROXY_CONSUMER=y -CONFIG_REGULATOR_QPNP_LCDB=y -CONFIG_REGULATOR_REFGEN=y -CONFIG_REGULATOR_RPMH=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_VIDEO_ADV_DEBUG=y -CONFIG_VIDEO_FIXED_MINOR_RANGES=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SPECTRA_CAMERA=y -CONFIG_MSM_VIDC_V4L2=y -CONFIG_MSM_VIDC_GOVERNORS=y -CONFIG_MSM_SDE_ROTATOR=y -CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y -CONFIG_MSM_NPU=y -CONFIG_DRM=y -CONFIG_DRM_MSM_REGISTER_LOGGING=y -CONFIG_DRM_SDE_EVTLOG_DEBUG=y -CONFIG_DRM_SDE_RSC=y -CONFIG_FB_ARMCLCD=y -CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_USB_AUDIO_QMI=y -CONFIG_SND_SOC=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_HID_PLANTRONICS=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_MSM=y -CONFIG_USB_ISP1760=y -CONFIG_USB_ISP1760_HOST_ROLE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_USB_MSM_SSPHY_QMP=y -CONFIG_MSM_HSUSB_PHY=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_VBUS_DRAW=900 -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_MTP=y -CONFIG_USB_CONFIGFS_F_PTP=y -CONFIG_USB_CONFIGFS_F_ACC=y -CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y -CONFIG_USB_CONFIGFS_UEVENT=y -CONFIG_USB_CONFIGFS_F_MIDI=y -CONFIG_USB_CONFIGFS_F_HID=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_USB_CONFIGFS_F_CDEV=y -CONFIG_USB_CONFIGFS_F_CCID=y -CONFIG_USB_CONFIGFS_F_GSI=y -CONFIG_USB_CONFIGFS_F_QDSS=y -CONFIG_USB_PD_POLICY=y -CONFIG_QPNP_USB_PDPHY=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=y -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_QPNP_FLASH_V2=y -CONFIG_LEDS_QPNP_HAPTICS=y -CONFIG_LEDS_QTI_TRI_LED=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_EDAC=y -CONFIG_EDAC_KRYO_ARM64=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_GPI_DMA=y -CONFIG_UIO=y -CONFIG_UIO_MSM_SHAREDMEM=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QCOM_GENI_SE=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_USB_BAM=y -CONFIG_IPA3=y -CONFIG_IPA_WDI_UNIFIED_API=y -CONFIG_RMNET_IPA3=y -CONFIG_RNDIS_IPA=y -CONFIG_IPA3_MHI_PROXY=y -CONFIG_IPA_UT=y -CONFIG_MSM_11AD=m -CONFIG_SEEMP_CORE=y -CONFIG_QCOM_MDSS_PLL=y -CONFIG_SPMI_PMIC_CLKDIV=y -CONFIG_MSM_CLK_AOP_QMP=y -CONFIG_MSM_GCC_SM8150=y -CONFIG_MSM_NPUCC_SM8150=y -CONFIG_MSM_VIDEOCC_SM8150=y -CONFIG_MSM_CAMCC_SM8150=y -CONFIG_CLOCK_CPU_OSM=y -CONFIG_MSM_DISPCC_SM8150=y -CONFIG_MSM_DEBUGCC_SM8150=y 
-CONFIG_MSM_CLK_RPMH=y -CONFIG_MSM_GPUCC_SM8150=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_MSM_QMP=y -CONFIG_IOMMU_IO_PGTABLE_FAST=y -CONFIG_ARM_SMMU=y -CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_IOMMU_DEBUG=y -CONFIG_IOMMU_DEBUG_TRACKING=y -CONFIG_IOMMU_TESTS=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_GLINK_SPSS=y -CONFIG_RPMSG_QCOM_GLINK_SPI=y -CONFIG_QCOM_CPUSS_DUMP=y -CONFIG_QCOM_RUN_QUEUE_STATS=y -CONFIG_QCOM_LLCC=y -CONFIG_QCOM_SM8150_LLCC=y -CONFIG_QCOM_LLCC_PERFMON=m -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_MEMORY_DUMP_V2=y -CONFIG_QCOM_WATCHDOG_V2=y -CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SERVICE_LOCATOR=y -CONFIG_MSM_SERVICE_NOTIFIER=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_SYSMON_QMI_COMM=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_QCOM_DCC_V2=y -CONFIG_QCOM_SECURE_BUFFER=y -CONFIG_ICNSS=y -CONFIG_ICNSS_QMI=y -CONFIG_QCOM_EUD=y -CONFIG_QCOM_MINIDUMP=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_QCOM_BUS_CONFIG_RPMH=y -CONFIG_QCOM_COMMAND_DB=y -CONFIG_QCOM_EARLY_RANDOM=y -CONFIG_MSM_SPSS_UTILS=y -CONFIG_MSM_SPCOM=y -CONFIG_QTI_RPMH_API=y -CONFIG_QSEE_IPC_IRQ_BRIDGE=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -CONFIG_QCOM_QDSS_BRIDGE=y -CONFIG_QTI_RPM_STATS_LOG=y -CONFIG_MSM_CDSP_LOADER=y -CONFIG_QCOM_SMCINVOKE=y -CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_PM=y -CONFIG_MSM_QBT1000=y -CONFIG_QCOM_FSA4480_I2C=y -CONFIG_MSM_PERFORMANCE=y -CONFIG_QMP_DEBUGFS_CLIENT=y -CONFIG_QCOM_SMP2P_SLEEPSTATE=y -CONFIG_DEVFREQ_GOV_PASSIVE=y -CONFIG_QCOM_BIMC_BWMON=y -CONFIG_ARM_MEMLAT_MON=y -CONFIG_QCOMCCI_HWMON=y -CONFIG_QCOM_M4M_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y -CONFIG_DEVFREQ_GOV_MEMLAT=y -CONFIG_DEVFREQ_SIMPLE_DEV=y -CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_EXTCON_USB_GPIO=y -CONFIG_IIO=y -CONFIG_QCOM_SPMI_ADC5=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_QCOM_KGSL=y -CONFIG_ARM_GIC_V3_ACL=y -CONFIG_ARM_DSU_PMU=y -CONFIG_QCOM_LLCC_PMU=y -CONFIG_RAS=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_NVMEM_SPMI_SDAM=y -CONFIG_SENSORS_SSC=y -CONFIG_ESOC=y -CONFIG_ESOC_DEV=y -CONFIG_ESOC_CLIENT=y -CONFIG_ESOC_MDM_4x=y -CONFIG_ESOC_MDM_DRV=y -CONFIG_ESOC_MDM_DBG_ENG=y -CONFIG_MSM_TZ_LOG=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_SDCARD_FS=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHEDSTATS=y -# CONFIG_DEBUG_PREEMPT is not set -CONFIG_IPC_LOGGING=y -CONFIG_DEBUG_ALIGN_RODATA=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_DUMMY=y -CONFIG_CORESIGHT_REMOTE_ETM=y -CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 -CONFIG_CORESIGHT_TGU=y -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SMACK=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y -CONFIG_CRYPTO_DEV_QCRYPTO=y 
-CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y diff --git a/arch/arm64/configs/sm8150-perf_defconfig b/arch/arm64/configs/sm8150-perf_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..faafc59228eed38d31213dcfe467f48b4b642fde --- /dev/null +++ b/arch/arm64/configs/sm8150-perf_defconfig @@ -0,0 +1 @@ +vendor/sm8150-perf_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/sm8150_defconfig b/arch/arm64/configs/sm8150_defconfig deleted file mode 100644 index 565aef44d40732520a4fd02fb3ac04d2d858b802..0000000000000000000000000000000000000000 --- a/arch/arm64/configs/sm8150_defconfig +++ /dev/null @@ -1,716 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_AUDIT=y -# CONFIG_AUDITSYSCALL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_RCU_EXPERT=y -CONFIG_RCU_FAST_NO_HZ=y -CONFIG_RCU_NOCB_CPU=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_BLK_CGROUP=y -CONFIG_DEBUG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_BPF=y -CONFIG_CGROUP_DEBUG=y -CONFIG_SCHED_CORE_CTL=y -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set -CONFIG_SCHED_AUTOGROUP=y -CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y -CONFIG_BLK_DEV_INITRD=y -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_KALLSYMS_ALL=y -CONFIG_BPF_SYSCALL=y -# CONFIG_MEMBARRIER is not set -CONFIG_EMBEDDED=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB_FREELIST_RANDOM=y -CONFIG_SLAB_FREELIST_HARDENED=y -CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_STRONG=y -CONFIG_REFCOUNT_FULL=y -CONFIG_PANIC_ON_REFCOUNT_ERROR=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SIG=y -CONFIG_MODULE_SIG_FORCE=y -CONFIG_MODULE_SIG_SHA512=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y -# CONFIG_IOSCHED_DEADLINE is not set -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_ARCH_QCOM=y -CONFIG_ARCH_SM8150=y -CONFIG_PCI=y -CONFIG_PCI_MSM=y -CONFIG_SCHED_MC=y -CONFIG_NR_CPUS=8 -CONFIG_PREEMPT=y -CONFIG_HZ_100=y -CONFIG_CLEANCACHE=y -CONFIG_CMA=y -CONFIG_CMA_DEBUGFS=y -CONFIG_ZSMALLOC=y -CONFIG_SECCOMP=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -# CONFIG_HARDEN_BRANCH_PREDICTOR is not set -CONFIG_PRINT_VMEMLAYOUT=y -CONFIG_ARMV8_DEPRECATED=y -CONFIG_SWP_EMULATION=y -CONFIG_CP15_BARRIER_EMULATION=y -CONFIG_SETEND_EMULATION=y -# CONFIG_ARM64_VHE is not set -CONFIG_RANDOMIZE_BASE=y -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_PM_DEBUG=y -CONFIG_CPU_IDLE=y -CONFIG_ARM_CPUIDLE=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_BOOST=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=y -CONFIG_INET=y 
-CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_NET_IPVTI=y -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_INET_DIAG_DESTROY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_IPV6_VTI=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QTAGUID=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_SOCKET_IPV4=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_NF_SOCKET_IPV6=y -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_RPFILTER=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_L2TP=y -CONFIG_L2TP_DEBUGFS=y -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_BRIDGE=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_HTB=y -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_SCH_MULTIQ=y -CONFIG_NET_SCH_INGRESS=y -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=y -CONFIG_NET_EMATCH=y 
-CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_GACT=y -CONFIG_NET_ACT_MIRRED=y -CONFIG_NET_ACT_SKBEDIT=y -CONFIG_DNS_RESOLVER=y -CONFIG_QRTR=y -CONFIG_QRTR_SMD=y -CONFIG_QRTR_MHI=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_SOCKEV_NLMCAST=y -CONFIG_BT=y -CONFIG_MSM_BT_POWER=y -CONFIG_CFG80211=y -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y -CONFIG_CFG80211_INTERNAL_REGDB=y -# CONFIG_CFG80211_CRDA_SUPPORT is not set -CONFIG_RFKILL=y -CONFIG_NFC_NQ=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_CMA=y -CONFIG_MHI_BUS=y -CONFIG_MHI_DEBUG=y -CONFIG_MHI_QCOM=y -CONFIG_MHI_NETDEV=y -CONFIG_MHI_UCI=y -CONFIG_ZRAM=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_HDCP_QSEECOM=y -CONFIG_QSEECOM=y -CONFIG_UID_SYS_STATS=y -CONFIG_MEMORY_STATE_TIME=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_UFSHCD=y -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFSHCD_CMD_LOGGING=y -CONFIG_MD=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_UEVENT=y -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -CONFIG_NETDEVICES=y -CONFIG_BONDING=y -CONFIG_DUMMY=y -CONFIG_TUN=y -CONFIG_RMNET=y -CONFIG_PHYLIB=y -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -CONFIG_WIL6210=m -CONFIG_WCNSS_MEM_PRE_ALLOC=y -CONFIG_CLD_LL_CORE=y -CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_ST=y -CONFIG_INPUT_MISC=y -CONFIG_INPUT_HBTP_INPUT=y -CONFIG_INPUT_QPNP_POWER_ON=y -CONFIG_INPUT_UINPUT=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_VT is not set -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_MSM_GENI=y -CONFIG_SERIAL_MSM_GENI_CONSOLE=y -CONFIG_SERIAL_DEV_BUS=y -CONFIG_TTY_PRINTK=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_DIAG_CHAR=y -CONFIG_MSM_FASTCVPD=y -CONFIG_MSM_ADSPRPC=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_QCOM_GENI=y -CONFIG_SPI=y -CONFIG_SPI_QCOM_GENI=y -CONFIG_SPI_SPIDEV=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y -CONFIG_SPMI_SIMULATOR=y -CONFIG_PM8150_PMIC_SIMULATOR=y -CONFIG_PM8150B_PMIC_SIMULATOR=y -CONFIG_PM8150L_PMIC_SIMULATOR=y -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -CONFIG_PINCTRL_SM8150=y -CONFIG_GPIO_SYSFS=y -CONFIG_POWER_RESET_QCOM=y -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_POWER_RESET_XGENE=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_QPNP_FG_GEN4=y -CONFIG_SMB1355_SLAVE_CHARGER=y -CONFIG_QPNP_SMB5=y -CONFIG_SMB1390_CHARGE_PUMP=y -CONFIG_THERMAL=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_GOV_LOW_LIMITS=y -CONFIG_CPU_THERMAL=y -CONFIG_DEVFREQ_THERMAL=y -CONFIG_QCOM_SPMI_TEMP_ALARM=y -CONFIG_THERMAL_TSENS=y -CONFIG_QTI_THERMAL_LIMITS_DCVS=y -CONFIG_QTI_VIRTUAL_SENSOR=y -CONFIG_QTI_AOP_REG_COOLING_DEVICE=y -CONFIG_QTI_QMI_COOLING_DEVICE=y -CONFIG_REGULATOR_COOLING_DEVICE=y -CONFIG_QTI_BCL_PMIC5=y -CONFIG_QTI_BCL_SOC_DRIVER=y -CONFIG_QTI_ADC_TM=y -CONFIG_MFD_I2C_PMIC=y -CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_PROXY_CONSUMER=y 
-CONFIG_REGULATOR_QPNP_LCDB=y -CONFIG_REGULATOR_REFGEN=y -CONFIG_REGULATOR_RPMH=y -CONFIG_REGULATOR_STUB=y -CONFIG_MEDIA_SUPPORT=y -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -CONFIG_VIDEO_ADV_DEBUG=y -CONFIG_VIDEO_FIXED_MINOR_RANGES=y -CONFIG_V4L_PLATFORM_DRIVERS=y -CONFIG_SPECTRA_CAMERA=y -CONFIG_MSM_VIDC_V4L2=y -CONFIG_MSM_VIDC_GOVERNORS=y -CONFIG_MSM_SDE_ROTATOR=y -CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y -CONFIG_MSM_NPU=y -CONFIG_DRM=y -CONFIG_DRM_MSM_REGISTER_LOGGING=y -CONFIG_DRM_SDE_EVTLOG_DEBUG=y -CONFIG_DRM_SDE_RSC=y -CONFIG_FB_VIRTUAL=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_USB_AUDIO=y -CONFIG_SND_USB_AUDIO_QMI=y -CONFIG_SND_SOC=y -CONFIG_UHID=y -CONFIG_HID_APPLE=y -CONFIG_HID_ELECOM=y -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MULTITOUCH=y -CONFIG_HID_PLANTRONICS=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_MSM=y -CONFIG_USB_ISP1760=y -CONFIG_USB_ISP1760_HOST_ROLE=y -CONFIG_NOP_USB_XCEIV=y -CONFIG_USB_QCOM_EMU_PHY=y -CONFIG_USB_MSM_SSPHY_QMP=y -CONFIG_MSM_HSUSB_PHY=y -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -CONFIG_USB_GADGET_VBUS_DRAW=900 -CONFIG_USB_CONFIGFS=y -CONFIG_USB_CONFIGFS_NCM=y -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_MTP=y -CONFIG_USB_CONFIGFS_F_PTP=y -CONFIG_USB_CONFIGFS_F_ACC=y -CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y -CONFIG_USB_CONFIGFS_UEVENT=y -CONFIG_USB_CONFIGFS_F_MIDI=y -CONFIG_USB_CONFIGFS_F_HID=y -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_USB_CONFIGFS_F_CDEV=y -CONFIG_USB_CONFIGFS_F_CCID=y -CONFIG_USB_CONFIGFS_F_GSI=y -CONFIG_USB_CONFIGFS_F_QDSS=y -CONFIG_USB_PD_POLICY=y -CONFIG_QPNP_USB_PDPHY=y -CONFIG_MMC=y -CONFIG_MMC_PERF_PROFILING=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -CONFIG_MMC_TEST=y -CONFIG_MMC_RING_BUFFER=y -CONFIG_MMC_PARANOID_SD_INIT=y -CONFIG_MMC_CLKGATE=y -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PLTFM=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_QPNP_FLASH_V2=y -CONFIG_LEDS_QPNP_HAPTICS=y -CONFIG_LEDS_QTI_TRI_LED=y -CONFIG_LEDS_TRIGGER_TIMER=y -CONFIG_EDAC=y -CONFIG_EDAC_KRYO_ARM64=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y -CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y -CONFIG_EDAC_QCOM_LLCC=y -CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y -CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_QPNP=y -CONFIG_DMADEVICES=y -CONFIG_QCOM_GPI_DMA=y -CONFIG_QCOM_GPI_DMA_DEBUG=y -CONFIG_UIO=y -CONFIG_UIO_MSM_SHAREDMEM=y -CONFIG_STAGING=y -CONFIG_ASHMEM=y -CONFIG_ION=y -CONFIG_QCOM_GENI_SE=y -CONFIG_QPNP_REVID=y -CONFIG_SPS=y -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_USB_BAM=y -CONFIG_IPA3=y -CONFIG_IPA_WDI_UNIFIED_API=y -CONFIG_RMNET_IPA3=y -CONFIG_RNDIS_IPA=y -CONFIG_IPA3_MHI_PROXY=y -CONFIG_IPA_UT=y -CONFIG_MSM_11AD=m -CONFIG_SEEMP_CORE=y -CONFIG_QCOM_MDSS_PLL=y -CONFIG_SPMI_PMIC_CLKDIV=y -CONFIG_MSM_CLK_AOP_QMP=y -CONFIG_MSM_GCC_SM8150=y -CONFIG_MSM_NPUCC_SM8150=y -CONFIG_MSM_VIDEOCC_SM8150=y -CONFIG_MSM_CAMCC_SM8150=y -CONFIG_CLOCK_CPU_OSM=y -CONFIG_MSM_DISPCC_SM8150=y -CONFIG_MSM_DEBUGCC_SM8150=y -CONFIG_MSM_CLK_RPMH=y -CONFIG_MSM_GPUCC_SM8150=y -CONFIG_HWSPINLOCK=y 
-CONFIG_HWSPINLOCK_QCOM=y -CONFIG_QCOM_APCS_IPC=y -CONFIG_MSM_QMP=y -CONFIG_IOMMU_IO_PGTABLE_FAST=y -CONFIG_ARM_SMMU=y -CONFIG_QCOM_LAZY_MAPPING=y -CONFIG_IOMMU_DEBUG=y -CONFIG_IOMMU_DEBUG_TRACKING=y -CONFIG_IOMMU_TESTS=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_GLINK_SPSS=y -CONFIG_RPMSG_QCOM_GLINK_SPI=y -CONFIG_QCOM_CPUSS_DUMP=y -CONFIG_QCOM_RUN_QUEUE_STATS=y -CONFIG_QCOM_LLCC=y -CONFIG_QCOM_SM8150_LLCC=y -CONFIG_QCOM_LLCC_PERFMON=m -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_MEMORY_DUMP_V2=y -CONFIG_QCOM_WATCHDOG_V2=y -CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_QCOM_WDOG_IPI_ENABLE=y -CONFIG_QCOM_SMP2P=y -CONFIG_MSM_SERVICE_LOCATOR=y -CONFIG_MSM_SERVICE_NOTIFIER=y -CONFIG_MSM_SUBSYSTEM_RESTART=y -CONFIG_MSM_PIL=y -CONFIG_MSM_SYSMON_QMI_COMM=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_BOOT_STATS=y -CONFIG_MSM_CORE_HANG_DETECT=y -CONFIG_QCOM_DCC_V2=y -CONFIG_MSM_GLADIATOR_HANG_DETECT=y -CONFIG_QCOM_SECURE_BUFFER=y -CONFIG_ICNSS=y -CONFIG_ICNSS_DEBUG=y -CONFIG_ICNSS_QMI=y -CONFIG_QCOM_EUD=y -CONFIG_QCOM_MINIDUMP=y -CONFIG_QCOM_BUS_SCALING=y -CONFIG_QCOM_BUS_CONFIG_RPMH=y -CONFIG_QCOM_COMMAND_DB=y -CONFIG_QCOM_EARLY_RANDOM=y -CONFIG_MSM_SPSS_UTILS=y -CONFIG_MSM_SPCOM=y -CONFIG_QTI_RPMH_API=y -CONFIG_QSEE_IPC_IRQ_BRIDGE=y -CONFIG_QCOM_GLINK=y -CONFIG_QCOM_GLINK_PKT=y -CONFIG_QCOM_QDSS_BRIDGE=y -CONFIG_QTI_RPM_STATS_LOG=y -CONFIG_MSM_CDSP_LOADER=y -CONFIG_QCOM_SMCINVOKE=y -CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_PM=y -CONFIG_MSM_QBT1000=y -CONFIG_QCOM_FSA4480_I2C=y -CONFIG_MSM_PERFORMANCE=y -CONFIG_QMP_DEBUGFS_CLIENT=y -CONFIG_QCOM_SMP2P_SLEEPSTATE=y -CONFIG_DEVFREQ_GOV_PASSIVE=y -CONFIG_QCOM_BIMC_BWMON=y -CONFIG_ARM_MEMLAT_MON=y -CONFIG_QCOMCCI_HWMON=y -CONFIG_QCOM_M4M_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y -CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y -CONFIG_DEVFREQ_GOV_MEMLAT=y -CONFIG_DEVFREQ_SIMPLE_DEV=y -CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_EXTCON_USB_GPIO=y -CONFIG_IIO=y -CONFIG_QCOM_SPMI_ADC5=y -CONFIG_PWM=y -CONFIG_PWM_QTI_LPG=y -CONFIG_QCOM_KGSL=y -CONFIG_ARM_GIC_V3_ACL=y -CONFIG_PHY_XGENE=y -CONFIG_ARM_DSU_PMU=y -CONFIG_QCOM_LLCC_PMU=y -CONFIG_RAS=y -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_NVMEM_SPMI_SDAM=y -CONFIG_SENSORS_SSC=y -CONFIG_ESOC=y -CONFIG_ESOC_DEV=y -CONFIG_ESOC_CLIENT=y -CONFIG_ESOC_DEBUG=y -CONFIG_ESOC_MDM_4x=y -CONFIG_ESOC_MDM_DRV=y -CONFIG_ESOC_MDM_DBG_ENG=y -CONFIG_MSM_TZ_LOG=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_FUSE_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_EFIVAR_FS=y -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_SDCARD_FS=y -# CONFIG_NETWORK_FILESYSTEMS is not set -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y -CONFIG_PAGE_OWNER=y -CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y -CONFIG_DEBUG_SECTION_MISMATCH=y -# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_PAGEALLOC=y -CONFIG_SLUB_DEBUG_PANIC_ON=y -CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y -CONFIG_PAGE_POISONING=y -CONFIG_DEBUG_OBJECTS=y -CONFIG_DEBUG_OBJECTS_FREE=y -CONFIG_DEBUG_OBJECTS_TIMERS=y -CONFIG_DEBUG_OBJECTS_WORK=y -CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -CONFIG_SLUB_DEBUG_ON=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_STACK_USAGE=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -CONFIG_PANIC_TIMEOUT=5 
-CONFIG_PANIC_ON_SCHED_BUG=y -CONFIG_PANIC_ON_RT_THROTTLING=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_STACK_END_CHECK=y -# CONFIG_DEBUG_PREEMPT is not set -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_ATOMIC_SLEEP=y -CONFIG_LOCK_TORTURE_TEST=m -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y -CONFIG_DEBUG_CREDENTIALS=y -CONFIG_RCU_TORTURE_TEST=m -CONFIG_FAULT_INJECTION=y -CONFIG_FAIL_PAGE_ALLOC=y -CONFIG_UFS_FAULT_INJECTION=y -CONFIG_FAULT_INJECTION_DEBUG_FS=y -CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_IPC_LOGGING=y -CONFIG_QCOM_RTB=y -CONFIG_QCOM_RTB_SEPARATE_CPUS=y -CONFIG_FUNCTION_TRACER=y -CONFIG_PREEMPTIRQ_EVENTS=y -CONFIG_IRQSOFF_TRACER=y -CONFIG_PREEMPT_TRACER=y -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_LKDTM=m -CONFIG_ATOMIC64_SELFTEST=m -CONFIG_TEST_USER_COPY=m -CONFIG_MEMTEST=y -CONFIG_BUG_ON_DATA_CORRUPTION=y -CONFIG_PID_IN_CONTEXTIDR=y -CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -CONFIG_CORESIGHT_SOURCE_ETM4X=y -CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_CTI=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_DUMMY=y -CONFIG_CORESIGHT_REMOTE_ETM=y -CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 -CONFIG_CORESIGHT_TGU=y -CONFIG_CORESIGHT_EVENT=y -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_PAGESPAN=y -CONFIG_FORTIFY_SOURCE=y -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SMACK=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y -CONFIG_CRYPTO_DEV_QCRYPTO=y -CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -CONFIG_XZ_DEC=y diff --git a/arch/arm64/configs/sm8150_defconfig b/arch/arm64/configs/sm8150_defconfig new file mode 120000 index 0000000000000000000000000000000000000000..4ff005a12e6427e0dfd4511ecd668db6017b9933 --- /dev/null +++ b/arch/arm64/configs/sm8150_defconfig @@ -0,0 +1 @@ +vendor/sm8150_defconfig \ No newline at end of file diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..4cea42e6f8af33f8d8db8c3cb9fd8a319068afab --- /dev/null +++ b/arch/arm64/configs/vendor/qcs405_defconfig @@ -0,0 +1,530 @@ +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEBUG=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_QCS405=y +CONFIG_NR_CPUS=4 +CONFIG_PREEMPT=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_SECCOMP=y +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set 
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_MSM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y 
+CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_BT=y +CONFIG_BT_RFCOMM=y +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=y +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=y +CONFIG_CFG80211=y +CONFIG_CFG80211_INTERNAL_REGDB=y +CONFIG_RFKILL=y +CONFIG_NTAG_NQ=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_MSM_QPIC_NAND=y +CONFIG_MTD_NAND=y +CONFIG_MTD_UBI=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_UID_SYS_STATS=y +CONFIG_QPNP_MISC=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_KS8851=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y +CONFIG_USB_NET_SMSC75XX=y +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=m +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL_MXT=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +CONFIG_SERIAL_MSM_HS=y +CONFIG_HW_RANDOM=y +CONFIG_MSM_ADSPRPC=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_SLIMBUS=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_QCS405=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_SMB1351_USB_CHARGER=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_FAN53555=y +CONFIG_REGULATOR_CPR=y +CONFIG_REGULATOR_MEM_ACC=y +CONFIG_REGULATOR_RPM_SMD=y +CONFIG_REGULATOR_SPM=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SOC_CAMERA=y +CONFIG_SOC_CAMERA_PLATFORM=y +CONFIG_FB=y +CONFIG_FB_MSM=y 
+CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_SOC=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_ACM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y +CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_SERIAL=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_MSM_SNPS_FEMTO_PHY=y +CONFIG_USB_MSM_SSPHY=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_DEBUG_FILES=y +CONFIG_USB_GADGET_DEBUG_FS=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=m +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_MMC_CQ_HCI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_SPS_DMA=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_QCOM_CLK_SMD_RPM=y +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_MDM_GCC_QCS405=y +CONFIG_MDM_DEBUGCC_QCS405=y +CONFIG_CLOCK_CPU_QCS405=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MAILBOX=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_RPM=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_MSM_RPM_SMD=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMD_RPM=y +CONFIG_MSM_SPM=y +CONFIG_MSM_L2_SPM=y +CONFIG_QCOM_SCM=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_WDOG_IPI_ENABLE=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y +CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y +CONFIG_ICNSS_QMI=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_MSM_TZ_SMMU=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_PM=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_KGSL=y +CONFIG_QTI_MPM=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set 
+CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_UFS_FAULT_INJECTION=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_EVENT=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_LSM_MMAP_MIN_ADDR=4096 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y diff --git a/arch/arm64/configs/sm8150-auto-perf_defconfig b/arch/arm64/configs/vendor/sa8155-perf_defconfig similarity index 99% rename from arch/arm64/configs/sm8150-auto-perf_defconfig rename to arch/arm64/configs/vendor/sa8155-perf_defconfig index 1e5d70ac5ccca268c2473b57f1242d31ed5474db..874a31783e7c1243687f6bd28d7ff890826c23fc 100644 --- a/arch/arm64/configs/sm8150-auto-perf_defconfig +++ b/arch/arm64/configs/vendor/sa8155-perf_defconfig @@ -64,6 +64,7 @@ CONFIG_PREEMPT=y CONFIG_HZ_100=y CONFIG_CMA=y CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y CONFIG_SECCOMP=y # CONFIG_UNMAP_KERNEL_AT_EL0 is not set # CONFIG_HARDEN_BRANCH_PREDICTOR is not set @@ -535,6 +536,7 @@ CONFIG_MSM_PM=y CONFIG_QCOM_FSA4480_I2C=y CONFIG_MSM_PERFORMANCE=y CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_QCOM_CDSP_RM=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -545,6 +547,7 @@ CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_DEVFREQ_SIMPLE_DEV=y CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_DEVFREQ_GOV_CDSPL3=y CONFIG_EXTCON_USB_GPIO=y CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y diff --git a/arch/arm64/configs/sm8150-auto_defconfig b/arch/arm64/configs/vendor/sa8155_defconfig similarity index 99% rename from arch/arm64/configs/sm8150-auto_defconfig rename to arch/arm64/configs/vendor/sa8155_defconfig index 9077f3d381f7aaeaa7fa1bc75858e10f1531e0e8..eb0060ebee4b37bd2433f0bdab7773cfbfb3f91a 100644 --- a/arch/arm64/configs/sm8150-auto_defconfig +++ b/arch/arm64/configs/vendor/sa8155_defconfig @@ -70,6 +70,7 @@ CONFIG_CLEANCACHE=y CONFIG_CMA=y CONFIG_CMA_DEBUGFS=y CONFIG_ZSMALLOC=y 
+CONFIG_HAVE_LOW_MEMORY_KILLER=y CONFIG_SECCOMP=y # CONFIG_UNMAP_KERNEL_AT_EL0 is not set # CONFIG_HARDEN_BRANCH_PREDICTOR is not set @@ -327,6 +328,7 @@ CONFIG_DIAG_CHAR=y CONFIG_MSM_FASTCVPD=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_QCOM_GENI=y CONFIG_SPI=y CONFIG_SPI_QCOM_GENI=y @@ -337,6 +339,7 @@ CONFIG_PM8150_PMIC_SIMULATOR=y CONFIG_PM8150B_PMIC_SIMULATOR=y CONFIG_PM8150L_PMIC_SIMULATOR=y CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_SX150X=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_PINCTRL_SM8150=y CONFIG_GPIO_SYSFS=y @@ -388,6 +391,7 @@ CONFIG_DRM=y CONFIG_DRM_MSM_REGISTER_LOGGING=y CONFIG_DRM_SDE_EVTLOG_DEBUG=y CONFIG_DRM_SDE_RSC=y +CONFIG_DRM_ANALOGIX_ANX7625=y CONFIG_FB_VIRTUAL=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y @@ -560,6 +564,7 @@ CONFIG_MSM_PM=y CONFIG_QCOM_FSA4480_I2C=y CONFIG_MSM_PERFORMANCE=y CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_QCOM_CDSP_RM=y CONFIG_DEVFREQ_GOV_PASSIVE=y CONFIG_QCOM_BIMC_BWMON=y CONFIG_ARM_MEMLAT_MON=y @@ -570,6 +575,7 @@ CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y CONFIG_DEVFREQ_GOV_MEMLAT=y CONFIG_DEVFREQ_SIMPLE_DEV=y CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_DEVFREQ_GOV_CDSPL3=y CONFIG_EXTCON_USB_GPIO=y CONFIG_IIO=y CONFIG_QCOM_SPMI_ADC5=y diff --git a/arch/arm64/configs/sdmshrike-perf_defconfig b/arch/arm64/configs/vendor/sdmshrike-perf_defconfig similarity index 99% rename from arch/arm64/configs/sdmshrike-perf_defconfig rename to arch/arm64/configs/vendor/sdmshrike-perf_defconfig index 75a748c2e54b0e7cf87429f6b39921797d84ffdb..d616eaaee5f4bf0cd415642fb656003832323125 100644 --- a/arch/arm64/configs/sdmshrike-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmshrike-perf_defconfig @@ -48,6 +48,7 @@ CONFIG_PREEMPT=y CONFIG_HZ_100=y CONFIG_CMA=y CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y CONFIG_SECCOMP=y # CONFIG_HARDEN_BRANCH_PREDICTOR is not set CONFIG_ARMV8_DEPRECATED=y @@ -429,6 +430,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y CONFIG_MAGIC_SYSRQ=y CONFIG_PANIC_TIMEOUT=5 CONFIG_SCHEDSTATS=y @@ -450,3 +452,4 @@ CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/sdmshrike_defconfig b/arch/arm64/configs/vendor/sdmshrike_defconfig similarity index 99% rename from arch/arm64/configs/sdmshrike_defconfig rename to arch/arm64/configs/vendor/sdmshrike_defconfig index 830f06dc204a5e72c7fa33a7b3c2f25159418b43..405f65c178c971a8dbadc97277db8460d18115df 100644 --- a/arch/arm64/configs/sdmshrike_defconfig +++ b/arch/arm64/configs/vendor/sdmshrike_defconfig @@ -51,6 +51,7 @@ CONFIG_HZ_100=y CONFIG_CLEANCACHE=y CONFIG_CMA=y CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y CONFIG_SECCOMP=y # CONFIG_HARDEN_BRANCH_PREDICTOR is not set CONFIG_ARMV8_DEPRECATED=y diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..fbcf85a0d73c8a0983f2dd5ee1e79a33fd14dec6 --- /dev/null +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -0,0 +1,610 @@ +CONFIG_LOCALVERSION="-perf" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y 
+CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +# CONFIG_MEMBARRIER is not set +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SM6150=y +CONFIG_ARCH_SDMMAGPIE=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CMA=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y +CONFIG_SECCOMP=y +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +# CONFIG_EFI is not set +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y 
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_SOCKEV_NLMCAST=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_SKY2=y +CONFIG_RMNET=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y +CONFIG_WIL6210=m +CONFIG_WCNSS_MEM_PRE_ALLOC=y 
+CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_SIMULATOR=y +CONFIG_PM8150_PMIC_SIMULATOR=y +CONFIG_PM8150B_PMIC_SIMULATOR=y +CONFIG_PM8150L_PMIC_SIMULATOR=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_SDMMAGPIE=y +CONFIG_PINCTRL_SM6150=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_FG_GEN4=y +CONFIG_QPNP_SMB5=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_MSM_NPU=y +CONFIG_DRM=y +CONFIG_DRM_MSM_REGISTER_LOGGING=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_FB_ARMCLCD=y +CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_MSM_HSUSB_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_MMC=y 
+CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_EDAC_KRYO_ARM64=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_QCOM_GENI_SE=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_USB_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_MSM_11AD=m +CONFIG_QCOM_MDSS_PLL=y +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_MSM_GCC_SM8150=y +CONFIG_MSM_NPUCC_SM8150=y +CONFIG_MSM_VIDEOCC_SM8150=y +CONFIG_MSM_CAMCC_SM8150=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_DISPCC_SM8150=y +CONFIG_MSM_DEBUGCC_SM8150=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_MSM_GPUCC_SM8150=y +CONFIG_MSM_GCC_SM6150=y +CONFIG_MSM_GPUCC_SM6150=y +CONFIG_MSM_VIDEOCC_SM6150=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_GLINK_SPI=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SM6150_LLCC=y +CONFIG_QCOM_SDMMAGPIE_LLCC=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_ICNSS=y +CONFIG_ICNSS_QMI=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_QTI_RPMH_API=y +CONFIG_QSEE_IPC_IRQ=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_KGSL=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_QCOM_LLCC_PMU=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SENSORS_SSC=y +CONFIG_ESOC=y +CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y +CONFIG_ESOC_MDM_DBG_ENG=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y 
+CONFIG_TMPFS_POSIX_ACL=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_IPC_LOGGING=y +CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_EVENT=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..2f456e631dff918179f16e7f6ba3c98507c99709 --- /dev/null +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -0,0 +1,685 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +# CONFIG_MEMBARRIER is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SM6150=y +CONFIG_ARCH_SDMMAGPIE=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y +CONFIG_SECCOMP=y +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +# 
CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y 
+CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_DNS_RESOLVER=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_SOCKEV_NLMCAST=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_RMNET=y +CONFIG_PHYLIB=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_WIL6210=m +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_SERIAL_MSM_GENI_CONSOLE=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_TTY_PRINTK=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_ADSPRPC=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_SIMULATOR=y +CONFIG_PM8150_PMIC_SIMULATOR=y +CONFIG_PM8150B_PMIC_SIMULATOR=y +CONFIG_PM8150L_PMIC_SIMULATOR=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_SDMMAGPIE=y +CONFIG_PINCTRL_SM6150=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_FG_GEN4=y +CONFIG_QPNP_SMB5=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y 
+CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_MSM_NPU=y +CONFIG_DRM=y +CONFIG_DRM_MSM_REGISTER_LOGGING=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_FB_VIRTUAL=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_MSM_HSUSB_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_EDAC_KRYO_ARM64=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y +CONFIG_EDAC_QCOM_LLCC=y +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_QCOM_GPI_DMA_DEBUG=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ION=y +CONFIG_QCOM_GENI_SE=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_USB_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA_UT=y +CONFIG_MSM_11AD=m +CONFIG_QCOM_MDSS_PLL=y +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_MSM_GCC_SM8150=y +CONFIG_MSM_NPUCC_SM8150=y +CONFIG_MSM_VIDEOCC_SM8150=y +CONFIG_MSM_CAMCC_SM8150=y 
+CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_DISPCC_SM8150=y +CONFIG_MSM_DEBUGCC_SM8150=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_MSM_GPUCC_SM8150=y +CONFIG_MSM_GCC_SM6150=y +CONFIG_MSM_GPUCC_SM6150=y +CONFIG_MSM_VIDEOCC_SM6150=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_GLINK_SPI=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SM6150_LLCC=y +CONFIG_QCOM_SDMMAGPIE_LLCC=y +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_WDOG_IPI_ENABLE=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y +CONFIG_MSM_GLADIATOR_HANG_DETECT=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y +CONFIG_ICNSS_QMI=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_QTI_RPMH_API=y +CONFIG_QSEE_IPC_IRQ=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_KGSL=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_PHY_XGENE=y +CONFIG_QCOM_LLCC_PMU=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SENSORS_SSC=y +CONFIG_ESOC=y +CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +CONFIG_ESOC_DEBUG=y +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y +CONFIG_ESOC_MDM_DBG_ENG=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_EFIVAR_FS=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_WQ_WATCHDOG=y 
+CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_UFS_FAULT_INJECTION=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_FUNCTION_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_TEST_USER_COPY=m +CONFIG_MEMTEST=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_TGU=y +CONFIG_CORESIGHT_EVENT=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_PAGESPAN=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_XZ_DEC=y diff --git a/arch/arm64/configs/vendor/sm8150-perf_defconfig b/arch/arm64/configs/vendor/sm8150-perf_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..92c7fddf91fc9a9d422e8d6fce27a623a6f46193 --- /dev/null +++ b/arch/arm64/configs/vendor/sm8150-perf_defconfig @@ -0,0 +1,652 @@ +CONFIG_LOCALVERSION="-perf" +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_BLK_CGROUP=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +# CONFIG_MEMBARRIER is not set +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_REFCOUNT_FULL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y 
+CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SM8150=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CMA=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y +CONFIG_SECCOMP=y +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +# CONFIG_EFI is not set +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y 
+CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_QRTR_MHI=y +CONFIG_SOCKEV_NLMCAST=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_MHI_BUS=y +CONFIG_MHI_QCOM=y +CONFIG_MHI_NETDEV=y +CONFIG_MHI_UCI=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_SKY2=y +CONFIG_RMNET=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y +CONFIG_WIL6210=m +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ST=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_QTI_HAPTICS=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_FASTCVPD=y +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y 
+CONFIG_SPMI_SIMULATOR=y +CONFIG_PM8150_PMIC_SIMULATOR=y +CONFIG_PM8150B_PMIC_SIMULATOR=y +CONFIG_PM8150L_PMIC_SIMULATOR=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_SM8150=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_FG_GEN4=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_SMB5=y +CONFIG_QPNP_QNOVO5=y +CONFIG_SMB1390_CHARGE_PUMP=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_ADC_TM=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_MSM_NPU=y +CONFIG_DRM=y +CONFIG_DRM_MSM_REGISTER_LOGGING=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_FB_ARMCLCD=y +CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_REDRIVER_NB7VPQ904M=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_HSUSB_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_QTI_TRI_LED=y 
+CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_EDAC_KRYO_ARM64=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ION=y +CONFIG_QCOM_GENI_SE=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_USB_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA3_MHI_PROXY=y +CONFIG_IPA_UT=y +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_MSM_GCC_SM8150=y +CONFIG_MSM_NPUCC_SM8150=y +CONFIG_MSM_VIDEOCC_SM8150=y +CONFIG_MSM_CAMCC_SM8150=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_DISPCC_SM8150=y +CONFIG_MSM_DEBUGCC_SM8150=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_MSM_GPUCC_SM8150=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_GLINK_SPSS=y +CONFIG_RPMSG_QCOM_GLINK_SPI=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SM8150_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_QMI_DFC=y +CONFIG_QCOM_QMI_POWER_COLLAPSE=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_ICNSS=y +CONFIG_ICNSS_QMI=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_MSM_SPSS_UTILS=y +CONFIG_MSM_SPCOM=y +CONFIG_QTI_RPMH_API=y +CONFIG_QSEE_IPC_IRQ_BRIDGE=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_QCOM_QDSS_BRIDGE=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_QCOM_SMCINVOKE=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_MSM_QBT1000=y +CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_QCOM_SMP2P_SLEEPSTATE=y +CONFIG_QCOM_CDSP_RM=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_DEVFREQ_GOV_CDSPL3=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_KGSL=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_LLCC_PMU=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SENSORS_SSC=y +CONFIG_ESOC=y +CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y +CONFIG_ESOC_MDM_DBG_ENG=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y 
+CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=-1 +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_IPC_LOGGING=y +CONFIG_DEBUG_ALIGN_RODATA=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_TGU=y +CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_STACK_HASH_ORDER_SHIFT=12 diff --git a/arch/arm64/configs/vendor/sm8150_defconfig b/arch/arm64/configs/vendor/sm8150_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..27341a2cf5edfe5c2073f22b71b1de3b56bd0d2a --- /dev/null +++ b/arch/arm64/configs/vendor/sm8150_defconfig @@ -0,0 +1,733 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_BLK_CGROUP=y +CONFIG_DEBUG_BLK_CGROUP=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_DEFAULT_USE_ENERGY_AWARE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +# CONFIG_MEMBARRIER is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB_FREELIST_RANDOM=y +CONFIG_SLAB_FREELIST_HARDENED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_REFCOUNT_FULL=y +CONFIG_PANIC_ON_REFCOUNT_ERROR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SM8150=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_HAVE_LOW_MEMORY_KILLER=y 
+CONFIG_SECCOMP=y +# CONFIG_UNMAP_KERNEL_AT_EL0 is not set +# CONFIG_HARDEN_BRANCH_PREDICTOR is not set +CONFIG_PRINT_VMEMLAYOUT=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +# CONFIG_ARM64_VHE is not set +CONFIG_RANDOMIZE_BASE=y +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPVTI=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y 
+CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_DNS_RESOLVER=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y +CONFIG_QRTR_MHI=y +CONFIG_SOCKEV_NLMCAST=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_CERTIFICATION_ONUS=y +CONFIG_CFG80211_REG_CELLULAR_HINTS=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y +CONFIG_DMA_CMA=y +CONFIG_MHI_BUS=y +CONFIG_MHI_DEBUG=y +CONFIG_MHI_QCOM=y +CONFIG_MHI_NETDEV=y +CONFIG_MHI_UCI=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_HDCP_QSEECOM=y +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_RMNET=y +CONFIG_PHYLIB=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=y +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=y +CONFIG_PPPOL2TP=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_WIL6210=m +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ST=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_QTI_HAPTICS=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_SERIAL_MSM_GENI_CONSOLE=y +CONFIG_SERIAL_DEV_BUS=y +CONFIG_TTY_PRINTK=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_DIAG_CHAR=y +CONFIG_MSM_FASTCVPD=y +CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SPI=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_SPMI_SIMULATOR=y +CONFIG_PM8150_PMIC_SIMULATOR=y 
+CONFIG_PM8150B_PMIC_SIMULATOR=y +CONFIG_PM8150L_PMIC_SIMULATOR=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_PINCTRL_SM8150=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_FG_GEN4=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_SMB5=y +CONFIG_QPNP_QNOVO5=y +CONFIG_SMB1390_CHARGE_PUMP=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_TSENS=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_AOP_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y +CONFIG_REGULATOR_COOLING_DEVICE=y +CONFIG_QTI_BCL_PMIC5=y +CONFIG_QTI_BCL_SOC_DRIVER=y +CONFIG_QTI_ADC_TM=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_PROXY_CONSUMER=y +CONFIG_REGULATOR_QPNP_LCDB=y +CONFIG_REGULATOR_REFGEN=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_MSM_NPU=y +CONFIG_DRM=y +CONFIG_DRM_MSM_REGISTER_LOGGING=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_FB_VIRTUAL=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_PLANTRONICS=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_REDRIVER_NB7VPQ904M=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_QCOM_EMU_PHY=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_HSUSB_PHY=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=900 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_UAC2=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_PARANOID_SD_INIT=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP_FLASH_V2=y 
+CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_QTI_TRI_LED=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_EDAC=y +CONFIG_EDAC_KRYO_ARM64=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y +CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y +CONFIG_EDAC_QCOM_LLCC=y +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_QCOM_GPI_DMA_DEBUG=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ION=y +CONFIG_QCOM_GENI_SE=y +CONFIG_QPNP_REVID=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_USB_BAM=y +CONFIG_IPA3=y +CONFIG_IPA_WDI_UNIFIED_API=y +CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y +CONFIG_IPA3_MHI_PROXY=y +CONFIG_IPA_UT=y +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_SPMI_PMIC_CLKDIV=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_MSM_GCC_SM8150=y +CONFIG_MSM_NPUCC_SM8150=y +CONFIG_MSM_VIDEOCC_SM8150=y +CONFIG_MSM_CAMCC_SM8150=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_DISPCC_SM8150=y +CONFIG_MSM_DEBUGCC_SM8150=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_MSM_GPUCC_SM8150=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_QCOM_APCS_IPC=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_GLINK_SMEM=y +CONFIG_RPMSG_QCOM_GLINK_SPSS=y +CONFIG_RPMSG_QCOM_GLINK_SPI=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SM8150_LLCC=y +CONFIG_QCOM_LLCC_PERFMON=m +CONFIG_QCOM_QMI_HELPERS=y +CONFIG_QCOM_QMI_DFC=y +CONFIG_QCOM_QMI_POWER_COLLAPSE=y +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_WDOG_IPI_ENABLE=y +CONFIG_QCOM_SMP2P=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_SYSMON_QMI_COMM=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_QCOM_DCC_V2=y +CONFIG_MSM_GLADIATOR_HANG_DETECT=y +CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y +CONFIG_ICNSS_QMI=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_MINIDUMP=y +CONFIG_QCOM_BUS_SCALING=y +CONFIG_QCOM_BUS_CONFIG_RPMH=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_MSM_SPSS_UTILS=y +CONFIG_MSM_SPCOM=y +CONFIG_QTI_RPMH_API=y +CONFIG_QSEE_IPC_IRQ_BRIDGE=y +CONFIG_QCOM_GLINK=y +CONFIG_QCOM_GLINK_PKT=y +CONFIG_QCOM_QDSS_BRIDGE=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_QCOM_SMCINVOKE=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_MSM_QBT1000=y +CONFIG_QCOM_FSA4480_I2C=y +CONFIG_MEM_SHARE_QMI_SERVICE=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_QMP_DEBUGFS_CLIENT=y +CONFIG_QCOM_SMP2P_SLEEPSTATE=y +CONFIG_QCOM_CDSP_RM=y +CONFIG_DEVFREQ_GOV_PASSIVE=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_DEVFREQ_GOV_CDSPL3=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_SPMI_ADC5=y +CONFIG_PWM=y +CONFIG_PWM_QTI_LPG=y +CONFIG_QCOM_KGSL=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_PHY_XGENE=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_LLCC_PMU=y +CONFIG_RAS=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_NVMEM_SPMI_SDAM=y +CONFIG_SENSORS_SSC=y +CONFIG_ESOC=y 
+CONFIG_ESOC_DEV=y +CONFIG_ESOC_CLIENT=y +CONFIG_ESOC_DEBUG=y +CONFIG_ESOC_MDM_4x=y +CONFIG_ESOC_MDM_DRV=y +CONFIG_ESOC_MDM_DBG_ENG=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_EFIVAR_FS=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_TIMEOUT=-1 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_UFS_FAULT_INJECTION=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_FUNCTION_TRACER=y +CONFIG_PREEMPTIRQ_EVENTS=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_LKDTM=m +CONFIG_ATOMIC64_SELFTEST=m +CONFIG_TEST_USER_COPY=m +CONFIG_MEMTEST=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_DUMMY=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_TGU=y +CONFIG_CORESIGHT_EVENT=y +CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_PAGESPAN=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_XZ_DEC=y diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index a5f476752f4e917edb75a4fe7becc799f899d51c..ac16e2efb348226ec6ff4053de868e9325c81d1e 100644 --- 
a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -44,7 +44,8 @@ #define ARM64_HARDEN_BRANCH_PREDICTOR 24 #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 #define ARM64_HW_DBM 26 +#define ARM64_SSBD 27 -#define ARM64_NCAPS 27 +#define ARM64_NCAPS 28 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 428ee1f2468c55959d52cf1cffde922399387b9f..3128d4dc4a02549c31ea3e7fdf0e087ac37f7c28 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -23,6 +23,12 @@ #define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) #define cpu_feature(x) ilog2(HWCAP_ ## x) +#define ARM64_SSBD_UNKNOWN -1 +#define ARM64_SSBD_FORCE_DISABLE 0 +#define ARM64_SSBD_KERNEL 1 +#define ARM64_SSBD_FORCE_ENABLE 2 +#define ARM64_SSBD_MITIGATED 3 + #ifndef __ASSEMBLY__ #include @@ -262,6 +268,22 @@ static inline bool system_uses_ttbr0_pan(void) !cpus_have_const_cap(ARM64_HAS_PAN); } +static inline int arm64_get_ssbd_state(void) +{ +#ifdef CONFIG_ARM64_SSBD + extern int ssbd_state; + return ssbd_state; +#else + return ARM64_SSBD_UNKNOWN; +#endif +} + +#ifdef CONFIG_ARM64_SSBD +void arm64_set_ssbd_mitigation(bool state); +#else +static inline void arm64_set_ssbd_mitigation(bool state) {} +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 3483ed12d9a422c7f5c4b61a44366db6b4dadcd8..9c5ec3690774cfec3fd8121846717d32c5ec3549 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -75,6 +75,7 @@ #define ARM_CPU_IMP_CAVIUM 0x43 #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 +#define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -100,6 +101,9 @@ #define QCOM_CPU_PART_FALKOR 0xC00 #define QCOM_CPU_PART_KRYO 0x200 +#define NVIDIA_CPU_PART_DENVER 0x003 +#define NVIDIA_CPU_PART_CARMEL 0x004 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) @@ -116,6 +120,8 @@ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) +#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) +#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index ae0ef18f68b90181a23b155b334d3485a9e8785f..e873dc6f40a45d72233e9685efd1acb3f4fbba2e 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -93,6 +93,7 @@ void arch_setup_new_exec(void); #define TIF_SINGLESTEP 21 #define TIF_32BIT 22 /* 32bit process */ #define TIF_MM_RELEASED 24 +#define TIF_SSBD 25 /* Wants SSB mitigation */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 255c8c36f487f2129d17ce7617996f60ea49a9e7..a15a07e94182a3978b4b00639791f472f45cc923 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -59,6 +59,7 @@ arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o ifeq ($(CONFIG_KVM),y) 
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o endif +arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index b5a28336c07712af8d10aa62f1669b8a798065d8..e28b8bed64172470c4b7980329abeddef781c11f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -16,7 +16,9 @@ * along with this program. If not, see . */ +#include #include +#include #include #include #include @@ -148,8 +150,6 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, } #include -#include -#include static void call_smc_arch_workaround_1(void) { @@ -228,6 +228,142 @@ static int qcom_enable_link_stack_sanitization(void *data) } #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ +#ifdef CONFIG_ARM64_SSBD +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); + +int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; + +static const struct ssbd_options { + const char *str; + int state; +} ssbd_options[] = { + { "force-on", ARM64_SSBD_FORCE_ENABLE, }, + { "force-off", ARM64_SSBD_FORCE_DISABLE, }, + { "kernel", ARM64_SSBD_KERNEL, }, +}; + +static int __init ssbd_cfg(char *buf) +{ + int i; + + if (!buf || !buf[0]) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { + int len = strlen(ssbd_options[i].str); + + if (strncmp(buf, ssbd_options[i].str, len)) + continue; + + ssbd_state = ssbd_options[i].state; + return 0; + } + + return -EINVAL; +} +early_param("ssbd", ssbd_cfg); + +void arm64_set_ssbd_mitigation(bool state) +{ + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL); + break; + + default: + WARN_ON_ONCE(1); + break; + } +} + +static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, + int scope) +{ + struct arm_smccc_res res; + bool required = true; + s32 val; + + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (psci_ops.smccc_version == SMCCC_VERSION_1_0) { + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + } + + switch (psci_ops.conduit) { + case PSCI_CONDUIT_HVC: + arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + break; + + case PSCI_CONDUIT_SMC: + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_2, &res); + break; + + default: + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + } + + val = (s32)res.a0; + + switch (val) { + case SMCCC_RET_NOT_SUPPORTED: + ssbd_state = ARM64_SSBD_UNKNOWN; + return false; + + case SMCCC_RET_NOT_REQUIRED: + pr_info_once("%s mitigation not required\n", entry->desc); + ssbd_state = ARM64_SSBD_MITIGATED; + return false; + + case SMCCC_RET_SUCCESS: + required = true; + break; + + case 1: /* Mitigation not required on this CPU */ + required = false; + break; + + default: + WARN_ON(1); + return false; + } + + switch (ssbd_state) { + case ARM64_SSBD_FORCE_DISABLE: + pr_info_once("%s disabled from command-line\n", entry->desc); + arm64_set_ssbd_mitigation(false); + required = false; + break; + + case ARM64_SSBD_KERNEL: + if (required) { + __this_cpu_write(arm64_ssbd_callback_required, 1); + arm64_set_ssbd_mitigation(true); + } + break; + + case ARM64_SSBD_FORCE_ENABLE: + pr_info_once("%s forced from command-line\n", entry->desc); + arm64_set_ssbd_mitigation(true); + required = true; + break; + + default: + WARN_ON(1); + break; + } + + 
return required; +} +#endif /* CONFIG_ARM64_SSBD */ + #define MIDR_RANGE(model, min, max) \ .def_scope = SCOPE_LOCAL_CPU, \ .matches = is_affected_midr_range, \ @@ -425,6 +561,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), .enable = enable_smccc_arch_workaround_1, }, +#endif +#ifdef CONFIG_ARM64_SSBD + { + .desc = "Speculative Store Bypass Disable", + .def_scope = SCOPE_LOCAL_CPU, + .capability = ARM64_SSBD, + .matches = has_ssbd_mitigation, + }, #endif { } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index aacc6a18ac379c3c7d601b4012ef6758c3e3ab51..5c0a83096aaa7a22b9b8d66633160ebb92d47194 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -877,7 +877,7 @@ static int __init parse_kpti(char *str) __kpti_forced = enabled ? 1 : -1; return 0; } -__setup("kpti=", parse_kpti); +early_param("kpti", parse_kpti); #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #ifdef CONFIG_ARM64_HW_AFDBM diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 93958d1341bbd7f2f54f970acf0a3465a3d8d7f0..3a61a8a3de392c156cd8129bd74fe1930e703759 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -18,6 +18,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -137,6 +138,24 @@ alternative_else_nop_endif add \dst, \dst, #(\sym - .entry.tramp.text) .endm + // This macro corrupts x0-x3. It is the caller's duty + // to save/restore them if required. + .macro apply_ssbd, state, targ, tmp1, tmp2 +#ifdef CONFIG_ARM64_SSBD + ldr \tmp1, =ssbd_state + ldr \tmp1, [\tmp1] + cmp \tmp1, #ARM64_SSBD_KERNEL + b.ne \targ + ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1 + cbz \tmp2, \targ + ldr \tmp2, [tsk, #TSK_TI_FLAGS] + tbnz \tmp2, #TIF_SSBD, \targ + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 + mov w1, #\state + smc #0 +#endif + .endm + .macro kernel_entry, el, regsize = 64 .if \regsize == 32 mov w0, w0 // zero upper 32 bits of x0 @@ -163,6 +182,14 @@ alternative_else_nop_endif ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug disable_step_tsk x19, x20 // exceptions when scheduling. + apply_ssbd 1, 1f, x22, x23 + +#ifdef CONFIG_ARM64_SSBD + ldp x0, x1, [sp, #16 * 0] + ldp x2, x3, [sp, #16 * 1] +#endif +1: + mov x29, xzr // fp pointed to user-space .else add x21, sp, #S_FRAME_SIZE @@ -301,6 +328,8 @@ alternative_if ARM64_WORKAROUND_845719 alternative_else_nop_endif #endif 3: + apply_ssbd 0, 5f, x0, x1 +5: .endif msr elr_el1, x21 // set up the return data diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 095d3c170f5d2e44eb1f849009ef3b658bb2c667..a028cc95afe17383485877553657d2f21b230043 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -313,6 +313,17 @@ int swsusp_arch_suspend(void) sleep_cpu = -EINVAL; __cpu_suspend_exit(); + + /* + * Just in case the boot kernel did turn the SSBD + * mitigation off behind our back, let's set the state + * to what we expect it to be. 
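The apply_ssbd macro added to entry.S above issues the ARM_SMCCC_ARCH_WORKAROUND_2 firmware call with state 1 on kernel entry and state 0 on kernel exit, and skips the call unless the dynamic ("kernel") policy applies to this CPU and task. A rough C rendering of that decision is sketched below, reusing the helpers introduced by this series; it is illustrative only, since the real logic runs in assembly before a usable C environment exists and emits the SMC directly rather than going through arm64_set_ssbd_mitigation().

/* Sketch of the per-exception decision made by the apply_ssbd macro. */
static void apply_ssbd_sketch(bool kernel_entry)
{
        /* Only the "kernel" policy toggles the mitigation dynamically. */
        if (arm64_get_ssbd_state() != ARM64_SSBD_KERNEL)
                return;

        /* Firmware reported that this CPU does not need the call. */
        if (!__this_cpu_read(arm64_ssbd_callback_required))
                return;

        /* TIF_SSBD tasks keep the mitigation on in user space as well. */
        if (test_thread_flag(TIF_SSBD))
                return;

        /* ARM_SMCCC_ARCH_WORKAROUND_2: enable on entry, disable on exit. */
        arm64_set_ssbd_mitigation(kernel_entry);
}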
+ */ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_FORCE_ENABLE: + case ARM64_SSBD_KERNEL: + arm64_set_ssbd_mitigation(true); + } } local_dbg_restore(flags); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9cbb6123208f8dd4164fa90316a12bbf05489c13..edaf346d13d5fe31f4ec1a10d5a9647e46d92274 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -247,15 +248,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, switch (note_type) { case NT_ARM_HW_BREAK: - if (idx < ARM_MAX_BRP) - bp = tsk->thread.debug.hbp_break[idx]; + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + bp = tsk->thread.debug.hbp_break[idx]; break; case NT_ARM_HW_WATCH: - if (idx < ARM_MAX_WRP) - bp = tsk->thread.debug.hbp_watch[idx]; + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + bp = tsk->thread.debug.hbp_watch[idx]; break; } +out: return bp; } @@ -1194,9 +1200,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); /* Watchpoint */ if (num < 0) { ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); @@ -1207,7 +1211,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, } else { ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); } - set_fs(old_fs); if (!ret) ret = put_user(kdata, data); @@ -1220,7 +1223,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata = 0; - mm_segment_t old_fs = get_fs(); if (num == 0) return 0; @@ -1229,12 +1231,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, if (ret) return ret; - set_fs(KERNEL_DS); if (num < 0) ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); else ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); - set_fs(old_fs); return ret; } diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 0bdc96c61bc0f2b14f627142a2778409c9c6a7d0..43442b3a463f59417ab00a3e993aa02d22b1cad9 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -676,11 +676,12 @@ static void do_signal(struct pt_regs *regs) unsigned long continue_addr = 0, restart_addr = 0; int retval = 0; struct ksignal ksig; + bool syscall = in_syscall(regs); /* * If we were from a system call, check for system call restarting... */ - if (in_syscall(regs)) { + if (syscall) { continue_addr = regs->pc; restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); retval = regs->regs[0]; @@ -732,7 +733,7 @@ static void do_signal(struct pt_regs *regs) * Handle restarting a different system call. As above, if a debugger * has chosen to restart at a different PC, ignore the restart. 
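The ptrace_hbp_get_event() change above follows the usual Spectre-v1 hardening pattern: reject an out-of-range index first, then clamp it with array_index_nospec() so that a mis-speculated bounds check cannot be used to read past the end of the array. A minimal sketch of the pattern, with a hypothetical table and bound:

#include <linux/nospec.h>

#define NR_SLOTS 16                     /* hypothetical bound */

static void *slots[NR_SLOTS];

static void *get_slot(unsigned int idx)
{
        if (idx >= NR_SLOTS)
                return NULL;

        /* Clamp idx under speculation; it can no longer exceed NR_SLOTS - 1. */
        idx = array_index_nospec(idx, NR_SLOTS);

        return slots[idx];
}

The same shape appears in both the NT_ARM_HW_BREAK and NT_ARM_HW_WATCH cases: the early goto keeps the architectural behaviour identical, while the clamp only matters under speculative execution.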
*/ - if (in_syscall(regs) && regs->pc == restart_addr) { + if (syscall && regs->pc == restart_addr) { if (retval == -ERESTART_RESTARTBLOCK) setup_restart_syscall(regs); user_rewind_single_step(current); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index a6c8d9455fa076f825a1309f56478e11fdc1f2da..043f562dde165e2436686270c971a27d57f1d9de 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -355,7 +355,7 @@ void __cpu_die(unsigned int cpu) pr_crit("CPU%u: cpu didn't die\n", cpu); return; } - pr_notice("CPU%u: shutdown\n", cpu); + pr_info("CPU%u: shutdown\n", cpu); /* * Now that the dying CPU is beyond the point of no return w.r.t. diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c new file mode 100644 index 0000000000000000000000000000000000000000..3432e5ef9f41882c06462b7f3ec4ff91f02fd931 --- /dev/null +++ b/arch/arm64/kernel/ssbd.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 ARM Ltd, All Rights Reserved. + */ + +#include +#include +#include + +#include + +/* + * prctl interface for SSBD + * FIXME: Drop the below ifdefery once merged in 4.18. + */ +#ifdef PR_SPEC_STORE_BYPASS +static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl) +{ + int state = arm64_get_ssbd_state(); + + /* Unsupported */ + if (state == ARM64_SSBD_UNKNOWN) + return -EINVAL; + + /* Treat the unaffected/mitigated state separately */ + if (state == ARM64_SSBD_MITIGATED) { + switch (ctrl) { + case PR_SPEC_ENABLE: + return -EPERM; + case PR_SPEC_DISABLE: + case PR_SPEC_FORCE_DISABLE: + return 0; + } + } + + /* + * Things are a bit backward here: the arm64 internal API + * *enables the mitigation* when the userspace API *disables + * speculation*. So much fun. + */ + switch (ctrl) { + case PR_SPEC_ENABLE: + /* If speculation is force disabled, enable is not allowed */ + if (state == ARM64_SSBD_FORCE_ENABLE || + task_spec_ssb_force_disable(task)) + return -EPERM; + task_clear_spec_ssb_disable(task); + clear_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_DISABLE: + if (state == ARM64_SSBD_FORCE_DISABLE) + return -EPERM; + task_set_spec_ssb_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); + break; + case PR_SPEC_FORCE_DISABLE: + if (state == ARM64_SSBD_FORCE_DISABLE) + return -EPERM; + task_set_spec_ssb_disable(task); + task_set_spec_ssb_force_disable(task); + set_tsk_thread_flag(task, TIF_SSBD); + break; + default: + return -ERANGE; + } + + return 0; +} + +int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, + unsigned long ctrl) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_set(task, ctrl); + default: + return -ENODEV; + } +} + +static int ssbd_prctl_get(struct task_struct *task) +{ + switch (arm64_get_ssbd_state()) { + case ARM64_SSBD_UNKNOWN: + return -EINVAL; + case ARM64_SSBD_FORCE_ENABLE: + return PR_SPEC_DISABLE; + case ARM64_SSBD_KERNEL: + if (task_spec_ssb_force_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_disable(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE; + return PR_SPEC_PRCTL | PR_SPEC_ENABLE; + case ARM64_SSBD_FORCE_DISABLE: + return PR_SPEC_ENABLE; + default: + return PR_SPEC_NOT_AFFECTED; + } +} + +int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) +{ + switch (which) { + case PR_SPEC_STORE_BYPASS: + return ssbd_prctl_get(task); + default: + return -ENODEV; + } +} +#endif /* PR_SPEC_STORE_BYPASS */ diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 
77cd655e6eb72bb72f284172f2cbb0685309fd81..7a655e60cf4b3f342dcedd7e8a425a08e89c2072 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -62,6 +62,14 @@ void notrace __cpu_suspend_exit(void) */ if (hw_breakpoint_restore) hw_breakpoint_restore(cpu); + + /* + * On resume, firmware implementing dynamic mitigation will + * have turned the mitigation on. If the user has forcefully + * disabled it, make sure their wishes are obeyed. + */ + if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) + arm64_set_ssbd_mitigation(false); } /* diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index cb3e4393b412ed979387a745098e4de04d0a172a..8bdfb7a00d95d2a9794210016a83ccd611e18b81 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -68,6 +68,12 @@ jiffies = jiffies_64; #define TRAMP_TEXT #endif +#define RTIC_BSS \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__bss_rtic_start) = .; \ + KEEP(*(.bss.rtic)) \ + . = ALIGN(PAGE_SIZE); \ + VMLINUX_SYMBOL(__bss_rtic_end) = .; /* * The size of the PE/COFF section that covers the kernel image, which * runs from stext to _edata, must be a round multiple of the PE/COFF @@ -237,6 +243,10 @@ SECTIONS STABS_DEBUG HEAD_SYMBOLS + + .bss : { /* bss segment */ + RTIC_BSS + } } /* diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 9c55acd236e6ba0ab871d7b4484e5f00d061bbc1..195233513c5bb0cd6ac0ab72830b50207ead5e04 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -1979,7 +1979,7 @@ bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping) { unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long); int vmid = VMID_HLOS; - bool min_iova_align = 0; + int min_iova_align = 0; iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN, diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index a3e6e81d6d63f39402c832ad3c7d0169b1d2ffff..9e493cab40a963e3de7ed913a23fc95810a0babe 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -358,14 +358,12 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -static int __do_page_fault(struct mm_struct *mm, unsigned long addr, +static int __do_page_fault(struct vm_area_struct *vma, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, struct task_struct *tsk) { - struct vm_area_struct *vma; int fault; - vma = find_vma(mm, addr); fault = VM_FAULT_BADMAP; if (unlikely(!vma)) goto out; @@ -408,6 +406,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, int fault, sig, code, major = 0; unsigned long vm_flags = VM_READ | VM_WRITE; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + struct vm_area_struct *vma = NULL; if (notify_page_fault(regs, esr)) return 0; @@ -446,6 +445,14 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + /* + * let's try a speculative page fault without grabbing the + * mmap_sem. + */ + fault = handle_speculative_fault(mm, addr, mm_flags, &vma); + if (fault != VM_FAULT_RETRY) + goto done; + /* * As per x86, we may deadlock here. 
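The prctl hooks added in arch/arm64/kernel/ssbd.c above plug into the generic PR_SPEC_STORE_BYPASS speculation control, with the inverted sense the comment there points out: a task that wants the Speculative Store Bypass mitigation kept on (and hence TIF_SSBD set) requests PR_SPEC_DISABLE, i.e. it disables the speculation, not the mitigation. A small user-space sketch of how a security-sensitive process might opt in, assuming headers that expose the PR_SET/GET_SPECULATION_CTRL commands:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        int state;

        /* Ask the kernel to keep Speculative Store Bypass disabled for us. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Returns a PR_SPEC_* bitmask, e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE. */
        state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
        printf("ssb state: %d\n", state);

        return 0;
}

On a kernel where the state is ARM64_SSBD_UNKNOWN both calls fail with EINVAL, matching the "Unsupported" case above.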
However, since the kernel only * validly references user space from well defined areas of the code, @@ -468,7 +475,10 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, #endif } - fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk); + if (!vma || !can_reuse_spf_vma(vma, addr)) + vma = find_vma(mm, addr); + + fault = __do_page_fault(vma, addr, mm_flags, vm_flags, tsk); major |= fault & VM_FAULT_MAJOR; if (fault & VM_FAULT_RETRY) { @@ -491,11 +501,20 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; mm_flags |= FAULT_FLAG_TRIED; + + /* + * Do not try to reuse this vma and fetch it + * again since we will release the mmap_sem. + */ + vma = NULL; + goto retry; } } up_read(&mm->mmap_sem); +done: + /* * Handle the "normal" (no error) case first. */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 31f8d30e2a5fbb146914fc0ec6e5ec9295f62dd5..22f0c81e5504cec16d97f49c715e921d287463be 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -242,8 +242,9 @@ ENDPROC(idmap_cpu_replace_ttbr1) .macro __idmap_kpti_put_pgtable_ent_ng, type orr \type, \type, #PTE_NG // Same bit for blocks and pages - str \type, [cur_\()\type\()p] // Update the entry and ensure it - dc civac, cur_\()\type\()p // is visible to all CPUs. + str \type, [cur_\()\type\()p] // Update the entry and ensure + dmb sy // that it is visible to all + dc civac, cur_\()\type\()p // CPUs. .endm /* diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 66f5e9a61efca05782cd45d4fec819a4993cfb15..7288e31d37139d0f5c6beb637b5f24fcf7de3f7b 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, memcpy((void *) dst, src, count); } +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset((void __force *)addr, value, size); +} + #define PCI_IO_ADDR (volatile void __iomem *) /* diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c index 617506d1a5596679b19b27af252c3199e61ab1f1..7cd0a2259269659b2b90222c074d06d18bc5b907 100644 --- a/arch/hexagon/lib/checksum.c +++ b/arch/hexagon/lib/checksum.c @@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) memcpy(dst, src, len); return csum_partial(dst, len, sum); } +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 22123f7e8f750c4c16fb456952810af9e777e415..2004b3f72d804a3fa61667d41405fd374b9bf3da 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -1017,7 +1017,7 @@ int __init mac_platform_init(void) struct resource swim_rsrc = { .flags = IORESOURCE_MEM, .start = (resource_size_t)swim_base, - .end = (resource_size_t)swim_base + 0x2000, + .end = (resource_size_t)swim_base + 0x1FFF, }; platform_device_register_simple("swim", -1, &swim_rsrc, 1); diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c index c2a38321c96d6dd94e96554687db7d2b362fea05..3b420f6d88222f7551c0a462105c887757454626 100644 --- a/arch/m68k/mm/kmap.c +++ b/arch/m68k/mm/kmap.c @@ -89,7 +89,8 @@ static inline void free_io_area(void *addr) for (p = &iolist ; (tmp = *p) ; p = &tmp->next) { if (tmp->addr == addr) { *p = tmp->next; - __iounmap(tmp->addr, tmp->size); + /* remove gap added in get_io_area() */ + __iounmap(tmp->addr, tmp->size - IO_SIZE); kfree(tmp); return; } diff --git 
a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 6054d49e608eec038e1bbd49599bc783270aa09a..8c9cbf13d32a0a471bc6f2653bbb3c459b1b2c2c 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void) */ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) cpu_wait = NULL; + + /* + * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail" + * Enable ExternalSync for sync instruction to take effect + */ + set_c0_config7(MIPS_CONF7_ES); break; #endif } diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 2cd49b60e030424b624a8fb26f5ace63997b1ed6..f7aad80c69ab24c091ff8dabcaada310adcf95c0 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -51,6 +51,8 @@ ranges = <0x02000000 0 0x40000000 0x40000000 0 0x40000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci0_intc 1>, <0 0 0 2 &pci0_intc 2>, @@ -79,6 +81,8 @@ ranges = <0x02000000 0 0x20000000 0x20000000 0 0x20000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci1_intc 1>, <0 0 0 2 &pci1_intc 2>, @@ -107,6 +111,8 @@ ranges = <0x02000000 0 0x16000000 0x16000000 0 0x100000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci2_intc 1>, <0 0 0 2 &pci2_intc 2>, diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 0cbf3af37ecad9d195847c49ddffb15a6165fc4d..cea8ad864b3f6f416cb45687bfbcb5bd882933a7 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr) #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) #define war_io_reorder_wmb() wmb() #else -#define war_io_reorder_wmb() do { } while (0) +#define war_io_reorder_wmb() barrier() #endif #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ @@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ BUG(); \ } \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__mem, __val); \ } @@ -412,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ __val = *__addr; \ slow; \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__addr, __val); \ } diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index a6810923b3f0214dfa36eab4a28320a65bba7790..60c787d943b059ed33561bb127e60ce07ef0198c 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -680,6 +680,8 @@ #define MIPS_CONF7_WII (_ULCAST_(1) << 31) #define MIPS_CONF7_RPS (_ULCAST_(1) << 2) +/* ExternalSync */ +#define MIPS_CONF7_ES (_ULCAST_(1) << 8) #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) #define MIPS_CONF7_AR (_ULCAST_(1) << 16) @@ -2745,6 +2747,7 @@ __BUILD_SET_C0(status) __BUILD_SET_C0(cause) __BUILD_SET_C0(config) __BUILD_SET_C0(config5) +__BUILD_SET_C0(config7) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index f2ee7e1e3342e498be961f8995fc91b1de1f2744..cff52b283e03843519201ca8fe8754e0899c0c3c 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra) EXPORT_SYMBOL(_mcount) PTR_LA t1, ftrace_stub PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ - bne t1, 
t2, static_trace + beq t1, t2, fgraph_trace nop + MCOUNT_SAVE_REGS + + move a0, ra /* arg1: self return address */ + jalr t2 /* (1) call *ftrace_trace_function */ + move a1, AT /* arg2: parent's return address */ + + MCOUNT_RESTORE_REGS + +fgraph_trace: #ifdef CONFIG_FUNCTION_GRAPH_TRACER + PTR_LA t1, ftrace_stub PTR_L t3, ftrace_graph_return bne t1, t3, ftrace_graph_caller nop @@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount) bne t1, t3, ftrace_graph_caller nop #endif - b ftrace_stub -#ifdef CONFIG_32BIT - addiu sp, sp, 8 -#else - nop -#endif -static_trace: - MCOUNT_SAVE_REGS - - move a0, ra /* arg1: self return address */ - jalr t2 /* (1) call *ftrace_trace_function */ - move a1, AT /* arg2: parent's return address */ - - MCOUNT_RESTORE_REGS #ifdef CONFIG_32BIT addiu sp, sp, 8 #endif + .globl ftrace_stub ftrace_stub: RETURN_BACK diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 513826a43efd8a04a08bc2a4e38761b6add43743..6a71d3151a232b03d9c42aef73a9ae2bc7e3f60e 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data) * Checks all the children of @parent for a matching @id. If none * found, it allocates a new device and returns it. */ -static struct parisc_device * alloc_tree_node(struct device *parent, char id) +static struct parisc_device * __init alloc_tree_node( + struct device *parent, char id) { struct match_id_data d = { .id = id, @@ -825,8 +826,8 @@ void walk_lower_bus(struct parisc_device *dev) * devices which are not physically connected (such as extra serial & * keyboard ports). This problem is not yet solved. */ -static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, - struct device *parent) +static void __init walk_native_bus(unsigned long io_io_low, + unsigned long io_io_high, struct device *parent) { int i, devices_found = 0; unsigned long hpa = io_io_low; diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 30c28ab145409b5966f7237ec2b6ca07121adc10..ab4d5580bb02b298d90caf83d083496025a8ba9e 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -418,8 +418,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) } #ifdef CONFIG_PROC_FS -int __init -setup_profiling_timer(unsigned int multiplier) +int setup_profiling_timer(unsigned int multiplier) { return -EINVAL; } diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index f7e684560186f9c3d5db133b8e66281c0f3c0e12..42a873226a04b6b2dc98900d8a1cca970db3fbb2 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -205,7 +205,7 @@ static int __init rtc_init(void) device_initcall(rtc_init); #endif -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { static struct pdc_tod tod_data; if (pdc_tod_read(&tod_data) == 0) { diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 6f07c687fc053ae0d1228a5fb3f214a3f3dd44d4..c194f4c8e66b79f36b19a6d9a473ee9f66487fa0 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -597,6 +597,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) * actually hit this code path. 
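Looking back at the MIPS ExternalSync fix above: the set_c0_config7() call used for the BCM4706 erratum is not hand-written, it comes from the __BUILD_SET_C0(config7) line added to mipsregs.h, which generates read-modify-write accessors for the named CP0 register. A simplified sketch of what the generated setter does (the real expansion also provides clear_c0_config7() and change_c0_config7()):

/* Simplified equivalent of the generated set_c0_config7() accessor. */
static inline unsigned int set_c0_config7_sketch(unsigned int set)
{
        unsigned int old = read_c0_config7();   /* read CP0 Config7         */

        write_c0_config7(old | set);            /* e.g. old | MIPS_CONF7_ES */

        return old;                             /* previous register value  */
}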
*/ + isync slbie r6 slbie r6 /* Workaround POWER5 < DD2.1 issue */ slbmte r7,r0 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 29d2b60501408eb0968089ea244ffa652a2da7f3..d0020bc1f2095c6d1433dc390d30aa327b99a9e6 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1155,6 +1155,9 @@ void fadump_cleanup(void) init_fadump_mem_struct(&fdm, be64_to_cpu(fdm_active->cpu_state_data.destination_address)); fadump_invalidate_dump(&fdm); + } else if (fw_dump.dump_registered) { + /* Un-register Firmware-assisted dump if it was registered. */ + fadump_unregister_dump(&fdm); } } diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 53b9c1dfd7d978dddf909d3699e06713d83c025a..ceafad83ef50b013e8ceffe155ba017f80ea4635 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c @@ -175,8 +175,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) if (cpu_has_feature(CPU_FTR_DAWR)) { length_max = 512 ; /* 64 doublewords */ /* DAWR region can't cross 512 boundary */ - if ((bp->attr.bp_addr >> 10) != - ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) + if ((bp->attr.bp_addr >> 9) != + ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9)) return -EINVAL; } if (info->len > diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index f52ad5bb710960906b8ae61400688845e2811dd5..81750d9624abbd14164e2a740e43b4b7ea393b9f 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -2362,6 +2362,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, /* Create a new breakpoint request if one doesn't exist already */ hw_breakpoint_init(&attr); attr.bp_addr = hw_brk.address; + attr.bp_len = 8; arch_bp_generic_fields(hw_brk.type, &attr.bp_type); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index da6ba9ba73eda7065616daf9ca250a2678297fab..b73961b95c345855d291db431ef896ce9078dd33 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1131,7 +1131,7 @@ static int init_nest_pmu_ref(void) static void cleanup_all_core_imc_memory(void) { - int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); + int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); struct imc_mem_info *ptr = core_imc_pmu->mem_info; int size = core_imc_pmu->counter_mem_size; @@ -1239,7 +1239,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent, if (!pmu_ptr->pmu.name) return -ENOMEM; - nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core); + nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info), GFP_KERNEL); diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h index c9a50362343107b079875233faac064301435084..e9a6c35f8a297a351335a1d185217bfba12d12af 100644 --- a/arch/powerpc/platforms/powernv/copy-paste.h +++ b/arch/powerpc/platforms/powernv/copy-paste.h @@ -42,5 +42,6 @@ static inline int vas_paste(void *paste_address, int offset) : "b" (offset), "b" (paste_address) : "memory", "cr0"); - return (cr >> CR0_SHIFT) & CR0_MASK; + /* We mask with 0xE to ignore SO */ + return (cr >> CR0_SHIFT) & 0xE; } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 443d5ca719958e5170374edbcaaf7d2cc52b8104..028d6d12ba32c000f31056621bf4b61936eaf00e 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ 
b/arch/powerpc/platforms/powernv/idle.c @@ -78,7 +78,7 @@ static int pnv_save_sprs_for_deep_states(void) uint64_t msr_val = MSR_IDLE; uint64_t psscr_val = pnv_deepest_stop_psscr_val; - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { uint64_t pir = get_hard_smp_processor_id(cpu); uint64_t hsprg0_val = (uint64_t)&paca[cpu]; @@ -741,7 +741,7 @@ static int __init pnv_init_idle_states(void) int cpu; pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n"); - for_each_possible_cpu(cpu) { + for_each_present_cpu(cpu) { int base_cpu = cpu_first_thread_sibling(cpu); int idx = cpu_thread_in_core(cpu); int i; diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index de470caf07848e28864cbb8ff0129e4ed7aaf021..fc222a0c2ac46b06095ed831827d40e84994e3ca 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = { .open = simple_open, }; -static void flush_memory_region(u64 base, u64 size) -{ - unsigned long line_size = ppc64_caches.l1d.size; - u64 end = base + size; - u64 addr; - - base = round_down(base, line_size); - end = round_up(end, line_size); - - for (addr = base; addr < end; addr += line_size) - asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory"); -} - static int check_memblock_online(struct memory_block *mem, void *arg) { if (mem->state != MEM_ONLINE) @@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - /* RCU grace period? */ - flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT), - nr_pages << PAGE_SHIFT); - lock_device_hotplug(); remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); unlock_device_hotplug(); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 57f9e55f4352d41ad61da71671d0d675d336b8b6..677b29ef4532b15c8dfa2aec01b2405ced8454f3 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -3591,7 +3591,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) WARN_ON(pe->table_group.group); } - pnv_pci_ioda2_table_free_pages(tbl); iommu_tce_table_put(tbl); } diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index 4205f6d42b6938cb5a65946932672c3daf19abd4..a5bd03642678906a21e1d5fc8f853b4264bfd2e8 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c @@ -43,7 +43,11 @@ void __ref cpu_probe(void) #endif #if defined(CONFIG_CPU_J2) +#if defined(CONFIG_SMP) unsigned cpu = hard_smp_processor_id(); +#else + unsigned cpu = 0; +#endif if (cpu == 0) of_scan_flat_dt(scan_cache, NULL); if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu); if (cpu != 0) return; diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 9300f7630d2ad8f0bfa6e3360bd5af5c04e98181..efe075a04533dd0d22dc7d99fbcd60f05ecaeb64 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -66,6 +66,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTORE_SIGMASK 7 #define TIF_NOTIFY_RESUME 8 #define TIF_SECCOMP 9 /* secure computing */ +#define TIF_MM_RELEASED 10 /* task MM has been released */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) diff --git 
a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6965ee8c4b8aa8521c5dd686037f600a7baeff83..228732654cfe14c2c9733b1b9830d954a5f4baab 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3331,7 +3331,8 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = NULL; - flip_smm_bit(&x86_pmu.attr_freeze_on_smi); + if (x86_pmu.version > 1) + flip_smm_bit(&x86_pmu.attr_freeze_on_smi); if (!cpuc->shared_regs) return; @@ -3494,6 +3495,8 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_dying = intel_pmu_cpu_dying, }; +static struct attribute *intel_pmu_attrs[]; + static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -3524,6 +3527,8 @@ static __initconst const struct x86_pmu intel_pmu = { .format_attrs = intel_arch3_formats_attr, .events_sysfs_show = intel_event_sysfs_show, + .attrs = intel_pmu_attrs, + .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, @@ -3902,8 +3907,6 @@ __init int intel_pmu_init(void) x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); - - x86_pmu.attrs = intel_pmu_attrs; /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events, when not running in a hypervisor: diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 8243fdbb9b9ca0815be53b68416a75744e154675..2dae3f585c01587a8011f0e56b5e2dc77e7f2421 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3035,11 +3035,19 @@ static struct intel_uncore_type *bdx_msr_uncores[] = { NULL, }; +/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */ +static struct event_constraint bdx_uncore_pcu_constraints[] = { + EVENT_CONSTRAINT(0x80, 0xe, 0x80), + EVENT_CONSTRAINT_END +}; + void bdx_uncore_cpu_init(void) { if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; + + hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } static struct intel_uncore_type bdx_uncore_ha = { diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 4db77731e130120fc36f3697f5ec7f8823ed6637..a04f0c242a289256b1b6516bb408b884bb46907d 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, { unsigned long mask; - asm ("cmp %1,%2; sbb %0,%0;" + asm volatile ("cmp %1,%2; sbb %0,%0;" :"=r" (mask) :"g"(size),"r" (index) :"cc"); diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index b3e32b010ab194ed613034234c403c4067502776..c2c01f84df75f1f9b35a3c898686a82973026d88 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +#define POP_SS_OPCODE 0x1f +#define MOV_SREG_OPCODE 0x8e + +/* + * Intel SDM Vol.3A 6.8.3 states; + * "Any single-step trap that would be delivered following the MOV to SS + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is + * suppressed." + * This function returns true if @insn is MOV SS or POP SS. On these + * instructions, single stepping is suppressed. 
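The barrier.h hunk above marks the cmp/sbb sequence asm volatile; that pair is x86's array_index_mask_nospec(), which yields an all-ones mask when index < size and zero otherwise, so that array_index_nospec() can clamp the index with a plain AND instead of a branch. In plain C the computation looks like the sketch below; it is illustrative only, because the whole point of keeping it in asm is that the compiler must not turn it back into a predictable branch.

/* What the cmp/sbb pair computes -- do not use this in place of the asm. */
static inline unsigned long index_mask_sketch(unsigned long index,
                                              unsigned long size)
{
        return (index < size) ? ~0UL : 0UL;
}

/* array_index_nospec(index, size) then evaluates to "index & mask". */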
+ */ +static inline int insn_masking_exception(struct insn *insn) +{ + return insn->opcode.bytes[0] == POP_SS_OPCODE || + (insn->opcode.bytes[0] == MOV_SREG_OPCODE && + X86_MODRM_REG(insn->modrm.bytes[0]) == 2); +} + #endif /* _ASM_X86_INSN_H */ diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index ee23a43386a2908c140e96b8b85e8b82bc4fbd27..8493303d8b2e140f3c5e27f21d13376c6f68be71 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -107,11 +107,12 @@ struct x86_emulate_ops { * @addr: [IN ] Linear address from which to read. * @val: [OUT] Value read from memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to read from memory. + * @system:[IN ] Whether the access is forced to be at CPL0. */ int (*read_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, - struct x86_exception *fault); + struct x86_exception *fault, bool system); /* * read_phys: Read bytes of standard (non-emulated/special) memory. @@ -129,10 +130,11 @@ struct x86_emulate_ops { * @addr: [IN ] Linear address to which to write. * @val: [OUT] Value write to memory, zero-extended to 'u_long'. * @bytes: [IN ] Number of bytes to write to memory. + * @system:[IN ] Whether the access is forced to be at CPL0. */ int (*write_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, - struct x86_exception *fault); + struct x86_exception *fault, bool system); /* * fetch: Read bytes of standard (non-emulated/special) memory. * Used for instruction fetch. diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index c895f38a7a5eb42b8d51d13f9dff50d520ffc376..0b2330e191694c638b40cc2b5b7f797d31e18d6e 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -751,6 +751,9 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, + { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, + { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 18dd8f22e353ab339aa32a0553a16ab5451363aa..665d0f6cd62f76af9ec04b0196ee9c69568c7f0d 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -773,6 +773,8 @@ static __init void rdt_quirks(void) case INTEL_FAM6_SKYLAKE_X: if (boot_cpu_data.x86_stepping <= 4) set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); + else + set_rdt_options("!l3cat"); } } diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 231ad23b24a98ee59b0b232f038592b405c9e66b..8fec687b3e44e1859196da16abad444d909f10a2 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -48,7 +48,7 @@ static struct dentry *dfs_inj; static u8 n_banks; -#define MAX_FLAG_OPT_SIZE 3 +#define MAX_FLAG_OPT_SIZE 4 #define NBCFG 0x44 enum injection_type { diff --git 
a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 4b8187639c2df38fd961e40ac0f4126787cfe096..c51353569492544c317c318b66385034889aa9b6 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -143,6 +143,11 @@ static struct severity { SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), USER ), + MCESEV( + PANIC, "Data load in unrecoverable area of kernel", + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), + KERNEL + ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 28d27de08545943c9cbb1f91ec052b964b3e45dd..58f887f5e03636de85b7785c95bf3d7d658ce573 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -760,23 +760,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll); static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { - int i, ret = 0; char *tmp; + int i; for (i = 0; i < mca_cfg.banks; i++) { m->status = mce_rdmsrl(msr_ops.status(i)); - if (m->status & MCI_STATUS_VAL) { - __set_bit(i, validp); - if (quirk_no_way_out) - quirk_no_way_out(i, m, regs); - } + if (!(m->status & MCI_STATUS_VAL)) + continue; + + __set_bit(i, validp); + if (quirk_no_way_out) + quirk_no_way_out(i, m, regs); if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + mce_read_aux(m, i); *msg = tmp; - ret = 1; + return 1; } } - return ret; + return 0; } /* @@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* + * Local machine check may already know that we have to panic. + * Broadcast machine check begins rendezvous in mce_start() * Go through all banks in exclusion of the other CPUs. This way we * don't report duplicated events on shared banks because the first one - * to see it will clear it. If this is a Local MCE, then no need to - * perform rendezvous. + * to see it will clear it. */ - if (!lmce) + if (lmce) { + if (no_way_out) + mce_panic("Fatal local machine check", &m, msg); + } else { order = mce_start(&no_way_out); + } for (i = 0; i < cfg->banks; i++) { __clear_bit(i, toclear); @@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code) no_way_out = worst >= MCE_PANIC_SEVERITY; } else { /* - * Local MCE skipped calling mce_reign() - * If we found a fatal error, we need to panic here. + * If there was a fatal machine check we should have + * already called mce_panic earlier in this function. + * Since we re-read the banks, we might have found + * something new. Check again to see if we found a + * fatal error. We call "mce_severity()" again to + * make sure we have the right "msg". 
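The new "Data load in unrecoverable area of kernel" severity entry above is consulted like every other rule in the table: the bank's MCi_STATUS value is masked and compared against the rule's expected bit pattern, and the first matching rule decides the severity (PANIC here, since an unrecoverable data load hit kernel context). A minimal sketch of that first-match classification, with the rule structure simplified from the real severity table:

#include <linux/types.h>

struct sev_rule {
        u64 mask;       /* MCi_STATUS bits the rule cares about */
        u64 result;     /* value those bits must have to match  */
        int severity;   /* e.g. MCE_PANIC_SEVERITY              */
};

static int classify_status(u64 status, const struct sev_rule *rules,
                           int nr_rules)
{
        int i;

        for (i = 0; i < nr_rules; i++)
                if ((status & rules[i].mask) == rules[i].result)
                        return rules[i].severity;       /* first match wins */

        return 0;
}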
*/ - if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) - mce_panic("Machine check from unknown source", - NULL, NULL); + if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) { + mce_severity(&m, cfg->tolerant, &msg, true); + mce_panic("Local fatal machine check!", &m, msg); + } } /* diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index fb095ba0c02fb0c981318d5687822ea24552e959..f24cd9f1799a062a8df2c3c25bb164ba28537f89 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, * little bit simple */ efi_map_sz = efi_get_runtime_map_size(); - efi_map_sz = ALIGN(efi_map_sz, 16); params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); - kbuf.bufsz = params_cmdline_sz + efi_map_sz + + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + sizeof(struct setup_data) + sizeof(struct efi_setup_data); @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; - efi_setup_data_offset = efi_map_offset + efi_map_sz; + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index ce06ec9c2323fad4f477b83e91b84c51c0a58c99..f1030c522e06c868146f7e13f13a6b9c54cbaf67 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -369,6 +369,10 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn) if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION) return 0; + /* We should not singlestep on the exception masking instructions */ + if (insn_masking_exception(insn)) + return 0; + #ifdef CONFIG_X86_64 /* Only x86_64 has RIP relative instructions */ if (insn_rip_relative(insn)) { diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 697a4ce0430827c89be2cbd86caedfac97e884f7..736348ead4218a0007b715efbc1d56bd1bb73e65 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev) /* Skylake */ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev) { - u32 capid0; + u32 capid0, capid5; pci_read_config_dword(pdev, 0x84, &capid0); + pci_read_config_dword(pdev, 0x98, &capid5); - if ((capid0 & 0xc0) == 0xc0) + /* + * CAPID0{7:6} indicate whether this is an advanced RAS SKU + * CAPID5{8:5} indicate that various NVDIMM usage modes are + * enabled, so memory machine check recovery is also enabled. + */ + if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0)) static_branch_inc(&mcsafe_key); + } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ef4efb931efa5b4d29ce489baf879af7ac9fc045..ed8d78fd4f8cf381196890e5257de5f90d4ff21c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -828,16 +828,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) char *str = (trapnr == X86_TRAP_MF) ? 
"fpu exception" : "simd exception"; - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) - return; cond_local_irq_enable(regs); if (!user_mode(regs)) { - if (!fixup_exception(regs, trapnr)) { - task->thread.error_code = error_code; - task->thread.trap_nr = trapnr; + if (fixup_exception(regs, trapnr)) + return; + + task->thread.error_code = error_code; + task->thread.trap_nr = trapnr; + + if (notify_die(DIE_TRAP, str, regs, error_code, + trapnr, SIGFPE) != NOTIFY_STOP) die(str, regs, error_code); - } return; } diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 495c776de4b470f8eb53236a0ddeb2ca8f043b6b..e1ea13ae53b903ec3417f11f2a67351b26521381 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -296,6 +296,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool if (is_prefix_bad(insn)) return -ENOTSUPP; + /* We should not singlestep on the exception masking instructions */ + if (insn_masking_exception(insn)) + return -ENOTSUPP; + if (x86_64) good_insns = good_insns_64; else diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index fab073b195288dbcd266fbdd86b027757ad5093b..5f758568fc448afc457cc533db6683b7c073fd4f 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -811,6 +811,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) return assign_eip_near(ctxt, ctxt->_eip + rel); } +static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear, + void *data, unsigned size) +{ + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true); +} + +static int linear_write_system(struct x86_emulate_ctxt *ctxt, + ulong linear, void *data, + unsigned int size) +{ + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true); +} + static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, @@ -822,7 +835,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false); } static int segmented_write_std(struct x86_emulate_ctxt *ctxt, @@ -836,7 +849,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt, rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); + return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false); } /* @@ -1509,8 +1522,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; - return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, - &ctxt->exception); + return linear_read_system(ctxt, addr, desc, sizeof *desc); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, @@ -1573,8 +1585,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc), - &ctxt->exception); + return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc)); } /* allowed just for 8 bytes segments */ @@ -1588,8 +1599,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, if (rc != X86EMUL_CONTINUE) return rc; - return ctxt->ops->write_std(ctxt, addr, desc, sizeof 
*desc, - &ctxt->exception); + return linear_write_system(ctxt, addr, desc, sizeof *desc); } static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, @@ -1750,8 +1760,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, return ret; } } else if (ctxt->mode == X86EMUL_MODE_PROT64) { - ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, - sizeof(base3), &ctxt->exception); + ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3)); if (ret != X86EMUL_CONTINUE) return ret; if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | @@ -2064,11 +2073,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; - rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); + rc = linear_read_system(ctxt, cs_addr, &cs, 2); if (rc != X86EMUL_CONTINUE) return rc; - rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); + rc = linear_read_system(ctxt, eip_addr, &eip, 2); if (rc != X86EMUL_CONTINUE) return rc; @@ -2912,12 +2921,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif - r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); + r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; - r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); + r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) @@ -3046,35 +3055,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { - const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); - ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; - ret = ops->write_std(ctxt, new_tss_base, - &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link, - &ctxt->exception); + ret = linear_write_system(ctxt, new_tss_base, + &tss_seg.prev_task_link, + sizeof tss_seg.prev_task_link); if (ret != X86EMUL_CONTINUE) return ret; } @@ -3190,38 +3194,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { - const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); - ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof 
tss_seg); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ - ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, - ldt_sel_offset - eip_offset, &ctxt->exception); + ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, + ldt_sel_offset - eip_offset); if (ret != X86EMUL_CONTINUE) return ret; - ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, - &ctxt->exception); + ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; - ret = ops->write_std(ctxt, new_tss_base, - &tss_seg.prev_task_link, - sizeof tss_seg.prev_task_link, - &ctxt->exception); + ret = linear_write_system(ctxt, new_tss_base, + &tss_seg.prev_task_link, + sizeof tss_seg.prev_task_link); if (ret != X86EMUL_CONTINUE) return ret; } @@ -4152,7 +4152,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt) maxphyaddr = eax & 0xff; else maxphyaddr = 36; - rsvd = rsvd_bits(maxphyaddr, 62); + rsvd = rsvd_bits(maxphyaddr, 63); + if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE) + rsvd &= ~CR3_PCID_INVD; } if (new_val & rsvd) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index dc97f2544b6f8e840156725be640477cff3b6426..5d13abecb384514029d6991dc60efdc983a00d27 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1223,7 +1223,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) struct kvm_run *run = vcpu->run; kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result); - return 1; + return kvm_skip_emulated_instruction(vcpu); } int kvm_hv_hypercall(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 029aa13188749561c9f630c8781f90fbe7491a51..cfa155078ebb70b006788c2692d8a3ee9f6e4422 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4756,9 +4756,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, } if (!ret && svm) { - trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, - host_irq, e->gsi, - vcpu_info.vector, + trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, + e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4c88572d2b81ad7726a8e321a828b4bcb2bfb273..90747865205d2eba664bfe16ebe678893c99f1e4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7317,8 +7317,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer, - sizeof(*vmpointer), &e)) { + if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7399,6 +7398,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return 1; } + /* CPL=0 must be checked manually. 
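The check_cr_write() change above widens the CR3 reserved-bit mask to cover bit 63 and then removes CR3_PCID_INVD from it when CR4.PCIDE is set, because with PCIDs enabled bit 63 is the architectural "do not flush" hint on a MOV to CR3 rather than a reserved bit. A condensed sketch of the resulting check, reusing KVM's rsvd_bits() helper (illustrative; the real code also derives maxphyaddr from guest CPUID):

/* Does this MOV-to-CR3 value set reserved bits? Condensed from check_cr_write(). */
static bool cr3_has_reserved_bits(u64 new_cr3, int maxphyaddr, bool pcide)
{
        u64 rsvd = rsvd_bits(maxphyaddr, 63);   /* bits [maxphyaddr, 63]      */

        if (pcide)
                rsvd &= ~CR3_PCID_INVD;         /* bit 63 is legal with PCIDs */

        return new_cr3 & rsvd;
}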
*/ + if (vmx_get_cpl(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); @@ -7458,6 +7463,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu) */ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { + if (vmx_get_cpl(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 0; + } + if (!to_vmx(vcpu)->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; @@ -7790,9 +7800,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &gva)) return 1; - /* _system ok, as hardware has verified cpl=0 */ - kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, - &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); + /* _system ok, nested_vmx_check_permission has verified cpl=0 */ + kvm_write_guest_virt_system(vcpu, gva, &field_value, + (is_long_mode(vcpu) ? 8 : 4), NULL); } nested_vmx_succeed(vcpu); @@ -7828,8 +7838,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, - &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &field_value, + (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7933,10 +7943,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &vmcs_gva)) return 1; - /* ok to use *_system, as hardware has verified cpl=0 */ - if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, - (void *)&to_vmx(vcpu)->nested.current_vmptr, - sizeof(u64), &e)) { + /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ + if (kvm_write_guest_virt_system(vcpu, vmcs_gva, + (void *)&to_vmx(vcpu)->nested.current_vmptr, + sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -7983,8 +7993,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, - sizeof(operand), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -8048,8 +8057,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, - sizeof(operand), &e)) { + if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } @@ -10310,6 +10318,16 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, return true; } +static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + !page_address_valid(vcpu, vmcs12->apic_access_addr)) + return -EINVAL; + else + return 0; +} + static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { @@ -10953,6 +10971,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if 
(nested_vmx_check_apic_access_controls(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -12163,7 +12184,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; - trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index adac01d0181a28e2b63f11210164c42882015c7d..2f3fe25639b345fe6de659369702b8aa9aa9b135 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -836,7 +836,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) } if (is_long_mode(vcpu) && - (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62))) + (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))) return 1; else if (is_pae(vcpu) && is_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) @@ -4492,11 +4492,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, return X86EMUL_CONTINUE; } -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, @@ -4504,12 +4503,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); -static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, - gva_t addr, void *val, unsigned int bytes, - struct x86_exception *exception) +static int emulator_read_std(struct x86_emulate_ctxt *ctxt, + gva_t addr, void *val, unsigned int bytes, + struct x86_exception *exception, bool system) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); + u32 access = 0; + + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) + access |= PFERR_USER_MASK; + + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, @@ -4521,18 +4525,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, return r < 0 ? 
X86EMUL_IO_NEEDED : X86EMUL_CONTINUE; } -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, - gva_t addr, void *val, - unsigned int bytes, - struct x86_exception *exception) +static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, + struct kvm_vcpu *vcpu, u32 access, + struct x86_exception *exception) { - struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, - PFERR_WRITE_MASK, + access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); @@ -4553,6 +4555,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, out: return r; } + +static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception, + bool system) +{ + struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); + u32 access = PFERR_WRITE_MASK; + + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) + access |= PFERR_USER_MASK; + + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + access, exception); +} + +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception) +{ + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + PFERR_WRITE_MASK, exception); +} EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, @@ -5287,8 +5310,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, - .read_std = kvm_read_guest_virt_system, - .write_std = kvm_write_guest_virt_system, + .read_std = emulator_read_std, + .write_std = emulator_write_std, .read_phys = kvm_read_guest_phys_system, .fetch = kvm_fetch_guest_virt, .read_emulated = emulator_read_emulated, @@ -6274,12 +6297,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu) int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; - int op_64_bit, r; - - r = kvm_skip_emulated_instruction(vcpu); + int op_64_bit; - if (kvm_hv_hypercall_enabled(vcpu->kvm)) - return kvm_hv_hypercall(vcpu); + if (kvm_hv_hypercall_enabled(vcpu->kvm)) { + if (!kvm_hv_hypercall(vcpu)) + return 0; + goto out; + } nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); @@ -6300,7 +6324,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; - goto out; + goto out_error; } switch (nr) { @@ -6320,12 +6344,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) ret = -KVM_ENOSYS; break; } -out: +out_error: if (!op_64_bit) ret = (u32)ret; kvm_register_write(vcpu, VCPU_REGS_RAX, ret); + +out: ++vcpu->stat.hypercalls; - return r; + return kvm_skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 6d112d8f799cdd0e9697fea525911a349bc9394a..d4b59cf0dc519d872b4e4a463739e13f24329902 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -213,11 +213,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); u64 get_kvmclock_ns(struct kvm *kvm); -int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, +int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, 
unsigned int bytes, struct x86_exception *exception); -int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 82f5252c723a4a544593067981d90191c7b22c1d..071cbbbb60d96974a9f790fd6b69e3ff9ffe1162 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -706,7 +706,9 @@ void __init init_mem_mapping(void) */ int devmem_is_allowed(unsigned long pagenr) { - if (page_is_ram(pagenr)) { + if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE, + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) + != REGION_DISJOINT) { /* * For disallowed memory regions in the low 1MB range, * request that the page be shown as all zeros. diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index bb77606d04e04710d3dc0e5cefb457e567d52278..a9deb2b0397de7e83fe7c44e5553b7ababbe2bb3 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1159,6 +1159,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { +out_image: image = NULL; if (header) bpf_jit_binary_free(header); @@ -1169,8 +1170,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (proglen != oldproglen) { pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", proglen, oldproglen); - prog = orig_prog; - goto out_addrs; + goto out_image; } break; } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index f7af598c4f553508e4a8dbcee431aa1fb776f9da..ae369c2bbc3ebbf508e7c64bd32a8a9e77950848 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) + if (!pgd_present(*pgd)) continue; for (i = 0; i < PTRS_PER_P4D; i++) { p4d = p4d_offset(pgd, pgd_idx * PGDIR_SIZE + i * P4D_SIZE); - if (!(p4d_val(*p4d) & _PAGE_PRESENT)) + if (!p4d_present(*p4d)) continue; pud = (pud_t *)p4d_page_vaddr(*p4d); diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 754d5391d9fa766b72c2a9c1b5ba4b6098f46d7b..854508b00bbb9ea9f9ca187261cb417c3c01adde 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -64,6 +64,19 @@ static void __init xen_hvm_init_mem_mapping(void) { early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); + + /* + * The virtual address of the shared_info page has changed, so + * the vcpu_info pointer for VCPU 0 is now stale. + * + * The prepare_boot_cpu callback will re-initialize it via + * xen_vcpu_setup, but we can't rely on that to be called for + * old Xen versions (xen_have_vector_callback == 0). + * + * It is, in any case, bad to have a stale vcpu_info pointer + * so reset it now. 
+ */ + xen_vcpu_info_reset(0); } static void __init init_hvm_pv_info(void) diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index c0c756c76afebf08d282fb35c2fc49b641120bb4..db6d90e451de908d6be6165e6e2e47c5379619e0 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -70,6 +71,8 @@ static void cpu_bringup(void) cpu_data(cpu).x86_max_cores = 1; set_cpu_sibling_map(cpu); + speculative_store_bypass_ht_init(); + xen_setup_cpu_clockevents(); notify_cpu_starting(cpu); @@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus) } set_cpu_sibling_map(0); + speculative_store_bypass_ht_init(); + xen_pmu_init(0); if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0)) diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index bae697a06a984536bc51ce21cb3e402d5cfbd065..2986bc88a18e77fb1158037be662002ad94e9ec5 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -336,7 +336,7 @@ do_unaligned_user (struct pt_regs *regs) info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void *) regs->excvaddr; - force_sig_info(SIGSEGV, &info, current); + force_sig_info(SIGBUS, &info, current); } #endif diff --git a/block/bio.c b/block/bio.c index 90f19d7df66cf73ce80e8eade310105bfa17ba3d..61975a2bd9e0dd2224138b760e528a73855006e3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -605,7 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; bio->bi_io_vec = bio_src->bi_io_vec; - + bio->bi_dio_inode = bio_src->bi_dio_inode; bio_clone_blkcg_association(bio, bio_src); } EXPORT_SYMBOL(__bio_clone_fast); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d3f56baee9366220d13db3cc8668ae1a7a1c1994..3dc7c0b4adcbb59e8f1afabba91c72b608d680a0 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1149,18 +1149,16 @@ int blkcg_init_queue(struct request_queue *q) rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); + if (IS_ERR(blkg)) + goto err_unlock; + q->root_blkg = blkg; + q->root_rl.blkg = blkg; spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); - if (IS_ERR(blkg)) - return PTR_ERR(blkg); - - q->root_blkg = blkg; - q->root_rl.blkg = blkg; - ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); @@ -1168,6 +1166,13 @@ int blkcg_init_queue(struct request_queue *q) spin_unlock_irq(q->queue_lock); } return ret; + +err_unlock: + spin_unlock_irq(q->queue_lock); + rcu_read_unlock(); + if (preloaded) + radix_tree_preload_end(); + return PTR_ERR(blkg); } /** @@ -1374,17 +1379,12 @@ void blkcg_deactivate_policy(struct request_queue *q, __clear_bit(pol->plid, q->blkcg_pols); list_for_each_entry(blkg, &q->blkg_list, q_node) { - /* grab blkcg lock too while removing @pd from @blkg */ - spin_lock(&blkg->blkcg->lock); - if (blkg->pd[pol->plid]) { if (pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[pol->plid]); pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } - - spin_unlock(&blkg->blkcg->lock); } spin_unlock_irq(q->queue_lock); diff --git a/block/blk-core.c b/block/blk-core.c index 322941cc379fcc0082e3880481a6679a93973e7d..c13b98d883a02cb721cd1bd7765ce9cf98a80aae 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3152,6 +3152,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) dst->cpu = src->cpu; dst->__sector = blk_rq_pos(src); dst->__data_len = 
blk_rq_bytes(src); + if (src->rq_flags & RQF_SPECIAL_PAYLOAD) { + dst->rq_flags |= RQF_SPECIAL_PAYLOAD; + dst->special_vec = src->special_vec; + } dst->nr_phys_segments = src->nr_phys_segments; dst->ioprio = src->ioprio; dst->extra_len = src->extra_len; diff --git a/block/blk-merge.c b/block/blk-merge.c index 8d60a5bbcef930d514cc3a75a273bc3506922904..4f7e70419ba24ee616f41e5bfa4167887fb7150c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -7,7 +7,7 @@ #include #include #include - +#include #include #include "blk.h" @@ -660,6 +660,11 @@ static void blk_account_io_merge(struct request *req) } } +static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt) +{ + return (!pfk_allow_merge_bio(bio, nxt)); +} + /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. @@ -698,6 +703,8 @@ static struct request *attempt_merge(struct request_queue *q, if (req->write_hint != next->write_hint) return NULL; + if (crypto_not_mergeable(req->bio, next->bio)) + return 0; /* * If we are allowed to merge, then append bio list * from next to rq and release next. merge_requests_fn @@ -829,6 +836,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) if (rq->write_hint != bio->bi_write_hint) return false; + if (crypto_not_mergeable(rq->bio, bio)) + return false; + return true; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 007f966113640bc1c1f3295e856eaf3b75ae0829..49979c095f31c4885c020d469a068738fa30fa15 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -118,6 +118,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); } +static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, + bool reserved) +{ + struct mq_inflight *mi = priv; + + if (rq->part == mi->part) + mi->inflight[rq_data_dir(rq)]++; +} + +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + struct mq_inflight mi = { .part = part, .inflight = inflight, }; + + inflight[0] = inflight[1] = 0; + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi); +} + void blk_freeze_queue_start(struct request_queue *q) { int freeze_depth; @@ -2233,7 +2252,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) mutex_lock(&set->tag_list_lock); list_del_rcu(&q->tag_set_list); - INIT_LIST_HEAD(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_SHARED; @@ -2241,8 +2259,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) blk_mq_update_tag_set_depth(set, false); } mutex_unlock(&set->tag_list_lock); - synchronize_rcu(); + INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, diff --git a/block/blk-mq.h b/block/blk-mq.h index 4933af9d61f736ed1b99630231b6988071b1e6d2..877237e090838648d29f13a7c50c5e1e05fa4bd9 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -136,6 +136,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) } void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); + unsigned int inflight[2]); +void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); #endif diff --git a/block/blk-zoned.c b/block/blk-zoned.c index ff57fb51b3380bb14d7c6e449f4dada1cf6ab904..77fce6f09f781fee9d92242dd2cbdfed74dec9e6 
100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -286,7 +286,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, if (!rep.nr_zones) return -EINVAL; - zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL); + if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone)) + return -ERANGE; + + zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone), + GFP_KERNEL | __GFP_ZERO); if (!zones) return -ENOMEM; @@ -308,7 +312,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, } out: - kfree(zones); + kvfree(zones); return ret; } diff --git a/block/genhd.c b/block/genhd.c index dd305c65ffb05d5016f1c602e65849a2fbf8e418..449ef56bba708920973ef62e20464e8c00a880e5 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part, } } +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]) +{ + if (q->mq_ops) { + blk_mq_in_flight_rw(q, part, inflight); + return; + } + + inflight[0] = atomic_read(&part->in_flight[0]); + inflight[1] = atomic_read(&part->in_flight[1]); +} + struct hd_struct *__disk_get_part(struct gendisk *disk, int partno) { struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl); diff --git a/block/partition-generic.c b/block/partition-generic.c index 08dabcd8b6aefc6844bbb9d9e9c001e6ff71fb33..db57cced9b987371e6c8a3c72ff6721b9d540bda 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev, jiffies_to_msecs(part_stat_read(p, time_in_queue))); } -ssize_t part_inflight_show(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct hd_struct *p = dev_to_part(dev); + struct request_queue *q = part_to_disk(p)->queue; + unsigned int inflight[2]; - return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]), - atomic_read(&p->in_flight[1])); + part_in_flight_rw(q, p, inflight); + return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } #ifdef CONFIG_FAIL_MAKE_REQUEST diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index ce2df8c9c583970be0177d2a50de85316fb1c2b2..7e6a43ffdcbedac9bea63c1c8662924e3b5f5881 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen, return -EINVAL; } + if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) { + /* Discard the BIT STRING metadata */ + if (vlen < 1 || *(const u8 *)value != 0) + return -EBADMSG; + + value++; + vlen--; + } + ctx->cert->raw_sig = value; ctx->cert->raw_sig_size = vlen; return 0; diff --git a/drivers/Makefile b/drivers/Makefile index 9fe69a2413304a64b1b89d6256e8b7884e6129c9..2d4eae87489e60b5712fe8ee375890fda139f44c 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -79,6 +79,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ +obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SCSI) += scsi/ obj-y += nvme/ obj-$(CONFIG_ATA) += ata/ @@ -136,7 +137,6 @@ obj-$(CONFIG_NEW_LEDS) += leds/ obj-$(CONFIG_INFINIBAND) += infiniband/ obj-$(CONFIG_SGI_SN) += sn/ obj-y += firmware/ -obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SUPERH) += sh/ ifndef CONFIG_ARCH_USES_GETTIMEOFFSET obj-y += clocksource/ diff --git a/drivers/acpi/acpi_lpss.c 
b/drivers/acpi/acpi_lpss.c index a2be3fd2c72b3d4b84cbd695ab871798fc2a626f..602ae58ee2d81a9754b9a3beda4c3084695c964d 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -229,11 +229,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = { static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, + .prv_offset = 0x800, .setup = byt_pwm_setup, }; static const struct lpss_device_desc bsw_pwm_dev_desc = { .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY, + .prv_offset = 0x800, .setup = bsw_pwm_setup, }; diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index ebb626ffb5fa2d38c853ddcbbe7227aa09c7f7dd..4bde16fb97d8818f59e893adf9bb642fed5f9d5c 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c @@ -12,23 +12,64 @@ #define pr_fmt(fmt) "ACPI: watchdog: " fmt #include +#include #include #include #include "internal.h" +static const struct dmi_system_id acpi_watchdog_skip[] = { + { + /* + * On Lenovo Z50-70 there are two issues with the WDAT + * table. First some of the instructions use RTC SRAM + * to store persistent information. This does not work well + * with Linux RTC driver. Second, more important thing is + * that the instructions do not actually reset the system. + * + * On this particular system iTCO_wdt seems to work just + * fine so we prefer that over WDAT for now. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033. + */ + .ident = "Lenovo Z50-70", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20354"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"), + }, + }, + {} +}; + +static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void) +{ + const struct acpi_table_wdat *wdat = NULL; + acpi_status status; + + if (acpi_disabled) + return NULL; + + if (dmi_check_system(acpi_watchdog_skip)) + return NULL; + + status = acpi_get_table(ACPI_SIG_WDAT, 0, + (struct acpi_table_header **)&wdat); + if (ACPI_FAILURE(status)) { + /* It is fine if there is no WDAT */ + return NULL; + } + + return wdat; +} + /** * Returns true if this system should prefer ACPI based watchdog instead of * the native one (which are typically the same hardware). */ bool acpi_has_watchdog(void) { - struct acpi_table_header hdr; - - if (acpi_disabled) - return false; - - return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr)); + return !!acpi_watchdog_get_wdat(); } EXPORT_SYMBOL_GPL(acpi_has_watchdog); @@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void) struct platform_device *pdev; struct resource *resources; size_t nresources = 0; - acpi_status status; int i; - status = acpi_get_table(ACPI_SIG_WDAT, 0, - (struct acpi_table_header **)&wdat); - if (ACPI_FAILURE(status)) { + wdat = acpi_watchdog_get_wdat(); + if (!wdat) { /* It is fine if there is no WDAT */ return; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 8082871b409a6d631a80b5d2327e9fcb6a1509d4..2ef0ad6a33d6c692da5465a817ebfde87620c8c6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), }, }, + /* + * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using + * the Low Power S0 Idle firmware interface (see + * https://bugzilla.kernel.org/show_bug.cgi?id=199057). 
+ */ + { + .callback = init_no_lps0, + .ident = "ThinkPad X1 Tablet(2016)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"), + }, + }, {}, }; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 18391d0c0cd7ce51a82f766682208da6c9d2d001..75eb50041c99e6d543baafeee97d984ec78bd950 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -686,7 +686,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); @@ -712,7 +712,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, bool online; int rc; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -776,7 +776,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); for (i = 0; i < 2; i++) { u16 val; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 8b61123d2c3c1cd55489feb389b68735077219d5..781b898e5785e3b9a5a1e153757ff7536d7a4eaf 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -361,6 +361,13 @@ struct ahci_host_priv { * be overridden anytime before the host is activated. */ void (*start_engine)(struct ata_port *ap); + /* + * Optional ahci_stop_engine override, if not set this gets set to the + * default ahci_stop_engine during ahci_save_initial_config, this can + * be overridden anytime before the host is activated. + */ + int (*stop_engine)(struct ata_port *ap); + irqreturn_t (*irq_handler)(int irq, void *dev_instance); /* only required for per-port MSI(-X) support */ diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index de7128d81e9ccbc168627a02bb2a39d3e4f11c5c..0045dacd814b44ec21f87e4acceb07e69056f214 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); } +/** + * ahci_mvebu_stop_engine + * + * @ap: Target ata port + * + * Errata Ref#226 - SATA Disk HOT swap issue when connected through + * Port Multiplier in FIS-based Switching mode. + * + * To avoid the issue, according to design, the bits[11:8, 0] of + * register PxFBS are cleared when Port Command and Status (0x18) bit[0] + * changes its value from 1 to 0, i.e. falling edge of Port + * Command and Status bit[0] sends PULSE that resets PxFBS + * bits[11:8; 0]. + * + * This function is used to override function of "ahci_stop_engine" + * from libahci.c by adding the mvebu work around(WA) to save PxFBS + * value before the PxCMD ST write of 0, then restore PxFBS value. + * + * Return: 0 on success; Error code otherwise. + */ +int ahci_mvebu_stop_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp, port_fbs; + + tmp = readl(port_mmio + PORT_CMD); + + /* check if the HBA is idle */ + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) + return 0; + + /* save the port PxFBS register for later restore */ + port_fbs = readl(port_mmio + PORT_FBS); + + /* setting HBA to idle */ + tmp &= ~PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + + /* + * bit #15 PxCMD signal doesn't clear PxFBS, + * restore the PxFBS register right after clearing the PxCMD ST, + * no need to wait for the PxCMD bit #15. 
+ */ + writel(port_fbs, port_mmio + PORT_FBS); + + /* wait for engine to stop. This could be as long as 500 msec */ + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); + if (tmp & PORT_CMD_LIST_ON) + return -EIO; + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) { @@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev) if (rc) return rc; + hpriv->stop_engine = ahci_mvebu_stop_engine; + if (of_device_is_compatible(pdev->dev.of_node, "marvell,armada-380-ahci")) { dram = mv_mbus_dram_info(); diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index b6b0bf76dfc7bb7fe90f45418aab974bf73b6f87..ab5ac103bfb88349c57d8b6947a3f38694e81ce2 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -94,7 +94,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* * There is a errata on ls1021a Rev1.0 and Rev2.0 which is: diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index c2b5941d9184db2637604864baf4597d398c4911..ad58da7c9affd8e4ec381d8bb0fd7f23d6fa0310 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) PORT_CMD_ISSUE, 0x0, 1, 100)) return -EBUSY; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); ahci_start_fis_rx(ap); /* @@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class, portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = xgene_ahci_do_hardreset(link, deadline, &online); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 3e286d86ab42acbc54c84488e74ffc37f13dff65..5ae268b8514e228b9b7830ad8346e029e7984f13 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) if (!hpriv->start_engine) hpriv->start_engine = ahci_start_engine; + if (!hpriv->stop_engine) + hpriv->stop_engine = ahci_stop_engine; + if (!hpriv->irq_handler) hpriv->irq_handler = ahci_single_level_irq_intr; } @@ -887,9 +890,10 @@ static void ahci_start_port(struct ata_port *ap) static int ahci_deinit_port(struct ata_port *ap, const char **emsg) { int rc; + struct ahci_host_priv *hpriv = ap->host->private_data; /* disable DMA */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) { *emsg = "failed to stop engine"; return rc; @@ -1299,7 +1303,7 @@ int ahci_kick_engine(struct ata_port *ap) int busy, rc; /* stop engine */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) goto out_restart; @@ -1538,7 +1542,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -2064,14 +2068,14 @@ void ahci_error_handler(struct ata_port *ap) if (!(ap->pflags & ATA_PFLAG_FROZEN)) { /* restart engine */ - ahci_stop_engine(ap); + hpriv->stop_engine(ap); hpriv->start_engine(ap); } sata_pmp_error_handler(ap); if (!ata_dev_enabled(ap->link.device)) - ahci_stop_engine(ap); + hpriv->stop_engine(ap); } EXPORT_SYMBOL_GPL(ahci_error_handler); @@ -2118,7 +2122,7 @@ static void ahci_set_aggressive_devslp(struct ata_port 
*ap, bool sleep) return; /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2178,7 +2182,7 @@ static void ahci_enable_fbs(struct ata_port *ap) return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2211,7 +2215,7 @@ static void ahci_disable_fbs(struct ata_port *ap) return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 71008dbabe98ffef68b54047d9eafedc6b9e4774..cad2530a5b52b8940fd965a1382b22a9f1bd4a44 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4543,9 +4543,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM | ATA_HORKAGE_NOLPM, }, - /* Sandisk devices which are known to not handle LPM well */ - { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, - /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index ea20e0eb4d5ac37b613641702c9e2056455f0868..711dd91b5e2c457211a2b34044cc77123a391a0f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ -static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, - va_list args) +static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, + const char *fmt, va_list args) { ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, ATA_EH_DESC_LEN - ehi->desc_len, diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index de4ddd0e8550a66dac6856099180bc59dc622d05..b3ed8f9953a862ea3ae67ef065ca5469330a44e0 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -35,7 +35,7 @@ struct zpodd { static int eject_tray(struct ata_device *dev) { struct ata_taskfile tf; - static const char cdb[] = { GPCMD_START_STOP_UNIT, + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT, 0, 0, 0, 0x02, /* LoEj */ 0, 0, 0, 0, 0, 0, 0, diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index aafb8cc03523212dee4d8d3fe66434af2406ae76..e67815b896fcc40772ac45fdf8e8be7253cecafa 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, int rc; int retry = 100; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 2c2ed9cf879626692706bdf86f447cb4d8c05c26..f9413755177babf1c5f6a5c5663483314cbea2d3 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -14,9 +14,6 @@ menuconfig AUXDISPLAY If you say N, all options in this submenu will be skipped and disabled. -config CHARLCD - tristate "Character LCD core support" if COMPILE_TEST - if AUXDISPLAY config HD44780 @@ -157,8 +154,6 @@ config HT16K33 Say yes here to add support for Holtek HT16K33, RAM mapping 16*8 LED controller driver with keyscan. -endif # AUXDISPLAY - config ARM_CHARLCD bool "ARM Ltd. Character LCD Driver" depends on PLAT_VERSATILE @@ -169,6 +164,8 @@ config ARM_CHARLCD line and the Linux version on the second line, but that's still useful. 
+endif # AUXDISPLAY + config PANEL tristate "Parallel port LCD/Keypad Panel support" depends on PARPORT @@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE printf()-formatted message is valid with newline and escape codes. endif # PANEL + +config CHARLCD + tristate "Character LCD core support" if COMPILE_TEST diff --git a/drivers/base/core.c b/drivers/base/core.c index 425a460f921121bc79207dff3769ffb7baec02df..9db30ee99917bfb245965149ef6656a14266e65c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -217,6 +217,13 @@ struct device_link *device_link_add(struct device *consumer, link->rpm_active = true; } pm_runtime_new_link(consumer); + /* + * If the link is being added by the consumer driver at probe + * time, balance the decrementation of the supplier's runtime PM + * usage counter after consumer probe in driver_probe_device(). + */ + if (consumer->links.status == DL_DEV_PROBING) + pm_runtime_get_noresume(supplier); } get_device(supplier); link->supplier = supplier; @@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer, switch (consumer->links.status) { case DL_DEV_PROBING: /* - * Balance the decrementation of the supplier's - * runtime PM usage counter after consumer probe - * in driver_probe_device(). + * Some callers expect the link creation during + * consumer driver probe to resume the supplier + * even without DL_FLAG_RPM_ACTIVE. */ if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_get_sync(supplier); + pm_runtime_resume(supplier); link->status = DL_STATE_CONSUMER_PROBE; break; @@ -1466,7 +1473,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) dir = kzalloc(sizeof(*dir), GFP_KERNEL); if (!dir) - return NULL; + return ERR_PTR(-ENOMEM); dir->class = class; kobject_init(&dir->kobj, &class_dir_ktype); @@ -1476,7 +1483,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); if (retval < 0) { kobject_put(&dir->kobj); - return NULL; + return ERR_PTR(retval); } return &dir->kobj; } @@ -1578,8 +1585,8 @@ static int device_add_class_symlinks(struct device *dev) struct device_node *of_node = dev_of_node(dev); int error; - if (of_node) { - error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node"); + if (of_node && of_node_kobj(of_node)) { + error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node"); if (error) dev_warn(dev, "Error %d creating of_node link\n",error); /* An error here doesn't warrant bringing down the device */ @@ -1783,6 +1790,10 @@ int device_add(struct device *dev) parent = get_device(dev->parent); kobj = get_device_parent(dev, parent); + if (IS_ERR(kobj)) { + error = PTR_ERR(kobj); + goto parent_error; + } if (kobj) dev->kobj.parent = kobj; @@ -1881,6 +1892,7 @@ int device_add(struct device *dev) kobject_del(&dev->kobj); Error: cleanup_glue_dir(dev, glue_dir); +parent_error: put_device(parent); name_error: kfree(dev->p); @@ -2700,6 +2712,11 @@ int device_move(struct device *dev, struct device *new_parent, device_pm_lock(); new_parent = get_device(new_parent); new_parent_kobj = get_device_parent(dev, new_parent); + if (IS_ERR(new_parent_kobj)) { + error = PTR_ERR(new_parent_kobj); + put_device(new_parent); + goto out; + } pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), __func__, new_parent ? 
dev_name(new_parent) : ""); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index b56ca174f582829f4f3a6b47e87155e868a97fe5..ff58c8efa1efaf86e704688402a497eb10ae1c3a 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -209,6 +209,59 @@ static struct attribute_group cpu_isolated_attr_group = { #endif +static ssize_t show_sched_load_boost(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t rc; + unsigned int boost; + struct cpu *cpu = container_of(dev, struct cpu, dev); + int cpuid = cpu->dev.id; + + boost = per_cpu(sched_load_boost, cpuid); + rc = snprintf(buf, PAGE_SIZE-2, "%d\n", boost); + + return rc; +} + +static ssize_t __ref store_sched_load_boost(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + int boost; + struct cpu *cpu = container_of(dev, struct cpu, dev); + int cpuid = cpu->dev.id; + + err = kstrtoint(strstrip((char *)buf), 0, &boost); + if (err) + return err; + + /* + * -100 is low enough to cancel out CPU's load and make it near zero. + * 1000 is close to the maximum value that cpu_util_freq_{walt,pelt} + * can take without overflow. + */ + if (boost < -100 || boost > 1000) + return -EINVAL; + + per_cpu(sched_load_boost, cpuid) = boost; + + return count; +} + +static DEVICE_ATTR(sched_load_boost, 0644, + show_sched_load_boost, + store_sched_load_boost); + +static struct attribute *sched_cpu_attrs[] = { + &dev_attr_sched_load_boost.attr, + NULL +}; + +static struct attribute_group sched_cpu_attr_group = { + .attrs = sched_cpu_attrs, +}; + static const struct attribute_group *common_cpu_attr_groups[] = { #ifdef CONFIG_KEXEC &crash_note_cpu_attr_group, @@ -216,6 +269,7 @@ static const struct attribute_group *common_cpu_attr_groups[] = { #ifdef CONFIG_HOTPLUG_CPU &cpu_isolated_attr_group, #endif + &sched_cpu_attr_group, NULL }; @@ -226,6 +280,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = { #ifdef CONFIG_HOTPLUG_CPU &cpu_isolated_attr_group, #endif + &sched_cpu_attr_group, NULL }; diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index b3b78079aa9f9e0a69c9f59540e68cb1c1bfcb73..c276ba1c0a19ed1e830586b0530afcf5ffafbf5b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2162,6 +2162,9 @@ int genpd_dev_pm_attach(struct device *dev) genpd_lock(pd); ret = genpd_power_on(pd, 0); genpd_unlock(pd); + + if (ret) + genpd_remove_device(pd, dev); out: return ret ? -EPROBE_DEFER : 0; } diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 0459b1204694567f69c6e37c22774d41f5eab502..d4862775b9f6a475cafa7449ed920365f27ad8a7 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -552,7 +552,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, } /* Scaling up?
Scale voltage before frequency */ - if (freq > old_freq) { + if (freq >= old_freq) { ret = _set_opp_voltage(dev, reg, new_supply); if (ret) goto restore_voltage; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 86258b00a1d4d1960a67fb021babc668884a72a7..6fb64e73bc9678e079a77823b5e4a3e31dba8dca 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = { static void nbd_dev_remove(struct nbd_device *nbd) { struct gendisk *disk = nbd->disk; + struct request_queue *q; + if (disk) { + q = disk->queue; del_gendisk(disk); - blk_cleanup_queue(disk->queue); + blk_cleanup_queue(q); blk_mq_free_tag_set(&nbd->tag_set); disk->private_data = NULL; put_disk(disk); @@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd) static void nbd_size_update(struct nbd_device *nbd) { struct nbd_config *config = nbd->config; + struct block_device *bdev = bdget_disk(nbd->disk, 0); + blk_queue_logical_block_size(nbd->disk->queue, config->blksize); blk_queue_physical_block_size(nbd->disk->queue, config->blksize); set_capacity(nbd->disk, config->bytesize >> 9); + if (bdev) { + if (bdev->bd_disk) + bd_set_size(bdev, config->bytesize); + else + bdev->bd_invalidated = 1; + bdput(bdev); + } kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); } @@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, struct nbd_config *config = nbd->config; config->blksize = blocksize; config->bytesize = blocksize * nr_blocks; + if (nbd->task_recv != NULL) + nbd_size_update(nbd); } static void nbd_complete_rq(struct request *req) @@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b if (ret) return ret; - bd_set_size(bdev, config->bytesize); if (max_part) bdev->bd_invalidated = 1; mutex_unlock(&nbd->config_lock); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index fe4fd8aee19ff1c64e2e2646f8ad9904d6890029..9057dad2a64c874492dbfca5096a574e42e42b71 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3841,7 +3841,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev) { dout("%s rbd_dev %p\n", __func__, rbd_dev); - cancel_delayed_work_sync(&rbd_dev->watch_dwork); cancel_work_sync(&rbd_dev->acquired_lock_work); cancel_work_sync(&rbd_dev->released_lock_work); cancel_delayed_work_sync(&rbd_dev->lock_dwork); @@ -3859,6 +3858,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev) rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED; mutex_unlock(&rbd_dev->watch_mutex); + cancel_delayed_work_sync(&rbd_dev->watch_dwork); ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index c9f0ac083a3e42e22d8666398b13496ed6d5b50c..6f4ebd5e54c8ef3577c960a91e86f5646dfd8779 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -936,6 +936,12 @@ static int qca_setup(struct hci_uart *hu) } else if (ret == -ENOENT) { /* No patch/nvm-config found, run with original fw/config */ ret = 0; + } else if (ret == -EAGAIN) { + /* + * Userspace firmware loader will return -EAGAIN in case no + * patch/nvm-config is found, so run with original fw/config. 
+ */ + ret = 0; } /* Setup bdaddr */ diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c index f97833224b727f2c6cdf4cac545b79f5ee13c40c..178720a2d86a746a981a5d8cd5ce43b950843caf 100644 --- a/drivers/bus/mhi/controllers/mhi_qcom.c +++ b/drivers/bus/mhi/controllers/mhi_qcom.c @@ -24,18 +24,18 @@ #include #include "mhi_qcom.h" -static struct pci_device_id mhi_pcie_device_id[] = { - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0300)}, - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0301)}, - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0302)}, - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, - {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, - {0}, +struct firmware_info { + unsigned int dev_id; + const char *fw_image; + const char *edl_image; }; -static struct pci_driver mhi_pcie_driver; +static const struct firmware_info firmware_table[] = { + {.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"}, + {.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"}, + /* default, set to debug.mbn */ + {.fw_image = "debug.mbn"}, +}; void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) { @@ -345,9 +345,10 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev) struct mhi_controller *mhi_cntrl; struct mhi_dev *mhi_dev; struct device_node *of_node = pci_dev->dev.of_node; + const struct firmware_info *firmware_info; bool use_bb; u64 addr_win[2]; - int ret; + int ret, i; if (!of_node) return ERR_PTR(-ENODEV); @@ -417,6 +418,15 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev) if (ret) goto error_register; + for (i = 0; i < ARRAY_SIZE(firmware_table); i++) { + firmware_info = firmware_table + i; + if (mhi_cntrl->dev_id == firmware_info->dev_id) + break; + } + + mhi_cntrl->fw_image = firmware_info->fw_image; + mhi_cntrl->edl_image = firmware_info->edl_image; + return mhi_cntrl; error_register: @@ -497,6 +507,17 @@ static const struct dev_pm_ops pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) }; +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0300)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0301)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0302)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + static struct pci_driver mhi_pcie_driver = { .name = "mhi", .id_table = mhi_pcie_device_id, diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c index 47276a3ed03e94f6d227c71dc12b250c3558e4bd..b05493fcdd60b7257de7d70aa3cf7cbc0ea1cdbd 100644 --- a/drivers/bus/mhi/core/mhi_boot.c +++ b/drivers/bus/mhi/core/mhi_boot.c @@ -54,7 +54,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) struct image_info *rddm_image = mhi_cntrl->rddm_image; const u32 delayus = 100; u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus; - void __iomem *base = mhi_cntrl->bhi; + void __iomem *base = mhi_cntrl->bhie; MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), @@ -137,7 +137,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) /* download ramdump image from device */ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) { - void __iomem *base = mhi_cntrl->bhi; + void __iomem *base = mhi_cntrl->bhie; rwlock_t *pm_lock = 
&mhi_cntrl->pm_lock; struct image_info *rddm_image = mhi_cntrl->rddm_image; struct mhi_buf *mhi_buf; @@ -219,7 +219,7 @@ EXPORT_SYMBOL(mhi_download_rddm_img); static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, const struct mhi_buf *mhi_buf) { - void __iomem *base = mhi_cntrl->bhi; + void __iomem *base = mhi_cntrl->bhie; rwlock_t *pm_lock = &mhi_cntrl->pm_lock; u32 tx_status; @@ -275,8 +275,8 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, int i, ret; void __iomem *base = mhi_cntrl->bhi; rwlock_t *pm_lock = &mhi_cntrl->pm_lock; - dma_addr_t phys = dma_map_single(mhi_cntrl->dev, buf, size, - DMA_TO_DEVICE); + dma_addr_t dma_addr = dma_map_single(mhi_cntrl->dev, buf, size, + DMA_TO_DEVICE); struct { char *name; u32 offset; @@ -288,7 +288,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, { NULL }, }; - if (dma_mapping_error(mhi_cntrl->dev, phys)) + if (dma_mapping_error(mhi_cntrl->dev, dma_addr)) return -ENOMEM; MHI_LOG("Starting BHI programming\n"); @@ -301,8 +301,10 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, } mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); - mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(phys)); - mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(phys)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK; mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id); @@ -337,12 +339,12 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, goto invalid_pm_state; } - dma_unmap_single(mhi_cntrl->dev, phys, size, DMA_TO_DEVICE); + dma_unmap_single(mhi_cntrl->dev, dma_addr, size, DMA_TO_DEVICE); return (tx_status == BHI_STATUS_SUCCESS) ? 
0 : -ETIMEDOUT; invalid_pm_state: - dma_unmap_single(mhi_cntrl->dev, phys, size, DMA_TO_DEVICE); + dma_unmap_single(mhi_cntrl->dev, dma_addr, size, DMA_TO_DEVICE); return -EIO; } @@ -392,7 +394,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, /* last entry is for vector table */ if (i == segments - 1) - vec_size = sizeof(struct __packed bhi_vec_entry) * i; + vec_size = sizeof(struct bhi_vec_entry) * i; mhi_buf->len = vec_size; mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size, @@ -504,7 +506,7 @@ void mhi_fw_load_worker(struct work_struct *work) if (size > firmware->size) size = firmware->size; - buf = kmalloc(size, GFP_KERNEL); + buf = kmemdup(firmware->data, size, GFP_KERNEL); if (!buf) { MHI_ERR("Could not allocate memory for image\n"); release_firmware(firmware); @@ -512,7 +514,6 @@ void mhi_fw_load_worker(struct work_struct *work) } /* load sbl image */ - memcpy(buf, firmware->data, size); ret = mhi_fw_load_sbl(mhi_cntrl, buf, size); kfree(buf); diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c index af57b942a315cf3c082e319c5e38e6dd913a036c..1db34b69eace83b641fe6bf242c1d6e088b3e2d0 100644 --- a/drivers/bus/mhi/core/mhi_dtr.c +++ b/drivers/bus/mhi/core/mhi_dtr.c @@ -37,7 +37,7 @@ struct __packed dtr_ctrl_msg { #define CTRL_MSG_RTS BIT(1) #define CTRL_MSG_DCD BIT(0) #define CTRL_MSG_DSR BIT(1) -#define CTRL_MSG_RI BIT(2) +#define CTRL_MSG_RI BIT(3) #define CTRL_HOST_STATE (0x10) #define CTRL_DEVICE_STATE (0x11) #define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF) @@ -212,7 +212,7 @@ static int mhi_dtr_probe(struct mhi_device *mhi_dev, static const struct mhi_device_id mhi_dtr_table[] = { { .chan = "IP_CTRL" }, - { NULL }, + {}, }; static struct mhi_driver mhi_dtr_driver = { diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c index cd455571b599e17173b47bd2aa32bf2a8dc25197..b5b9d6fd6bcadc3b563ef2087ab2e07d5d5f50d3 100644 --- a/drivers/bus/mhi/core/mhi_init.c +++ b/drivers/bus/mhi/core/mhi_init.c @@ -333,7 +333,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) er_ctxt->msivec = mhi_event->msi; mhi_event->db_cfg.db_mode = true; - ring->el_size = sizeof(struct __packed mhi_tre); + ring->el_size = sizeof(struct mhi_tre); ring->len = ring->el_size * ring->elements; ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); if (ret) @@ -358,7 +358,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { struct mhi_ring *ring = &mhi_cmd->ring; - ring->el_size = sizeof(struct __packed mhi_tre); + ring->el_size = sizeof(struct mhi_tre); ring->elements = CMD_EL_PER_RING; ring->len = ring->el_size * ring->elements; ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); @@ -643,7 +643,7 @@ int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; - tre_ring->el_size = sizeof(struct __packed mhi_tre); + tre_ring->el_size = sizeof(struct mhi_tre); tre_ring->len = tre_ring->el_size * tre_ring->elements; chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); @@ -736,7 +736,6 @@ int mhi_device_configure(struct mhi_device *mhi_dev, return 0; } -#if defined(CONFIG_OF) static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, struct device_node *of_node) { @@ -992,15 +991,6 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl, int ret; struct mhi_timesync *mhi_tsync; - /* parse firmware image info (optional parameters) 
 */
-	of_property_read_string(of_node, "mhi,fw-name", &mhi_cntrl->fw_image);
-	of_property_read_string(of_node, "mhi,edl-name", &mhi_cntrl->fw_image);
-	mhi_cntrl->fbc_download = of_property_read_bool(of_node, "mhi,dl-fbc");
-	of_property_read_u32(of_node, "mhi,sbl-size",
-			     (u32 *)&mhi_cntrl->sbl_size);
-	of_property_read_u32(of_node, "mhi,seg-len",
-			     (u32 *)&mhi_cntrl->seg_len);
-
 	/* parse MHI channel configuration */
 	ret = of_parse_ch_cfg(mhi_cntrl, of_node);
 	if (ret)
@@ -1055,13 +1045,6 @@
 
 	return ret;
 }
-#else
-static int of_parse_dt(struct mhi_controller *mhi_cntrl,
-		       struct device_node *of_node)
-{
-	return -EINVAL;
-}
-#endif
 
 int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 {
@@ -1070,6 +1053,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	struct mhi_event *mhi_event;
 	struct mhi_chan *mhi_chan;
 	struct mhi_cmd *mhi_cmd;
+	struct mhi_device *mhi_dev;
 
 	if (!mhi_cntrl->of_node)
 		return -EINVAL;
@@ -1091,8 +1075,10 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
 				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
-	if (!mhi_cntrl->mhi_cmd)
+	if (!mhi_cntrl->mhi_cmd) {
+		ret = -ENOMEM;
 		goto error_alloc_cmd;
+	}
 
 	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
 	mutex_init(&mhi_cntrl->pm_mutex);
@@ -1147,31 +1133,59 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
 		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
 	}
 
-	mhi_cntrl->parent = mhi_bus.dentry;
+	/* register controller with mhi_bus */
+	mhi_dev = mhi_alloc_device(mhi_cntrl);
+	if (!mhi_dev) {
+		ret = -ENOMEM;
+		goto error_alloc_dev;
+	}
+
+	mhi_dev->dev_type = MHI_CONTROLLER_TYPE;
+	mhi_dev->mhi_cntrl = mhi_cntrl;
+	dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id,
+		     mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
+	ret = device_add(&mhi_dev->dev);
+	if (ret)
+		goto error_add_dev;
+
+	mhi_cntrl->mhi_dev = mhi_dev;
+
+	mhi_cntrl->parent = debugfs_lookup(mhi_bus_type.name, NULL);
 	mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR;
 
-	/* add to list */
+	/* adding it to this list only for debug purposes */
 	mutex_lock(&mhi_bus.lock);
 	list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list);
 	mutex_unlock(&mhi_bus.lock);
 
 	return 0;
 
+error_add_dev:
+	mhi_dealloc_device(mhi_cntrl, mhi_dev);
+
+error_alloc_dev:
+	kfree(mhi_cntrl->mhi_cmd);
+
 error_alloc_cmd:
 	kfree(mhi_cntrl->mhi_chan);
 	kfree(mhi_cntrl->mhi_event);
 
-	return -ENOMEM;
+	return ret;
 };
 EXPORT_SYMBOL(of_register_mhi_controller);
 
 void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
 {
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
 	kfree(mhi_cntrl->mhi_cmd);
 	kfree(mhi_cntrl->mhi_event);
 	kfree(mhi_cntrl->mhi_chan);
 	kfree(mhi_cntrl->mhi_tsync);
 
+	device_del(&mhi_dev->dev);
+	put_device(&mhi_dev->dev);
+
 	mutex_lock(&mhi_bus.lock);
 	list_del(&mhi_cntrl->node);
 	mutex_unlock(&mhi_bus.lock);
@@ -1265,7 +1279,11 @@ static int mhi_match(struct device *dev, struct device_driver *drv)
 	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
 	const struct mhi_device_id *id;
 
-	for (id = mhi_drv->id_table; id->chan; id++)
+	/* controller devices have no client driver associated with them */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	for (id = mhi_drv->id_table; id->chan[0]; id++)
 		if (!strcmp(mhi_dev->chan_name, id->chan)) {
 			mhi_dev->id = id;
 			return 1;
@@ -1357,6 +1375,10 @@ static int mhi_driver_remove(struct device *dev)
 	};
 	int dir;
 
+	/* the controller device has no work to do here */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
 	MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name);
 
 	/* reset both channels */
@@ -1463,14 +1485,13 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
 
 static int __init mhi_init(void)
 {
-	struct dentry *dentry;
 	int ret;
 
 	mutex_init(&mhi_bus.lock);
 	INIT_LIST_HEAD(&mhi_bus.controller_list);
-	dentry = debugfs_create_dir("mhi", NULL);
-	if (!IS_ERR_OR_NULL(dentry))
-		mhi_bus.dentry = dentry;
+
+	/* parent directory */
+	debugfs_create_dir(mhi_bus_type.name, NULL);
 
 	ret = bus_register(&mhi_bus_type);
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index df085301cac4424780b53d5821832f7dd51f86d9..cfae808c61de8e86f8407041d6fc136737ceefd4 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -48,6 +48,10 @@ extern struct bus_type mhi_bus_type;
 #define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
 #define BHIOFF_BHIOFF_SHIFT (0)
 
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
 #define DEBUGOFF (0x30)
 #define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
 #define DEBUGOFF_DEBUGOFF_SHIFT (0)
@@ -186,15 +190,14 @@ extern struct bus_type mhi_bus_type;
 #define BHI_STATUS_RESET (0)
 
 /* MHI BHIE offsets */
-#define BHIE_OFFSET (0x0124) /* BHIE register space offset from BHI base */
-#define BHIE_MSMSOCID_OFFS (BHIE_OFFSET + 0x0000)
-#define BHIE_TXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x002C)
-#define BHIE_TXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0030)
-#define BHIE_TXVECSIZE_OFFS (BHIE_OFFSET + 0x0034)
-#define BHIE_TXVECDB_OFFS (BHIE_OFFSET + 0x003C)
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS (0x0034)
+#define BHIE_TXVECDB_OFFS (0x003C)
 #define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
 #define BHIE_TXVECDB_SEQNUM_SHFT (0)
-#define BHIE_TXVECSTATUS_OFFS (BHIE_OFFSET + 0x0044)
+#define BHIE_TXVECSTATUS_OFFS (0x0044)
 #define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
 #define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
 #define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
@@ -202,13 +205,13 @@ extern struct bus_type mhi_bus_type;
 #define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
 #define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
 #define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
-#define BHIE_RXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x0060)
-#define BHIE_RXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0064)
-#define BHIE_RXVECSIZE_OFFS (BHIE_OFFSET + 0x0068)
-#define BHIE_RXVECDB_OFFS (BHIE_OFFSET + 0x0070)
+#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
+#define BHIE_RXVECSIZE_OFFS (0x0068)
+#define BHIE_RXVECDB_OFFS (0x0070)
 #define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
 #define BHIE_RXVECDB_SEQNUM_SHFT (0)
-#define BHIE_RXVECSTATUS_OFFS (BHIE_OFFSET + 0x0078)
+#define BHIE_RXVECSTATUS_OFFS (0x0078)
 #define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
 #define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
 #define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
@@ -217,47 +220,47 @@ extern struct bus_type mhi_bus_type;
 #define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
 #define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
 
-struct __packed mhi_event_ctxt {
+struct mhi_event_ctxt {
 	u32 reserved : 8;
 	u32 intmodc : 8;
 	u32 intmodt : 16;
 	u32 ertype;
 	u32 msivec;
-	u64 rbase;
-	u64 rlen;
-	u64 rp;
-	u64 wp;
+	u64 rbase __packed __aligned(4);
+	u64 rlen __packed __aligned(4);
+	u64 rp __packed __aligned(4);
+	u64 wp __packed __aligned(4);
 };
 
-struct __packed mhi_chan_ctxt {
+struct mhi_chan_ctxt {
 	u32 chstate : 8;
 	u32 brstmode : 2;
 	u32 pollcfg : 6;
 	u32 reserved : 16;
 	u32 chtype;
 	u32 erindex;
-	u64 rbase;
-	u64 rlen;
-	u64 rp;
-	u64 wp;
+	u64 rbase __packed __aligned(4);
+	u64 rlen __packed __aligned(4);
+	u64 rp __packed __aligned(4);
+	u64 wp __packed __aligned(4);
 };
 
-struct __packed mhi_cmd_ctxt {
+struct mhi_cmd_ctxt {
 	u32 reserved0;
 	u32 reserved1;
 	u32 reserved2;
-	u64 rbase;
-	u64 rlen;
-	u64 rp;
-	u64 wp;
+	u64 rbase __packed __aligned(4);
+	u64 rlen __packed __aligned(4);
+	u64 rp __packed __aligned(4);
+	u64 wp __packed __aligned(4);
 };
 
-struct __packed mhi_tre {
+struct mhi_tre {
 	u64 ptr;
 	u32 dword[2];
 };
 
-struct __packed bhi_vec_entry {
+struct bhi_vec_entry {
 	u64 dma_addr;
 	u64 size;
 };
@@ -623,7 +626,6 @@ struct mhi_timesync {
 struct mhi_bus {
 	struct list_head controller_list;
 	struct mutex lock;
-	struct dentry *dentry;
 };
 
 /* default MHI timeout */
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index e80d5790f15cd206e138de88b940ea965592fd62..67acfa93dc95aa3f197858cf2910cfd841315e16 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -559,6 +559,10 @@ int mhi_destroy_device(struct device *dev, void *data)
 	mhi_dev = to_mhi_device(dev);
 	mhi_cntrl = mhi_dev->mhi_cntrl;
 
+	/* only destroy virtual devices that are attached to the bus */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
 	MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name);
 
 	/* notify the client and remove the device from mhi bus */
@@ -1722,19 +1726,41 @@ int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
 }
 EXPORT_SYMBOL(mhi_get_no_free_descriptors);
 
+static int __mhi_bdf_to_controller(struct device *dev, void *tmp)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_device *match = tmp;
+
+	/* return any non-zero value on a match */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE &&
+	    mhi_dev->domain == match->domain && mhi_dev->bus == match->bus &&
+	    mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id)
+		return 1;
+
+	return 0;
+}
+
 struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot, u32 dev_id)
 {
-	struct mhi_controller *itr, *tmp;
+	struct mhi_device tmp, *mhi_dev;
+	struct device *dev;
+
+	tmp.domain = domain;
+	tmp.bus = bus;
+	tmp.slot = slot;
+	tmp.dev_id = dev_id;
 
-	list_for_each_entry_safe(itr, tmp, &mhi_bus.controller_list, node)
-		if (itr->domain == domain && itr->bus == bus &&
-		    itr->slot == slot && itr->dev_id == dev_id)
-			return itr;
+	dev = bus_find_device(&mhi_bus_type, NULL, &tmp,
+			      __mhi_bdf_to_controller);
+	if (!dev)
+		return NULL;
+
+	mhi_dev = to_mhi_device(dev);
 
-	return NULL;
+	return mhi_dev->mhi_cntrl;
 }
 EXPORT_SYMBOL(mhi_bdf_to_controller);
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 5ed4e2a0856edd263aadb3c3ac1af2b4468e5a26..80e29c1e99e8f6a1de38d34b997c9d37e25d5c75 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -777,6 +777,9 @@ void mhi_pm_st_worker(struct work_struct *work)
 		case MHI_ST_TRANSITION_AMSS:
 			mhi_pm_amss_transition(mhi_cntrl);
 			break;
+		case MHI_ST_TRANSITION_READY:
+			mhi_ready_state_transition(mhi_cntrl);
+			break;
 		default:
 			break;
 		}
@@ -787,7 +790,7 @@
 int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 {
 	int ret;
-	u32 bhi_offset;
+	u32 val;
 	enum MHI_EE current_ee;
 	enum MHI_ST_TRANSITION next_state;
 
@@ -822,14 +825,27 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	/* setup bhi offset & intvec */
write_lock_irq(&mhi_cntrl->pm_lock); - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_offset); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); if (ret) { write_unlock_irq(&mhi_cntrl->pm_lock); MHI_ERR("Error getting bhi offset\n"); goto error_bhi_offset; } - mhi_cntrl->bhi = mhi_cntrl->regs + bhi_offset; + mhi_cntrl->bhi = mhi_cntrl->regs + val; + + /* setup bhie offset */ + if (mhi_cntrl->fbc_download) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhie offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhie = mhi_cntrl->regs + val; + } + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); mhi_cntrl->pm_state = MHI_PM_POR; mhi_cntrl->ee = MHI_EE_MAX; diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c index 8b4ba70800367d517a2e003281c3a622c6646a75..c6ec258d43cbad53b24d288b23db517dbb9424ff 100644 --- a/drivers/bus/mhi/devices/mhi_netdev.c +++ b/drivers/bus/mhi/devices/mhi_netdev.c @@ -977,7 +977,7 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev, static const struct mhi_device_id mhi_netdev_match_table[] = { { .chan = "IP_HW0" }, { .chan = "IP_HW_ADPL" }, - { NULL }, + {}, }; static struct mhi_driver mhi_netdev_driver = { diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c index dd7581e36d03803eaf85c43a920b4f0bafd48309..eed5429e918ad400e513ff9697e8d8908a779f65 100644 --- a/drivers/bus/mhi/devices/mhi_uci.c +++ b/drivers/bus/mhi/devices/mhi_uci.c @@ -278,8 +278,8 @@ static ssize_t mhi_uci_write(struct file *file, (nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE)) > 0); - if (ret == -ERESTARTSYS) { - MSG_LOG("Exit signal caught for node\n"); + if (ret == -ERESTARTSYS || !uci_dev->enabled) { + MSG_LOG("Exit signal caught for node or not enabled\n"); return -ERESTARTSYS; } @@ -540,8 +540,8 @@ static void mhi_uci_remove(struct mhi_device *mhi_dev) return; } - mutex_unlock(&uci_dev->mutex); MSG_LOG("Exit\n"); + mutex_unlock(&uci_dev->mutex); } static int mhi_uci_probe(struct mhi_device *mhi_dev, @@ -655,7 +655,7 @@ static const struct mhi_device_id mhi_uci_match_table[] = { { .chan = "TF", .driver_data = 0x1000 }, { .chan = "BL", .driver_data = 0x1000 }, { .chan = "DUN", .driver_data = 0x1000 }, - { NULL }, + {}, }; static struct mhi_driver mhi_uci_driver = { diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d96a3842a4002e05d4753bd4765b9c5c3de5513d..28019ef948d9a59e50ae565339a8d9e3fee9b747 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -606,5 +606,12 @@ config MSM_ADSPRPC applications DSP processor. Say M if you want to enable this module. +config MSM_RDBG + tristate "QTI Remote debug driver" + help + Implements a shared memory based transport mechanism that allows + for a debugger running on a host PC to communicate with a remote + stub running on peripheral subsystems such as the ADSP, MODEM etc. 
+ endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 96ecee7cebc04d841a0b0418c5af74cfb91f7a72..f40528c0ac81b28e4dc9bc537b96b29a1cd6fb4a 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -65,4 +65,5 @@ obj-$(CONFIG_MSM_ADSPRPC) += adsprpc.o ifdef CONFIG_COMPAT obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o endif -obj-$(CONFIG_MSM_FASTCVPD) += fastcvpd.o \ No newline at end of file +obj-$(CONFIG_MSM_FASTCVPD) += fastcvpd.o +obj-$(CONFIG_MSM_RDBG) += rdbg.o diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 39a0bdd4404a288305671c42f47a20464263e949..7a5c8eb5a4780996d4220127b0524baf446f5976 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include #include @@ -61,6 +63,9 @@ #define VMID_ADSP_Q6 6 #define DEBUGFS_SIZE 1024 +#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsprpc" +#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio" + #define RPC_TIMEOUT (5 * HZ) #define BALIGN 128 #define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/ @@ -71,6 +76,11 @@ #define FASTRPC_CTX_MAGIC (0xbeeddeed) #define FASTRPC_CTX_MAX (256) #define FASTRPC_CTXID_MASK (0xFF0) +#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */ +#define MINOR_NUM_DEV 0 +#define MINOR_NUM_SECURE_DEV 1 +#define NON_SECURE_CHANNEL 0 +#define SECURE_CHANNEL 1 #define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0) #ifndef ION_FLAG_CACHED @@ -112,6 +122,9 @@ (int64_t *)(perf_ptr + offset)\ : (int64_t *)NULL) : (int64_t *)NULL) +static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *nb, + unsigned long code, + void *data); static struct dentry *debugfs_root; static struct dentry *debugfs_global_file; @@ -227,12 +240,23 @@ struct fastrpc_session_ctx { int used; }; +struct fastrpc_static_pd { + char *spdname; + struct notifier_block pdrnb; + struct notifier_block get_service_nb; + void *pdrhandle; + int pdrcount; + int prevpdrcount; + int ispdup; +}; + struct fastrpc_channel_ctx { char *name; char *subsys; struct rpmsg_device *chan; struct device *dev; struct fastrpc_session_ctx session[NUM_SESSIONS]; + struct fastrpc_static_pd spd[NUM_SESSIONS]; struct completion work; struct completion workport; struct notifier_block nb; @@ -246,6 +270,8 @@ struct fastrpc_channel_ctx { struct secure_vm rhvm; int ramdumpenabled; void *remoteheap_ramdump_dev; + /* Indicates, if channel is restricted to secure node only */ + int secure; }; struct fastrpc_apps { @@ -265,6 +291,7 @@ struct fastrpc_apps { int rpmsg_register; spinlock_t ctxlock; struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX]; + bool legacy_remote_heap; }; struct fastrpc_mmap { @@ -330,6 +357,7 @@ struct fastrpc_file { int cid; int ssrcount; int pd; + char *spdname; int file_close; struct fastrpc_apps *apps; struct hlist_head perf; @@ -339,6 +367,8 @@ struct fastrpc_file { int qos_request; struct mutex map_mutex; struct mutex internal_map_mutex; + /* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */ + int dev_minor; }; static struct fastrpc_apps gfa; @@ -347,6 +377,14 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = { { .name = "adsprpc-smd", .subsys = "adsp", + .spd = { + { + .spdname = + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, + .pdrnb.notifier_call = + fastrpc_audio_pdr_notifier_cb, + } + }, }, { .name = "mdsprpc-smd", @@ -1149,6 +1187,21 @@ static void fastrpc_notify_drivers(struct fastrpc_apps *me, int cid) spin_unlock(&me->hlock); } + +static void fastrpc_notify_pdr_drivers(struct fastrpc_apps 
*me, char *spdname) +{ + struct fastrpc_file *fl; + struct hlist_node *n; + + spin_lock(&me->hlock); + hlist_for_each_entry_safe(fl, n, &me->drivers, hn) { + if (fl->spdname && !strcmp(spdname, fl->spdname)) + fastrpc_notify_users(fl); + } + spin_unlock(&me->hlock); + +} + static void context_list_ctor(struct fastrpc_ctx_lst *me) { INIT_HLIST_HEAD(&me->interrupted); @@ -1420,9 +1473,17 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) if (map && (map->attr & FASTRPC_ATTR_COHERENT)) continue; - if (rpra[i].buf.len && ctx->overps[oix]->mstart) - dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv), - uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len)); + if (rpra && rpra[i].buf.len && ctx->overps[oix]->mstart) { + if (map && map->handle) { + dma_buf_begin_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + dma_buf_end_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + } else + dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv), + uint64_to_ptr(rpra[i].buf.pv + + rpra[i].buf.len)); + } } PERF_END); for (i = bufs; rpra && i < bufs + handles; i++) { @@ -1431,11 +1492,6 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv; } - if (!ctx->fl->sctx->smmu.coherent) { - PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_FLUSH), - dmac_flush_range((char *)rpra, (char *)rpra + ctx->used); - PERF_END); - } bail: return err; } @@ -1521,14 +1577,31 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx) if (buf_page_start(ptr_to_uint64((void *)rpra)) == buf_page_start(rpra[i].buf.pv)) continue; - if (!IS_CACHE_ALIGNED((uintptr_t)uint64_to_ptr(rpra[i].buf.pv))) - dmac_flush_range(uint64_to_ptr(rpra[i].buf.pv), - (char *)(uint64_to_ptr(rpra[i].buf.pv + 1))); + if (!IS_CACHE_ALIGNED((uintptr_t) + uint64_to_ptr(rpra[i].buf.pv))) { + if (map && map->handle) { + dma_buf_begin_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + dma_buf_end_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + } else + dmac_flush_range( + uint64_to_ptr(rpra[i].buf.pv), (char *) + uint64_to_ptr(rpra[i].buf.pv + 1)); + } + end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len); - if (!IS_CACHE_ALIGNED(end)) - dmac_flush_range((char *)end, - (char *)end + 1); + if (!IS_CACHE_ALIGNED(end)) { + if (map && map->handle) { + dma_buf_begin_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + dma_buf_end_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + } else + dmac_flush_range((char *)end, + (char *)end + 1); + } } } @@ -1537,7 +1610,6 @@ static void inv_args(struct smq_invoke_ctx *ctx) int i, inbufs, outbufs; uint32_t sc = ctx->sc; remote_arg64_t *rpra = ctx->rpra; - int used = ctx->used; inbufs = REMOTE_SCALARS_INBUFS(sc); outbufs = REMOTE_SCALARS_OUTBUFS(sc); @@ -1559,17 +1631,16 @@ static void inv_args(struct smq_invoke_ctx *ctx) continue; } if (map && map->buf) { - dma_buf_begin_cpu_access(map->buf, DMA_BIDIRECTIONAL); - dma_buf_end_cpu_access(map->buf, DMA_BIDIRECTIONAL); - } - else + dma_buf_begin_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + dma_buf_end_cpu_access(map->buf, + DMA_BIDIRECTIONAL); + } else dmac_inv_range((char *)uint64_to_ptr(rpra[i].buf.pv), (char *)uint64_to_ptr(rpra[i].buf.pv + rpra[i].buf.len)); } - if (rpra) - dmac_inv_range(rpra, (char *)rpra + used); } static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, @@ -1623,7 +1694,11 @@ static void fastrpc_init(struct fastrpc_apps *me) init_completion(&me->channel[i].work); init_completion(&me->channel[i].workport); me->channel[i].sesscount = 0; + /* All channels are secure by default except CDSP */ + 
me->channel[i].secure = SECURE_CHANNEL; } + /* Set CDSP channel to non secure */ + me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL; } static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl); @@ -1737,7 +1812,28 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, return err; } +static int fastrpc_get_adsp_session(char *name, int *session) +{ + struct fastrpc_apps *me = &gfa; + int err = 0, i; + + for (i = 0; i < NUM_SESSIONS; i++) { + if (!me->channel[0].spd[i].spdname) + continue; + if (!strcmp(name, me->channel[0].spd[i].spdname)) + break; + } + VERIFY(err, i < NUM_SESSIONS); + if (err) + goto bail; + *session = i; +bail: + return err; +} + +static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl); static int fastrpc_channel_open(struct fastrpc_file *fl); +static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl); static int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_ioctl_init_attrs *uproc) { @@ -1877,7 +1973,13 @@ static int fastrpc_init_process(struct fastrpc_file *fl, inbuf.pgid = current->tgid; inbuf.namelen = init->filelen; inbuf.pageslen = 0; - if (!me->staticpd_flags) { + + if (!strcmp(proc_name, "audiopd")) { + fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME; + VERIFY(err, !fastrpc_mmap_remove_pdr(fl)); + } + + if (!me->staticpd_flags && !(me->legacy_remote_heap)) { inbuf.pageslen = 1; mutex_lock(&fl->map_mutex); VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem, @@ -2058,29 +2160,34 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, { int err = 0; struct fastrpc_apps *me = &gfa; + int tgid = 0; int destVM[1] = {VMID_HLOS}; int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC}; if (map->flags == ADSP_MMAP_HEAP_ADDR) { struct fastrpc_ioctl_invoke_crc ioctl; struct scm_desc desc = {0}; - remote_arg_t ra[1]; + remote_arg_t ra[2]; int err = 0; struct { uint8_t skey; } routargs; - ra[0].buf.pv = (void *)&routargs; - ra[0].buf.len = sizeof(routargs); + if (fl == NULL) + goto bail; + tgid = fl->tgid; + ra[0].buf.pv = (void *)&tgid; + ra[0].buf.len = sizeof(tgid); + + ra[1].buf.pv = (void *)&routargs; + ra[1].buf.len = sizeof(routargs); ioctl.inv.handle = 1; - ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1); + ioctl.inv.sc = REMOTE_SCALARS_MAKE(9, 1, 1); ioctl.inv.pra = ra; ioctl.fds = NULL; ioctl.attrs = NULL; ioctl.crc = NULL; - if (fl == NULL) - goto bail; VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); @@ -2196,6 +2303,33 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl) return err; } +static int fastrpc_mmap_remove_pdr(struct fastrpc_file *fl) +{ + struct fastrpc_apps *me = &gfa; + int session = 0, err = 0; + + VERIFY(err, !fastrpc_get_adsp_session( + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session)); + if (err) + goto bail; + if (me->channel[fl->cid].spd[session].pdrcount != + me->channel[fl->cid].spd[session].prevpdrcount) { + if (fastrpc_mmap_remove_ssr(fl)) + pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n"); + me->channel[fl->cid].spd[session].prevpdrcount = + me->channel[fl->cid].spd[session].pdrcount; + } + if (!me->channel[fl->cid].spd[session].ispdup) { + VERIFY(err, 0); + if (err) { + err = -ENOTCONN; + goto bail; + } + } +bail: + return err; +} + static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, size_t len, struct fastrpc_mmap **ppmap); @@ -2555,6 +2689,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s\n\n", 
chan->name); + len += scnprintf(fileinfo + len, + DEBUGFS_SIZE - len, "%s %d\n", + "secure:", chan->secure); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s %d\n", "sesscount:", chan->sesscount); @@ -2583,6 +2720,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer, len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s %d\n\n", "SSRCOUNT:", fl->ssrcount); + len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, + "%s %d\n\n", + "DEV_MINOR:", fl->dev_minor); len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len, "%s\n", "LIST OF BUFS:"); @@ -2670,10 +2810,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) err = -ENOTCONN; goto bail; } - pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name, - MAJOR(me->dev_no), cid); - - if (me->channel[cid].ssrcount != + if (cid == ADSP_DOMAIN_ID && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { mutex_lock(&fl->map_mutex); if (fastrpc_mmap_remove_ssr(fl)) @@ -2696,6 +2833,19 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) struct fastrpc_file *fl = NULL; struct fastrpc_apps *me = &gfa; + /* + * Indicates the device node opened + * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV + */ + int dev_minor = MINOR(inode->i_rdev); + + VERIFY(err, ((dev_minor == MINOR_NUM_DEV) || + (dev_minor == MINOR_NUM_SECURE_DEV))); + if (err) { + pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor); + return err; + } + VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL))); if (err) return err; @@ -2712,6 +2862,8 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) fl->apps = me; fl->mode = FASTRPC_MODE_SERIAL; fl->cid = -1; + fl->dev_minor = dev_minor; + if (debugfs_file != NULL) fl->debugfs_file = debugfs_file; memset(&fl->perf, 0, sizeof(fl->perf)); @@ -2739,6 +2891,20 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info) VERIFY(err, cid < NUM_CHANNELS); if (err) goto bail; + /* Check to see if the device node is non-secure */ + if (fl->dev_minor == MINOR_NUM_DEV) { + /* + * For non secure device node check and make sure that + * the channel allows non-secure access + * If not, bail. Session will not start. 
+ * cid will remain -1 and client will not be able to + * invoke any other methods without failure + */ + if (fl->apps->channel[cid].secure == SECURE_CHANNEL) { + err = -EPERM; + goto bail; + } + } fl->cid = cid; fl->ssrcount = fl->apps->channel[cid].ssrcount; VERIFY(err, !fastrpc_session_alloc_locked( @@ -2863,6 +3029,28 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, if (err) goto bail; break; + case FASTRPC_IOCTL_MMAP_64: + K_COPY_FROM_USER(err, 0, &p.mmap, param, + sizeof(p.mmap)); + if (err) + goto bail; + VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap))); + if (err) + goto bail; + K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap)); + if (err) + goto bail; + break; + case FASTRPC_IOCTL_MUNMAP_64: + K_COPY_FROM_USER(err, 0, &p.munmap, param, + sizeof(p.munmap)); + if (err) + goto bail; + VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl, + &p.munmap))); + if (err) + goto bail; + break; case FASTRPC_IOCTL_MUNMAP_FD: K_COPY_FROM_USER(err, 0, &p.munmap_fd, param, sizeof(p.munmap_fd)); @@ -3011,6 +3199,64 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, return NOTIFY_DONE; } +static int fastrpc_audio_pdr_notifier_cb(struct notifier_block *pdrnb, + unsigned long code, + void *data) +{ + struct fastrpc_apps *me = &gfa; + struct fastrpc_static_pd *spd; + struct notif_data *notifdata = data; + + spd = container_of(pdrnb, struct fastrpc_static_pd, pdrnb); + if (code == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) { + mutex_lock(&me->smd_mutex); + spd->pdrcount++; + spd->ispdup = 0; + pr_info("ADSPRPC: Audio PDR notifier %d %s\n", + MAJOR(me->dev_no), spd->spdname); + mutex_unlock(&me->smd_mutex); + if (!strcmp(spd->spdname, + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME)) + me->staticpd_flags = 0; + fastrpc_notify_pdr_drivers(me, spd->spdname); + } else if (code == SUBSYS_RAMDUMP_NOTIFICATION) { + if (me->channel[0].remoteheap_ramdump_dev && + notifdata->enable_ramdump) { + me->channel[0].ramdumpenabled = 1; + } + } else if (code == SERVREG_NOTIF_SERVICE_STATE_UP_V01) { + spd->ispdup = 1; + } + + return NOTIFY_DONE; +} + +static int fastrpc_get_service_location_notify(struct notifier_block *nb, + unsigned long opcode, void *data) +{ + struct fastrpc_static_pd *spd; + struct pd_qmi_client_data *pdr = data; + int curr_state = 0; + + spd = container_of(nb, struct fastrpc_static_pd, get_service_nb); + if (opcode == LOCATOR_DOWN) { + pr_err("ADSPRPC: Audio PD restart notifier locator down\n"); + return NOTIFY_DONE; + } + + if (pdr->total_domains == 1) { + spd->pdrhandle = service_notif_register_notifier( + pdr->domain_list[0].name, + pdr->domain_list[0].instance_id, + &spd->pdrnb, &curr_state); + if (IS_ERR(spd->pdrhandle)) + pr_err("ADSPRPC: Unable to register notifier\n"); + } else + pr_err("ADSPRPC: Service returned invalid domains\n"); + + return NOTIFY_DONE; +} + static const struct file_operations fops = { .open = fastrpc_device_open, .release = fastrpc_device_release, @@ -3102,7 +3348,7 @@ static int fastrpc_cb_probe(struct device *dev) sess->smmu.dev->dma_parms = devm_kzalloc(sess->smmu.dev, sizeof(*sess->smmu.dev->dma_parms), GFP_KERNEL); dma_set_max_seg_size(sess->smmu.dev, DMA_BIT_MASK(32)); - dma_set_seg_boundary(sess->smmu.dev, DMA_BIT_MASK(64)); + dma_set_seg_boundary(sess->smmu.dev, (unsigned long)DMA_BIT_MASK(64)); if (of_get_property(dev->of_node, "shared-cb", NULL) != NULL) { VERIFY(err, !of_property_read_u32(dev->of_node, "shared-cb", @@ -3169,6 +3415,25 @@ static void init_secure_vmid_list(struct device *dev, char *prop_name, 
} } +static void configure_secure_channels(uint32_t secure_domains) +{ + struct fastrpc_apps *me = &gfa; + int ii = 0; + /* + * secure_domains contains the bitmask of the secure channels + * Bit 0 - ADSP + * Bit 1 - MDSP + * Bit 2 - SLPI + * Bit 3 - CDSP + */ + for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) { + int secure = (secure_domains >> ii) & 0x01; + + me->channel[ii].secure = secure; + } +} + + static int fastrpc_probe(struct platform_device *pdev) { int err = 0; @@ -3179,7 +3444,8 @@ static int fastrpc_probe(struct platform_device *pdev) struct platform_device *ion_pdev; struct cma *cma; uint32_t val; - + int ret = 0; + uint32_t secure_domains; if (of_device_is_compatible(dev->of_node, "qcom,msm-fastrpc-compute")) { @@ -3189,6 +3455,16 @@ static int fastrpc_probe(struct platform_device *pdev) of_property_read_u32(dev->of_node, "qcom,rpc-latency-us", &me->latency); + if (of_get_property(dev->of_node, + "qcom,secure-domains", NULL) != NULL) { + VERIFY(err, !of_property_read_u32(dev->of_node, + "qcom,secure-domains", + &secure_domains)); + if (!err) + configure_secure_channels(secure_domains); + else + pr_info("adsprpc: unable to read the domain configuration from dts\n"); + } } if (of_device_is_compatible(dev->of_node, "qcom,msm-fastrpc-compute-cb")) @@ -3234,7 +3510,28 @@ static int fastrpc_probe(struct platform_device *pdev) } return 0; } + me->legacy_remote_heap = of_property_read_bool(dev->of_node, + "qcom,fastrpc-legacy-remote-heap"); + if (of_property_read_bool(dev->of_node, + "qcom,fastrpc-adsp-audio-pdr")) { + int session; + VERIFY(err, !fastrpc_get_adsp_session( + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, &session)); + if (err) + goto spdbail; + me->channel[0].spd[session].get_service_nb.notifier_call = + fastrpc_get_service_location_notify; + ret = get_service_location( + AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME, + AUDIO_PDR_ADSP_SERVICE_NAME, + &me->channel[0].spd[session].get_service_nb); + if (ret) + pr_err("ADSPRPC: Get service location failed: %d\n", + ret); + } +spdbail: + err = 0; VERIFY(err, !of_platform_populate(pdev->dev.of_node, fastrpc_match_table, NULL, &pdev->dev)); @@ -3302,12 +3599,14 @@ static int __init fastrpc_device_init(void) { struct fastrpc_apps *me = &gfa; struct device *dev = NULL; + struct device *secure_dev = NULL; int err = 0, i; memset(me, 0, sizeof(*me)); fastrpc_init(me); me->dev = NULL; + me->legacy_remote_heap = 0; VERIFY(err, 0 == platform_driver_register(&fastrpc_driver)); if (err) goto register_bail; @@ -3318,7 +3617,7 @@ static int __init fastrpc_device_init(void) cdev_init(&me->cdev, &fops); me->cdev.owner = THIS_MODULE; VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), - 1)); + NUM_DEVICES)); if (err) goto cdev_init_bail; me->class = class_create(THIS_MODULE, "fastrpc"); @@ -3326,14 +3625,30 @@ static int __init fastrpc_device_init(void) if (err) goto class_create_bail; me->compat = (fops.compat_ioctl == NULL) ? 
0 : 1; + + /* + * Create devices and register with sysfs + * Create first device with minor number 0 + */ dev = device_create(me->class, NULL, - MKDEV(MAJOR(me->dev_no), 0), - NULL, gcinfo[0].name); + MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV), + NULL, DEVICE_NAME); VERIFY(err, !IS_ERR_OR_NULL(dev)); if (err) goto device_create_bail; + + /* Create secure device with minor number for secure device */ + secure_dev = device_create(me->class, NULL, + MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV), + NULL, DEVICE_NAME_SECURE); + VERIFY(err, !IS_ERR_OR_NULL(secure_dev)); + if (err) + goto device_create_bail; + for (i = 0; i < NUM_CHANNELS; i++) { - me->channel[i].dev = dev; + me->channel[i].dev = secure_dev; + if (i == CDSP_DOMAIN_ID) + me->channel[i].dev = dev; me->channel[i].ssrcount = 0; me->channel[i].prevssrcount = 0; me->channel[i].issubsystemup = 1; @@ -3361,7 +3676,11 @@ static int __init fastrpc_device_init(void) &me->channel[i].nb); } if (!IS_ERR_OR_NULL(dev)) - device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0)); + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), + MINOR_NUM_DEV)); + if (!IS_ERR_OR_NULL(secure_dev)) + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), + MINOR_NUM_SECURE_DEV)); class_destroy(me->class); class_create_bail: cdev_del(&me->cdev); @@ -3383,10 +3702,15 @@ static void __exit fastrpc_device_exit(void) for (i = 0; i < NUM_CHANNELS; i++) { if (!gcinfo[i].name) continue; - device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i)); subsys_notif_unregister_notifier(me->channel[i].handle, &me->channel[i].nb); } + + /* Destroy the secure and non secure devices */ + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV)); + device_destroy(me->class, MKDEV(MAJOR(me->dev_no), + MINOR_NUM_SECURE_DEV)); + class_destroy(me->class); cdev_del(&me->cdev); unregister_chrdev_region(me->dev_no, NUM_CHANNELS); diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c index e7d50451440f1df215a74743b57200ca7163afbd..804cedade6550e4c87823bd5223782323ad38197 100644 --- a/drivers/char/adsprpc_compat.c +++ b/drivers/char/adsprpc_compat.c @@ -39,6 +39,10 @@ _IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc) #define COMPAT_FASTRPC_IOCTL_CONTROL \ _IOWR('R', 12, struct compat_fastrpc_ioctl_control) +#define COMPAT_FASTRPC_IOCTL_MMAP_64 \ + _IOWR('R', 14, struct compat_fastrpc_ioctl_mmap_64) +#define COMPAT_FASTRPC_IOCTL_MUNMAP_64 \ + _IOWR('R', 15, struct compat_fastrpc_ioctl_munmap_64) struct compat_remote_buf { compat_uptr_t pv; /* buffer pointer */ @@ -82,11 +86,24 @@ struct compat_fastrpc_ioctl_mmap { compat_uptr_t vaddrout; /* dsps virtual address */ }; +struct compat_fastrpc_ioctl_mmap_64 { + compat_int_t fd; /* ion fd */ + compat_uint_t flags; /* flags for dsp to map with */ + compat_u64 vaddrin; /* optional virtual address */ + compat_size_t size; /* size */ + compat_u64 vaddrout; /* dsps virtual address */ +}; + struct compat_fastrpc_ioctl_munmap { compat_uptr_t vaddrout; /* address to unmap */ compat_size_t size; /* size */ }; +struct compat_fastrpc_ioctl_munmap_64 { + compat_u64 vaddrout; /* address to unmap */ + compat_size_t size; /* size */ +}; + struct compat_fastrpc_ioctl_init { compat_uint_t flags; /* one of FASTRPC_INIT_* macros */ compat_uptr_t file; /* pointer to elf file */ @@ -206,6 +223,28 @@ static int compat_get_fastrpc_ioctl_mmap( return err; } +static int compat_get_fastrpc_ioctl_mmap_64( + struct compat_fastrpc_ioctl_mmap_64 __user *map32, + struct fastrpc_ioctl_mmap __user *map) +{ + compat_uint_t u; + compat_int_t i; + 
compat_size_t s; + compat_u64 p; + int err; + + err = get_user(i, &map32->fd); + err |= put_user(i, &map->fd); + err |= get_user(u, &map32->flags); + err |= put_user(u, &map->flags); + err |= get_user(p, &map32->vaddrin); + err |= put_user(p, &map->vaddrin); + err |= get_user(s, &map32->size); + err |= put_user(s, &map->size); + + return err; +} + static int compat_put_fastrpc_ioctl_mmap( struct compat_fastrpc_ioctl_mmap __user *map32, struct fastrpc_ioctl_mmap __user *map) @@ -219,6 +258,19 @@ static int compat_put_fastrpc_ioctl_mmap( return err; } +static int compat_put_fastrpc_ioctl_mmap_64( + struct compat_fastrpc_ioctl_mmap_64 __user *map32, + struct fastrpc_ioctl_mmap __user *map) +{ + compat_u64 p; + int err; + + err = get_user(p, &map->vaddrout); + err |= put_user(p, &map32->vaddrout); + + return err; +} + static int compat_get_fastrpc_ioctl_munmap( struct compat_fastrpc_ioctl_munmap __user *unmap32, struct fastrpc_ioctl_munmap __user *unmap) @@ -235,6 +287,22 @@ static int compat_get_fastrpc_ioctl_munmap( return err; } +static int compat_get_fastrpc_ioctl_munmap_64( + struct compat_fastrpc_ioctl_munmap_64 __user *unmap32, + struct fastrpc_ioctl_munmap __user *unmap) +{ + compat_u64 p; + compat_size_t s; + int err; + + err = get_user(p, &unmap32->vaddrout); + err |= put_user(p, &unmap->vaddrout); + err |= get_user(s, &unmap32->size); + err |= put_user(s, &unmap->size); + + return err; +} + static int compat_get_fastrpc_ioctl_perf( struct compat_fastrpc_ioctl_perf __user *perf32, struct fastrpc_ioctl_perf __user *perf) @@ -355,6 +423,27 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap(map32, map)); return err; } + case COMPAT_FASTRPC_IOCTL_MMAP_64: + { + struct compat_fastrpc_ioctl_mmap_64 __user *map32; + struct fastrpc_ioctl_mmap __user *map; + long ret; + + map32 = compat_ptr(arg); + VERIFY(err, NULL != (map = compat_alloc_user_space( + sizeof(*map)))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_mmap_64(map32, map)); + if (err) + return err; + ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MMAP_64, + (unsigned long)map); + if (ret) + return ret; + VERIFY(err, 0 == compat_put_fastrpc_ioctl_mmap_64(map32, map)); + return err; + } case COMPAT_FASTRPC_IOCTL_MUNMAP: { struct compat_fastrpc_ioctl_munmap __user *unmap32; @@ -372,6 +461,23 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP, (unsigned long)unmap); } + case COMPAT_FASTRPC_IOCTL_MUNMAP_64: + { + struct compat_fastrpc_ioctl_munmap_64 __user *unmap32; + struct fastrpc_ioctl_munmap __user *unmap; + + unmap32 = compat_ptr(arg); + VERIFY(err, NULL != (unmap = compat_alloc_user_space( + sizeof(*unmap)))); + if (err) + return -EFAULT; + VERIFY(err, 0 == compat_get_fastrpc_ioctl_munmap_64(unmap32, + unmap)); + if (err) + return err; + return filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_MUNMAP_64, + (unsigned long)unmap); + } case COMPAT_FASTRPC_IOCTL_INIT: /* fall through */ case COMPAT_FASTRPC_IOCTL_INIT_ATTRS: diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h index 207e1ab08e21d1327c3a3dc094d63d4dd1e6976c..61f25dfd6cb750bad716b38d1fc6afe3c7c6876c 100644 --- a/drivers/char/adsprpc_shared.h +++ b/drivers/char/adsprpc_shared.h @@ -19,6 +19,8 @@ #define FASTRPC_IOCTL_INVOKE _IOWR('R', 1, struct fastrpc_ioctl_invoke) #define FASTRPC_IOCTL_MMAP _IOWR('R', 2, struct fastrpc_ioctl_mmap) #define FASTRPC_IOCTL_MUNMAP 
_IOWR('R', 3, struct fastrpc_ioctl_munmap) +#define FASTRPC_IOCTL_MMAP_64 _IOWR('R', 14, struct fastrpc_ioctl_mmap_64) +#define FASTRPC_IOCTL_MUNMAP_64 _IOWR('R', 15, struct fastrpc_ioctl_munmap_64) #define FASTRPC_IOCTL_INVOKE_FD _IOWR('R', 4, struct fastrpc_ioctl_invoke_fd) #define FASTRPC_IOCTL_SETMODE _IOWR('R', 5, uint32_t) #define FASTRPC_IOCTL_INIT _IOWR('R', 6, struct fastrpc_ioctl_init) @@ -34,6 +36,7 @@ #define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp" #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp" #define DEVICE_NAME "adsprpc-smd" +#define DEVICE_NAME_SECURE "adsprpc-smd-secure" /* Set for buffers that have no virtual mapping in userspace */ #define FASTRPC_ATTR_NOVA 0x1 @@ -203,6 +206,11 @@ struct fastrpc_ioctl_munmap { size_t size; /* size */ }; +struct fastrpc_ioctl_munmap_64 { + uint64_t vaddrout; /* address to unmap */ + size_t size; /* size */ +}; + struct fastrpc_ioctl_mmap { int fd; /* ion fd */ uint32_t flags; /* flags for dsp to map with */ @@ -211,6 +219,14 @@ struct fastrpc_ioctl_mmap { uintptr_t vaddrout; /* dsps virtual address */ }; +struct fastrpc_ioctl_mmap_64 { + int fd; /* ion fd */ + uint32_t flags; /* flags for dsp to map with */ + uint64_t vaddrin; /* optional virtual address */ + size_t size; /* size */ + uint64_t vaddrout; /* dsps virtual address */ +}; + struct fastrpc_ioctl_munmap_fd { int fd; /* fd */ uint32_t flags; /* control flags */ diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index c381c8e396fcc0e43b5a1b2a5ebd0c9e80ad732f..79d8c84693a185264990d40185006f5eaec0f145 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c @@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty return 0; } -int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) +static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { size_t i; u32 *gp; @@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge) return 0; } -void null_cache_flush(void) +static void null_cache_flush(void) { mb(); } diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index e25653a2c81fbf953829c1e0de005443bfe5b060..8c77d88245c5b1645b6379c79e90352d89ed639f 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -690,7 +690,7 @@ int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry, byte_mask = 0x01 << (item_num % 8); offset = equip_id * 514; - if (offset + byte_index > DCI_LOG_MASK_SIZE) { + if (offset + byte_index >= DCI_LOG_MASK_SIZE) { pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n", __func__, offset, log_code, byte_index); return 0; @@ -717,7 +717,7 @@ int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry, bit_index = event_id % 8; byte_mask = 0x1 << bit_index; - if (byte_index > DCI_EVENT_MASK_SIZE) { + if (byte_index >= DCI_EVENT_MASK_SIZE) { pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n", __func__, event_id, byte_index); return 0; diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 14177c6cb46088b247e346c7473028e1820db962..914147a015c4963a6d208268ee3ff862d7bdf2ac 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -195,10 +195,11 @@ static void diag_send_log_mask_update(uint8_t peripheral, int equip_id) } mask_info->update_buf = temp; mask_info->update_buf_len = header_len + mask_size; + buf = temp; } memcpy(buf, &ctrl_pkt, header_len); - if (mask_size > 
0) + if (mask_size > 0 && mask_size <= LOG_MASK_SIZE) memcpy(buf + header_len, mask->ptr, mask_size); mutex_unlock(&mask->lock); @@ -297,9 +298,16 @@ static void diag_send_event_mask_update(uint8_t peripheral) } else { mask_info->update_buf = temp; mask_info->update_buf_len = temp_len; + buf = temp; } } - memcpy(buf + sizeof(header), mask_info->ptr, num_bytes); + if (num_bytes > 0 && num_bytes < mask_info->mask_len) + memcpy(buf + sizeof(header), mask_info->ptr, num_bytes); + else { + pr_err("diag: num_bytes(%d) is not satisfying length condition\n", + num_bytes); + goto err; + } write_len += num_bytes; break; default: @@ -415,6 +423,7 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) } else { mask_info->update_buf = temp; mask_info->update_buf_len = temp_len; + buf = temp; pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n", __func__, mask_info->update_buf_len); } @@ -922,10 +931,12 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED : DIAG_CTRL_MASK_ALL_DISABLED; for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { - mutex_lock(&mask->lock); - memset(mask->ptr, req->rt_mask, - mask->range * sizeof(uint32_t)); - mutex_unlock(&mask->lock); + if (mask && mask->ptr) { + mutex_lock(&mask->lock); + memset(mask->ptr, req->rt_mask, + mask->range * sizeof(uint32_t)); + mutex_unlock(&mask->lock); + } } mutex_unlock(&driver->msg_mask_lock); mutex_unlock(&mask_info->lock); @@ -1337,6 +1348,8 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, mutex_lock(&mask_info->lock); for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) { + if (!mask || !mask->ptr) + continue; if (mask->equip_id != req->equip_id) continue; mutex_lock(&mask->lock); @@ -1464,9 +1477,11 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, return -EINVAL; } for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { - mutex_lock(&mask->lock); - memset(mask->ptr, 0, mask->range); - mutex_unlock(&mask->lock); + if (mask && mask->ptr) { + mutex_lock(&mask->lock); + memset(mask->ptr, 0, mask->range); + mutex_unlock(&mask->lock); + } } mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED; mutex_unlock(&driver->md_session_lock); diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 427dc84f2780f75409c093d56b5071b89abf97d5..d49c822c55454fcae5ba6b0ff99f529b80887129 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1721,8 +1721,8 @@ static void diag_switch_logging_clear_mask( static int diag_switch_logging(struct diag_logging_mode_param_t *param) { int new_mode, i = 0; - int curr_mode, err = 0; - uint8_t do_switch = 1, peripheral = 0; + int curr_mode, err = 0, peripheral = 0; + uint8_t do_switch = 1; uint32_t peripheral_mask = 0, pd_mask = 0; if (!param) @@ -1736,6 +1736,10 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) if (param->pd_mask) { pd_mask = diag_translate_mask(param->pd_mask); + param->diag_id = 0; + param->pd_val = 0; + param->peripheral = -EINVAL; + for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) { if (pd_mask & (1 << i)) { if (diag_search_diagid_by_pd(i, ¶m->diag_id, @@ -1745,6 +1749,12 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) } } } + + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "diag: pd_mask = %d, diag_id = %d, peripheral = %d, pd_val = %d\n", + param->pd_mask, param->diag_id, + param->peripheral, param->pd_val); + if 
(!param->diag_id || (param->pd_val < UPD_WLAN) || (param->pd_val >= NUM_MD_SESSIONS)) { @@ -1754,22 +1764,26 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) return -EINVAL; } - DIAG_LOG(DIAG_DEBUG_USERSPACE, - "diag: pd_mask = %d, diag_id = %d, peripheral = %d, pd_val = %d\n", - param->pd_mask, param->diag_id, - param->peripheral, param->pd_val); - peripheral = param->peripheral; + if ((peripheral < PERIPHERAL_MODEM) || + (peripheral >= NUM_PERIPHERALS)) { + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "Invalid peripheral: %d\n", peripheral); + return -EINVAL; + } i = param->pd_val - UPD_WLAN; + mutex_lock(&driver->md_session_lock); if (driver->md_session_map[peripheral] && (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask) && !driver->pd_session_clear[i]) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag_fr: User PD is already logging onto active peripheral logging\n"); + mutex_unlock(&driver->md_session_lock); driver->pd_session_clear[i] = 0; return -EINVAL; } + mutex_unlock(&driver->md_session_lock); peripheral_mask = diag_translate_mask(param->pd_mask); param->peripheral_mask = peripheral_mask; diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index f7784fdbb7ef6da3027d2a265fddf2d3ad7b814a..4a1de090281815e350ce377cc306a6d2ae490a42 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -48,6 +48,7 @@ #define STM_RSP_SUPPORTED_INDEX 7 #define STM_RSP_STATUS_INDEX 8 #define STM_RSP_NUM_BYTES 9 +#define RETRY_MAX_COUNT 1000 struct diag_md_hdlc_reset_work { int pid; @@ -248,28 +249,22 @@ static void pack_rsp_and_send(unsigned char *buf, int len, * if its supporting qshrink4 feature. */ if (info && info->peripheral_mask) { - if (info->peripheral_mask == DIAG_CON_ALL || - (info->peripheral_mask & (1 << APPS_DATA)) || - (info->peripheral_mask & (1 << PERIPHERAL_MODEM))) { - rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1); - } else { - for (i = 0; i < NUM_MD_SESSIONS; i++) { - if (info->peripheral_mask & (1 << i)) - break; - } - rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1); + for (i = 0; i < NUM_MD_SESSIONS; i++) { + if (info->peripheral_mask & (1 << i)) + break; } + rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD); } else rsp_ctxt = driver->rsp_buf_ctxt; mutex_unlock(&driver->md_session_lock); /* * Keep trying till we get the buffer back. It should probably - * take one or two iterations. When this loops till UINT_MAX, it + * take one or two iterations. When this loops till RETRY_MAX_COUNT, it * means we did not get a write complete for the previous * response. */ - while (retry_count < UINT_MAX) { + while (retry_count < RETRY_MAX_COUNT) { if (!driver->rsp_buf_busy) break; /* @@ -347,27 +342,21 @@ static void encode_rsp_and_send(unsigned char *buf, int len, * if its supporting qshrink4 feature. */ if (info && info->peripheral_mask) { - if (info->peripheral_mask == DIAG_CON_ALL || - (info->peripheral_mask & (1 << APPS_DATA)) || - (info->peripheral_mask & (1 << PERIPHERAL_MODEM))) { - rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1); - } else { - for (i = 0; i < NUM_MD_SESSIONS; i++) { - if (info->peripheral_mask & (1 << i)) - break; - } - rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1); + for (i = 0; i < NUM_MD_SESSIONS; i++) { + if (info->peripheral_mask & (1 << i)) + break; } + rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, TYPE_CMD); } else rsp_ctxt = driver->rsp_buf_ctxt; mutex_unlock(&driver->md_session_lock); /* * Keep trying till we get the buffer back. It should probably - * take one or two iterations. 
When this loops till UINT_MAX, it + * take one or two iterations. When this loops till RETRY_MAX_COUNT, it * means we did not get a write complete for the previous * response. */ - while (retry_count < UINT_MAX) { + while (retry_count < RETRY_MAX_COUNT) { if (!driver->rsp_buf_busy) break; /* @@ -1803,14 +1792,18 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt, } break; case TYPE_CMD: - if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) { + if (peripheral >= 0 && peripheral < NUM_PERIPHERALS && + num != TYPE_CMD) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n", - peripheral, type, num); + peripheral, type, num); diagfwd_write_done(peripheral, type, num); - } - if (peripheral == APPS_DATA || - ctxt == DIAG_MEMORY_DEVICE_MODE) { + } else if (peripheral == APPS_DATA || + (peripheral >= 0 && peripheral < NUM_PERIPHERALS && + num == TYPE_CMD)) { + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "Marking APPS response buffer free after write done for p: %d, t: %d, buf_num: %d\n", + peripheral, type, num); spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags); driver->rsp_buf_busy = 0; driver->encoded_rsp_len = 0; diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 704b945a026b300a8fed516d4ed3ab0b15348292..10da2c717088aaf8987553ac134135d90b122bca 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -445,8 +445,8 @@ static void process_last_event_report(uint8_t *buf, uint32_t len, header = (struct diag_ctrl_last_event_report *)ptr; event_size = ((header->event_last_id / 8) + 1); if (event_size >= driver->event_mask_size) { - pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n", - __func__); + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: receiving event mask size more that Apps can handle\n"); temp = krealloc(driver->event_mask->ptr, event_size, GFP_KERNEL); if (!temp) { @@ -560,6 +560,10 @@ static void process_ssid_range_report(uint8_t *buf, uint32_t len, mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr; found = 0; for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) { + if (!mask_ptr || !ssid_range) { + found = 1; + break; + } if (mask_ptr->ssid_first != ssid_range->ssid_first) continue; mutex_lock(&mask_ptr->lock); @@ -578,6 +582,8 @@ static void process_ssid_range_report(uint8_t *buf, uint32_t len, new_size = (driver->msg_mask_tbl_count + 1) * sizeof(struct diag_msg_mask_t); + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: receiving msg mask size more that Apps can handle\n"); temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL); if (!temp) { pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n", @@ -586,6 +592,7 @@ static void process_ssid_range_report(uint8_t *buf, uint32_t len, continue; } msg_mask.ptr = temp; + mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr; err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range); if (err) { pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n", @@ -625,6 +632,10 @@ static void diag_build_time_mask_update(uint8_t *buf, num_items = range->ssid_last - range->ssid_first + 1; for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) { + if (!build_mask) { + found = 1; + break; + } if (build_mask->ssid_first != range->ssid_first) continue; found = 1; @@ -635,7 +646,8 @@ static void diag_build_time_mask_update(uint8_t *buf, __func__); } dest_ptr = build_mask->ptr; - for (j = 0; j < build_mask->range; j++, 
mask_ptr++, dest_ptr++) + for (j = 0; (j < build_mask->range) && mask_ptr && dest_ptr; + j++, mask_ptr++, dest_ptr++) *(uint32_t *)dest_ptr |= *mask_ptr; mutex_unlock(&build_mask->lock); break; @@ -643,8 +655,12 @@ static void diag_build_time_mask_update(uint8_t *buf, if (found) goto end; + new_size = (driver->bt_msg_mask_tbl_count + 1) * sizeof(struct diag_msg_mask_t); + DIAG_LOG(DIAG_DEBUG_MASKS, + "diag: receiving build time mask size more that Apps can handle\n"); + temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL); if (!temp) { pr_err("diag: In %s, unable to create a new entry for build time mask\n", @@ -652,6 +668,7 @@ static void diag_build_time_mask_update(uint8_t *buf, goto end; } driver->build_time_mask->ptr = temp; + build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr; err = diag_create_msg_mask_table_entry(build_mask, range); if (err) { pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n", diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c index 58259e1221b8533e4c6a94848ea05fe2eb85bb85..215f5a54fa07086d9f913d9c63645a233cd565ec 100644 --- a/drivers/char/diag/diagfwd_mhi.c +++ b/drivers/char/diag/diagfwd_mhi.c @@ -720,7 +720,7 @@ void diag_mhi_exit(void) static const struct mhi_device_id diag_mhi_match_table[] = { { .chan = "DIAG", .driver_data = MHI_1 }, { .chan = "DCI", .driver_data = MHI_DCI_1 }, - {NULL}, + {}, }; static struct mhi_driver diag_mhi_driver = { diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c index 13be6d6cdf69c352dbfa9c2210e238d9c4d38be0..2312c85fad70af7c57ca58fac47fa20677e29280 100644 --- a/drivers/char/diag/diagfwd_socket.c +++ b/drivers/char/diag/diagfwd_socket.c @@ -52,6 +52,10 @@ #define INST_ID_DCI_CMD 3 #define INST_ID_DCI 4 +#define MAX_BUF_SIZE 0x4400 +#define MAX_NO_PACKETS 10 +#define DIAG_SO_RCVBUF_SIZE (MAX_BUF_SIZE * MAX_NO_PACKETS) + struct qmi_handle *cntl_qmi; static uint64_t bootup_req[NUM_SOCKET_SUBSYSTEMS]; @@ -438,6 +442,7 @@ static void socket_open_server(struct diag_socket_info *info) struct kvec iv = { &pkt, sizeof(pkt) }; int ret; int sl = sizeof(sq); + unsigned int size = DIAG_SO_RCVBUF_SIZE; if (!info || info->port_type != PORT_TYPE_SERVER) return; @@ -456,6 +461,9 @@ static void socket_open_server(struct diag_socket_info *info) return; } + kernel_setsockopt(info->hdl, SOL_SOCKET, SO_RCVBUF, + (char *)&size, sizeof(size)); + write_lock_bh(&info->hdl->sk->sk_callback_lock); info->hdl->sk->sk_user_data = (void *)(info); info->hdl->sk->sk_data_ready = socket_data_ready; diff --git a/drivers/char/fastcvpd.c b/drivers/char/fastcvpd.c index 279fc71b53a7b06683a79e0aee96251858a63af5..202d4404a5507c64a3d92ccc39d3ef7a92178506 100644 --- a/drivers/char/fastcvpd.c +++ b/drivers/char/fastcvpd.c @@ -184,7 +184,7 @@ EXPORT_SYMBOL(fastcvpd_video_resume); int fastcvpd_video_shutdown(uint32_t session_flag) { struct fastcvpd_apps *me = &gfa_cv; - int err; + int err, local_cmd_msg_rsp; struct fastcvpd_cmd_msg local_cmd_msg; int srcVM[DEST_VM_NUM] = {VMID_HLOS, VMID_CDSP_Q6}; int destVM[SRC_VM_NUM] = {VMID_HLOS}; @@ -202,21 +202,21 @@ int fastcvpd_video_shutdown(uint32_t session_flag) spin_lock(&me->hlock); local_cmd_msg.msg_ptr = cmd_msg.msg_ptr; local_cmd_msg.msg_ptr_len = cmd_msg.msg_ptr_len; - if (cmd_msg_rsp.ret_val == 0) { + local_cmd_msg_rsp = cmd_msg_rsp.ret_val; + spin_unlock(&me->hlock); + if (local_cmd_msg_rsp == 0) { err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr, local_cmd_msg.msg_ptr_len, srcVM, DEST_VM_NUM, 
destVM, destVMperm, SRC_VM_NUM); if (err) { pr_err("%s: Failed to hyp_assign. err=%d\n", __func__, err); - spin_unlock(&me->hlock); return err; } } else { pr_err("%s: Skipping hyp_assign as CDSP sent invalid response=%d\n", __func__, cmd_msg_rsp.ret_val); } - spin_unlock(&me->hlock); return err; } diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index feafdab734ae20b268e3078bd71822ef382a408e..4835b588b7833fb554ac0394c8c7ada8262d6892 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ BT_CONTROL(BT_H_BUSY); + bt->timeout = bt->BT_CAP_req2rsp; + /* Read BT capabilities if it hasn't been done yet */ if (!bt->BT_CAP_outreqs) BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, SI_SM_CALL_WITHOUT_DELAY); - bt->timeout = bt->BT_CAP_req2rsp; BT_SI_SM_RETURN(SI_SM_IDLE); case BT_STATE_XACTION_START: diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c new file mode 100644 index 0000000000000000000000000000000000000000..0c623221bd1c0dff53caf3d2d76782d36b639cae --- /dev/null +++ b/drivers/char/rdbg.c @@ -0,0 +1,1212 @@ +/* + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SMP2P_NUM_PROCS 16 +#define MAX_RETRIES 20 + +#define SM_VERSION 1 +#define SM_BLOCKSIZE 128 + +#define SMQ_MAGIC_INIT 0xFF00FF00 +#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1) +#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2) + +#define SMEM_LC_DEBUGGER 470 + +enum SMQ_STATUS { + SMQ_SUCCESS = 0, + SMQ_ENOMEMORY = -1, + SMQ_EBADPARM = -2, + SMQ_UNDERFLOW = -3, + SMQ_OVERFLOW = -4 +}; + +enum smq_type { + PRODUCER = 1, + CONSUMER = 2, + INVALID = 3 +}; + +struct smq_block_map { + uint32_t index_read; + uint32_t num_blocks; + uint8_t *map; +}; + +struct smq_node { + uint16_t index_block; + uint16_t num_blocks; +} __attribute__ ((__packed__)); + +struct smq_hdr { + uint8_t producer_version; + uint8_t consumer_version; +} __attribute__ ((__packed__)); + +struct smq_out_state { + uint32_t init; + uint32_t index_check_queue_for_reset; + uint32_t index_sent_write; + uint32_t index_free_read; +} __attribute__ ((__packed__)); + +struct smq_out { + struct smq_out_state s; + struct smq_node sent[1]; +}; + +struct smq_in_state { + uint32_t init; + uint32_t index_check_queue_for_reset_ack; + uint32_t index_sent_read; + uint32_t index_free_write; +} __attribute__ ((__packed__)); + +struct smq_in { + struct smq_in_state s; + struct smq_node free[1]; +}; + +struct smq { + struct smq_hdr *hdr; + struct smq_out *out; + struct smq_in *in; + uint8_t *blocks; + uint32_t num_blocks; + struct mutex *lock; + uint32_t initialized; + struct smq_block_map block_map; + enum smq_type type; +}; + +struct gpio_info { + int gpio_base_id; + int irq_base_id; + unsigned int smem_bit; + struct qcom_smem_state *smem_state; +}; + +struct rdbg_data { + 
struct device *device; + struct completion work; + struct gpio_info in; + struct gpio_info out; + bool device_initialized; + int gpio_out_offset; + bool device_opened; + void *smem_addr; + size_t smem_size; + struct smq producer_smrb; + struct smq consumer_smrb; + struct mutex write_mutex; +}; + +struct rdbg_device { + struct cdev cdev; + struct class *class; + dev_t dev_no; + int num_devices; + struct rdbg_data *rdbg_data; +}; + + +int registers[32] = {0}; +static struct rdbg_device g_rdbg_instance = { + { {0} }, + NULL, + 0, + SMP2P_NUM_PROCS, + NULL +}; + +struct processor_specific_info { + char *name; + unsigned int smem_buffer_addr; + size_t smem_buffer_size; +}; + +static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = { + {0}, /*APPS*/ + {"rdbg_modem", 0, 0}, /*MODEM*/ + {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/ + {0}, /*SMP2P_RESERVED_PROC_1*/ + {"rdbg_wcnss", 0, 0}, /*WCNSS*/ + {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/ + {NULL}, /*SMP2P_POWER_PROC*/ + {NULL}, /*SMP2P_TZ_PROC*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL}, /*EMPTY*/ + {NULL} /*SMP2P_REMOTE_MOCK_PROC*/ +}; + +static int smq_blockmap_get(struct smq_block_map *block_map, + uint32_t *block_index, uint32_t n) +{ + uint32_t start; + uint32_t mark = 0; + uint32_t found = 0; + uint32_t i = 0; + + start = block_map->index_read; + + if (n == 1) { + do { + if (!block_map->map[block_map->index_read]) { + *block_index = block_map->index_read; + block_map->map[block_map->index_read] = 1; + block_map->index_read++; + block_map->index_read %= block_map->num_blocks; + return SMQ_SUCCESS; + } + block_map->index_read++; + } while (start != (block_map->index_read %= + block_map->num_blocks)); + } else { + mark = block_map->num_blocks; + + do { + if (!block_map->map[block_map->index_read]) { + if (mark > block_map->index_read) { + mark = block_map->index_read; + start = block_map->index_read; + found = 0; + } + + found++; + if (found == n) { + *block_index = mark; + for (i = 0; i < n; i++) + block_map->map[mark + i] = + (uint8_t)(n - i); + block_map->index_read += block_map->map + [block_map->index_read] - 1; + return SMQ_SUCCESS; + } + } else { + found = 0; + block_map->index_read += block_map->map + [block_map->index_read] - 1; + mark = block_map->num_blocks; + } + block_map->index_read++; + } while (start != (block_map->index_read %= + block_map->num_blocks)); + } + + return SMQ_ENOMEMORY; +} + +static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i) +{ + uint32_t num_blocks = block_map->map[i]; + + while (num_blocks--) { + block_map->map[i] = 0; + i++; + } +} + +static int smq_blockmap_reset(struct smq_block_map *block_map) +{ + if (!block_map->map) + return SMQ_ENOMEMORY; + memset(block_map->map, 0, block_map->num_blocks + 1); + block_map->index_read = 0; + + return SMQ_SUCCESS; +} + +static int smq_blockmap_ctor(struct smq_block_map *block_map, + uint32_t num_blocks) +{ + if (num_blocks <= 1) + return SMQ_ENOMEMORY; + + block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL); + if (!block_map->map) + return SMQ_ENOMEMORY; + + block_map->num_blocks = num_blocks - 1; + smq_blockmap_reset(block_map); + + return SMQ_SUCCESS; +} + +static void smq_blockmap_dtor(struct smq_block_map *block_map) +{ + kfree(block_map->map); + block_map->map = NULL; +} + +static int smq_free(struct smq *smq, void *data) +{ + struct smq_node node; + uint32_t index_block; + int err = SMQ_SUCCESS; + + if (smq->lock) + 
mutex_lock(smq->lock); + + if ((smq->hdr->producer_version != SM_VERSION) && + (smq->out->s.init != SMQ_MAGIC_PRODUCER)) { + err = SMQ_UNDERFLOW; + goto bail; + } + + index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE; + if (index_block >= smq->num_blocks) { + err = SMQ_EBADPARM; + goto bail; + } + + node.index_block = (uint16_t)index_block; + node.num_blocks = 0; + *((struct smq_node *)(smq->in->free + + smq->in->s.index_free_write)) = node; + + smq->in->s.index_free_write = (smq->in->s.index_free_write + 1) + % smq->num_blocks; + +bail: + if (smq->lock) + mutex_unlock(smq->lock); + return err; +} + +static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore) +{ + struct smq_node *node; + int err = SMQ_SUCCESS; + int more = 0; + + if ((smq->hdr->producer_version != SM_VERSION) && + (smq->out->s.init != SMQ_MAGIC_PRODUCER)) + return SMQ_UNDERFLOW; + + if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) { + err = SMQ_UNDERFLOW; + goto bail; + } + + node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read); + if (node->index_block >= smq->num_blocks) { + err = SMQ_EBADPARM; + goto bail; + } + + smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1) + % smq->num_blocks; + + *pp = smq->blocks + (node->index_block * SM_BLOCKSIZE); + *pnsize = SM_BLOCKSIZE * node->num_blocks; + + /* + * Ensure that the reads and writes are updated in the memory + * when they are done and not cached. Also, ensure that the reads + * and writes are not reordered as they are shared between two cores. + */ + rmb(); + if (smq->in->s.index_sent_read != smq->out->s.index_sent_write) + more = 1; + +bail: + *pbmore = more; + return err; +} + +static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize) +{ + void *pv = 0; + int num_blocks; + uint32_t index_block = 0; + int err = SMQ_SUCCESS; + struct smq_node *node = NULL; + + mutex_lock(smq->lock); + + if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) && + (smq->hdr->consumer_version == SM_VERSION)) { + if (smq->out->s.index_check_queue_for_reset == + smq->in->s.index_check_queue_for_reset_ack) { + while (smq->out->s.index_free_read != + smq->in->s.index_free_write) { + node = (struct smq_node *)( + smq->in->free + + smq->out->s.index_free_read); + if (node->index_block >= smq->num_blocks) { + err = SMQ_EBADPARM; + goto bail; + } + + smq->out->s.index_free_read = + (smq->out->s.index_free_read + 1) + % smq->num_blocks; + + smq_blockmap_put(&smq->block_map, + node->index_block); + /* + * Ensure that the reads and writes are + * updated in the memory when they are done + * and not cached. Also, ensure that the reads + * and writes are not reordered as they are + * shared between two cores. 
+ */ + rmb(); + } + } + } + + num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE; + err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks); + if (err != SMQ_SUCCESS) + goto bail; + + pv = smq->blocks + (SM_BLOCKSIZE * index_block); + + err = copy_from_user((void *)pv, (void *)pcb, nsize); + if (err != 0) + goto bail; + + ((struct smq_node *)(smq->out->sent + + smq->out->s.index_sent_write))->index_block + = (uint16_t)index_block; + ((struct smq_node *)(smq->out->sent + + smq->out->s.index_sent_write))->num_blocks + = (uint16_t)num_blocks; + + smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1) + % smq->num_blocks; + +bail: + if (err != SMQ_SUCCESS) { + if (pv) + smq_blockmap_put(&smq->block_map, index_block); + } + mutex_unlock(smq->lock); + return err; +} + +static int smq_reset_producer_queue_internal(struct smq *smq, + uint32_t reset_num) +{ + int retval = 0; + uint32_t i; + + if (smq->type != PRODUCER) + goto bail; + + mutex_lock(smq->lock); + if (smq->out->s.index_check_queue_for_reset != reset_num) { + smq->out->s.index_check_queue_for_reset = reset_num; + for (i = 0; i < smq->num_blocks; i++) + (smq->out->sent + i)->index_block = 0xFFFF; + + smq_blockmap_reset(&smq->block_map); + smq->out->s.index_sent_write = 0; + smq->out->s.index_free_read = 0; + retval = 1; + } + mutex_unlock(smq->lock); + +bail: + return retval; +} + +static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod) +{ + int retval = 0; + uint32_t reset_num, i; + + if ((p_cons->type != CONSUMER) || + (p_cons->out->s.init != SMQ_MAGIC_PRODUCER) || + (p_cons->hdr->producer_version != SM_VERSION)) + goto bail; + + reset_num = p_cons->out->s.index_check_queue_for_reset; + if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) { + p_cons->in->s.index_check_queue_for_reset_ack = reset_num; + for (i = 0; i < p_cons->num_blocks; i++) + (p_cons->in->free + i)->index_block = 0xFFFF; + + p_cons->in->s.index_sent_read = 0; + p_cons->in->s.index_free_write = 0; + + retval = smq_reset_producer_queue_internal(p_prod, reset_num); + } + +bail: + return retval; +} + +static int check_subsystem_debug_enabled(void *base_addr, int size) +{ + int num_blocks; + uint8_t *pb_orig; + uint8_t *pb; + struct smq smq; + int err = 0; + + pb = pb_orig = (uint8_t *)base_addr; + pb += sizeof(struct smq_hdr); + pb = PTR_ALIGN(pb, 8); + size -= pb - (uint8_t *)pb_orig; + num_blocks = (int)((size - sizeof(struct smq_out_state) - + sizeof(struct smq_in_state))/(SM_BLOCKSIZE + + sizeof(struct smq_node) * 2)); + if (num_blocks <= 0) { + err = SMQ_EBADPARM; + goto bail; + } + + pb += num_blocks * SM_BLOCKSIZE; + smq.out = (struct smq_out *)pb; + pb += sizeof(struct smq_out_state) + (num_blocks * + sizeof(struct smq_node)); + smq.in = (struct smq_in *)pb; + + if (smq.in->s.init != SMQ_MAGIC_CONSUMER) { + pr_err("%s, smq in consumer not initialized\n", __func__); + err = -ECOMM; + } + +bail: + return err; +} + +static void smq_dtor(struct smq *smq) +{ + if (smq->initialized == SMQ_MAGIC_INIT) { + switch (smq->type) { + case PRODUCER: + smq->out->s.init = 0; + smq_blockmap_dtor(&smq->block_map); + break; + case CONSUMER: + smq->in->s.init = 0; + break; + default: + case INVALID: + break; + } + + smq->initialized = 0; + } +} + +/* + * The shared memory is used as a circular ring buffer in each direction. + * Thus we have a bi-directional shared memory channel between the AP + * and a subsystem. We call this SMQ. 
Each memory channel contains a header, + * data and a control mechanism that is used to synchronize read and write + * of data between the AP and the remote subsystem. + * + * Overall SMQ memory view: + * + * +------------------------------------------------+ + * | SMEM buffer | + * |-----------------------+------------------------| + * |Producer: LA | Producer: Remote | + * |Consumer: Remote | subsystem | + * | subsystem | Consumer: LA | + * | | | + * | Producer| Consumer| + * +-----------------------+------------------------+ + * | | + * | | + * | +--------------------------------------+ + * | | + * | | + * v v + * +--------------------------------------------------------------+ + * | Header | Data | Control | + * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ + * | | b | b | b | | S |n |n | | S |n |n | | + * | Producer | l | l | l | | M |o |o | | M |o |o | | + * | Ver | o | o | o | | Q |d |d | | Q |d |d | | + * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... | + * | | k | k | k | | O | | | | I | | | | + * | Consumer | | | | | u |0 |1 | | n |0 |1 | | + * | Ver | 0 | 1 | 2 | | t | | | | | | | | + * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ + * | | + * + | + * | + * +------------------------+ + * | + * v + * +----+----+----+----+ + * | SMQ Nodes | + * |----|----|----|----| + * Node # | 0 | 1 | 2 | ...| + * |----|----|----|----| + * Starting Block Index # | 0 | 3 | 8 | ...| + * |----|----|----|----| + * # of blocks | 3 | 5 | 1 | ...| + * +----+----+----+----+ + * + * Header: Contains version numbers for software compatibility to ensure + * that both producers and consumers on the AP and subsystems know how to + * read from and write to the queue. + * Both the producer and consumer versions are 1. + * +---------+-------------------+ + * | Size | Field | + * +---------+-------------------+ + * | 1 byte | Producer Version | + * +---------+-------------------+ + * | 1 byte | Consumer Version | + * +---------+-------------------+ + * + * Data: The data portion contains multiple blocks [0..N] of a fixed size. + * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1. + * Payload sent from the debug agent app is split (if necessary) and placed + * in these blocks. The first data block is placed at the next 8 byte aligned + * address after the header. + * + * The number of blocks for a given SMEM allocation is derived as follows: + * Number of Blocks = ((Total Size - Alignment - Size of Header + * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE)) + * + * The producer maintains a private block map of each of these blocks to + * determine which of these blocks in the queue is available and which are free. + * + * Control: + * The control portion contains a list of nodes [0..N] where N is number + * of available data blocks. Each node identifies the data + * block indexes that contain a particular debug message to be transferred, + * and the number of blocks it took to hold the contents of the message. + * + * Each node has the following structure: + * +---------+-------------------+ + * | Size | Field | + * +---------+-------------------+ + * | 2 bytes |Staring Block Index| + * +---------+-------------------+ + * | 2 bytes |Number of Blocks | + * +---------+-------------------+ + * + * The producer and the consumer update different parts of the control channel + * (SMQOut / SMQIn) respectively. 
Each of these control data structures contains + * information about the last node that was written / read, and the actual nodes + * that were written/read. + * + * SMQOut Structure (R/W by producer, R by consumer): + * +---------+-------------------+ + * | Size | Field | + * +---------+-------------------+ + * | 4 bytes | Magic Init Number | + * +---------+-------------------+ + * | 4 bytes | Reset | + * +---------+-------------------+ + * | 4 bytes | Last Sent Index | + * +---------+-------------------+ + * | 4 bytes | Index Free Read | + * +---------+-------------------+ + * + * SMQIn Structure (R/W by consumer, R by producer): + * +---------+-------------------+ + * | Size | Field | + * +---------+-------------------+ + * | 4 bytes | Magic Init Number | + * +---------+-------------------+ + * | 4 bytes | Reset ACK | + * +---------+-------------------+ + * | 4 bytes | Last Read Index | + * +---------+-------------------+ + * | 4 bytes | Index Free Write | + * +---------+-------------------+ + * + * Magic Init Number: + * Both SMQ Out and SMQ In initialize this field with a predefined magic + * number so as to make sure that both the consumer and producer blocks + * have fully initialized and have valid data in the shared memory control area. + * Producer Magic #: 0xFF00FF01 + * Consumer Magic #: 0xFF00FF02 + */ +static int smq_ctor(struct smq *smq, void *base_addr, int size, + enum smq_type type, struct mutex *lock_ptr) +{ + int num_blocks; + uint8_t *pb_orig; + uint8_t *pb; + uint32_t i; + int err; + + if (smq->initialized == SMQ_MAGIC_INIT) { + err = SMQ_EBADPARM; + goto bail; + } + + if (!base_addr || !size) { + err = SMQ_EBADPARM; + goto bail; + } + + if (type == PRODUCER) + smq->lock = lock_ptr; + + pb_orig = (uint8_t *)base_addr; + smq->hdr = (struct smq_hdr *)pb_orig; + pb = pb_orig; + pb += sizeof(struct smq_hdr); + pb = PTR_ALIGN(pb, 8); + size -= pb - (uint8_t *)pb_orig; + num_blocks = (int)((size - sizeof(struct smq_out_state) - + sizeof(struct smq_in_state))/(SM_BLOCKSIZE + + sizeof(struct smq_node) * 2)); + if (num_blocks <= 0) { + err = SMQ_ENOMEMORY; + goto bail; + } + + smq->blocks = pb; + smq->num_blocks = num_blocks; + pb += num_blocks * SM_BLOCKSIZE; + smq->out = (struct smq_out *)pb; + pb += sizeof(struct smq_out_state) + (num_blocks * + sizeof(struct smq_node)); + smq->in = (struct smq_in *)pb; + smq->type = type; + if (type == PRODUCER) { + smq->hdr->producer_version = SM_VERSION; + for (i = 0; i < smq->num_blocks; i++) + (smq->out->sent + i)->index_block = 0xFFFF; + + err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks); + if (err != SMQ_SUCCESS) + goto bail; + + smq->out->s.index_sent_write = 0; + smq->out->s.index_free_read = 0; + if (smq->out->s.init == SMQ_MAGIC_PRODUCER) { + smq->out->s.index_check_queue_for_reset += 1; + } else { + smq->out->s.index_check_queue_for_reset = 1; + smq->out->s.init = SMQ_MAGIC_PRODUCER; + } + } else { + smq->hdr->consumer_version = SM_VERSION; + for (i = 0; i < smq->num_blocks; i++) + (smq->in->free + i)->index_block = 0xFFFF; + + smq->in->s.index_sent_read = 0; + smq->in->s.index_free_write = 0; + if (smq->out->s.init == SMQ_MAGIC_PRODUCER) { + smq->in->s.index_check_queue_for_reset_ack = + smq->out->s.index_check_queue_for_reset; + } else { + smq->in->s.index_check_queue_for_reset_ack = 0; + } + + smq->in->s.init = SMQ_MAGIC_CONSUMER; + } + smq->initialized = SMQ_MAGIC_INIT; + err = SMQ_SUCCESS; + +bail: + return err; +} + +static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata) +{ + unsigned int offset = 
rdbgdata->gpio_out_offset; + unsigned int val; + + val = (registers[offset]) ^ (BIT(rdbgdata->out.smem_bit+offset)); + qcom_smem_state_update_bits(rdbgdata->out.smem_state, + BIT(rdbgdata->out.smem_bit+offset), val); + registers[offset] = val; + rdbgdata->gpio_out_offset = (offset + 1) % 32; +} + +static irqreturn_t on_interrupt_from(int irq, void *ptr) +{ + struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr; + + dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem\n", + __func__, irq); + complete(&(rdbgdata->work)); + return IRQ_HANDLED; +} + +static int initialize_smq(struct rdbg_data *rdbgdata) +{ + int err = 0; + unsigned char *smem_consumer_buffer = rdbgdata->smem_addr; + + smem_consumer_buffer += (rdbgdata->smem_size/2); + + if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr), + ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) { + dev_err(rdbgdata->device, "%s: smq producer allocation failed\n", + __func__); + err = -ENOMEM; + goto bail; + } + + if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer, + ((rdbgdata->smem_size)/2), CONSUMER, NULL)) { + dev_err(rdbgdata->device, "%s: smq consumer allocation failed\n", + __func__); + err = -ENOMEM; + } + +bail: + return err; + +} + +static int rdbg_open(struct inode *inode, struct file *filp) +{ + int device_id = -1; + struct rdbg_device *device = &g_rdbg_instance; + struct rdbg_data *rdbgdata = NULL; + int err = 0; + + if (!inode || !device->rdbg_data) { + pr_err("Memory not allocated yet\n"); + err = -ENODEV; + goto bail; + } + + device_id = MINOR(inode->i_rdev); + rdbgdata = &device->rdbg_data[device_id]; + + if (rdbgdata->device_opened) { + dev_err(rdbgdata->device, "%s: Device already opened\n", + __func__); + err = -EEXIST; + goto bail; + } + + rdbgdata->smem_size = proc_info[device_id].smem_buffer_size; + if (!rdbgdata->smem_size) { + dev_err(rdbgdata->device, "%s: smem not initialized\n", + __func__); + err = -ENOMEM; + goto bail; + } + + rdbgdata->smem_addr = qcom_smem_get(QCOM_SMEM_HOST_ANY, + proc_info[device_id].smem_buffer_addr, + &(rdbgdata->smem_size)); + if (!rdbgdata->smem_addr) { + dev_err(rdbgdata->device, "%s: Could not allocate smem memory\n", + __func__); + err = -ENOMEM; + pr_err("rdbg:Could not allocate smem memory\n"); + goto bail; + } + dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d\n", + __func__, (unsigned long)rdbgdata->smem_addr, + (unsigned int)rdbgdata->smem_size); + + if (check_subsystem_debug_enabled(rdbgdata->smem_addr, + rdbgdata->smem_size/2)) { + dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled\n", + __func__, proc_info[device_id].name); + pr_err("rdbg:Sub system debug is not enabled\n"); + err = -ECOMM; + goto bail; + } + + init_completion(&rdbgdata->work); + + err = request_threaded_irq(rdbgdata->in.irq_base_id, NULL, + on_interrupt_from, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + proc_info[device_id].name, (void *)&device->rdbg_data[device_id]); + if (err) { + dev_err(rdbgdata->device, + "%s: Failed to register interrupt.Err=%d,irqid=%d.\n", + __func__, err, rdbgdata->in.irq_base_id); + pr_err("rdbg : Failed to register interrupt %d\n", err); + goto bail; + } + + mutex_init(&rdbgdata->write_mutex); + + err = initialize_smq(rdbgdata); + if (err) { + dev_err(rdbgdata->device, "Error initializing smq. 
Err=%d\n", + err); + pr_err("rdbg: initialize_smq() failed with err %d\n", err); + goto smq_bail; + } + + rdbgdata->device_opened = 1; + + filp->private_data = (void *)rdbgdata; + return 0; + +smq_bail: + smq_dtor(&(rdbgdata->producer_smrb)); + smq_dtor(&(rdbgdata->consumer_smrb)); + mutex_destroy(&rdbgdata->write_mutex); +bail: + return err; +} + +static int rdbg_release(struct inode *inode, struct file *filp) +{ + int device_id = -1; + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + struct rdbg_data *rdbgdata = NULL; + int err = 0; + + if (!inode || !rdbgdevice->rdbg_data) { + pr_err("Memory not allocated yet\n"); + err = -ENODEV; + goto bail; + } + + device_id = MINOR(inode->i_rdev); + rdbgdata = &rdbgdevice->rdbg_data[device_id]; + + if (rdbgdata->device_opened == 1) { + dev_dbg(rdbgdata->device, "%s: Destroying %s.\n", __func__, + proc_info[device_id].name); + rdbgdata->device_opened = 0; + complete(&(rdbgdata->work)); + if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized) + smq_dtor(&( + rdbgdevice->rdbg_data[device_id].producer_smrb)); + if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized) + smq_dtor(&( + rdbgdevice->rdbg_data[device_id].consumer_smrb)); + mutex_destroy(&rdbgdata->write_mutex); + } + + filp->private_data = NULL; + +bail: + return err; +} + +static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size, + loff_t *offset) +{ + int err = 0; + struct rdbg_data *rdbgdata = filp->private_data; + void *p_sent_buffer = NULL; + int nsize = 0; + int more = 0; + + if (!rdbgdata) { + pr_err("Invalid argument"); + err = -EINVAL; + goto bail; + } + + dev_dbg(rdbgdata->device, "%s: In receive\n", __func__); + err = wait_for_completion_interruptible(&(rdbgdata->work)); + if (err) { + dev_err(rdbgdata->device, "%s: Error in wait\n", __func__); + goto bail; + } + + smq_check_queue_reset(&(rdbgdata->consumer_smrb), + &(rdbgdata->producer_smrb)); + if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer, + &nsize, &more) != SMQ_SUCCESS) { + dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d\n", + __func__, err); + err = -ENODATA; + goto bail; + } + size = ((size < nsize) ? size : nsize); + err = copy_to_user(buf, p_sent_buffer, size); + if (err != 0) { + dev_err(rdbgdata->device, "%s: Error in copy_to_user(). 
Err code = %d\n", + __func__, err); + err = -ENODATA; + goto bail; + } + smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer); + err = size; + dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx\n", + __func__, (unsigned long) buf); + +bail: + return err; +} + +static ssize_t rdbg_write(struct file *filp, const char __user *buf, + size_t size, loff_t *offset) +{ + int err = 0; + int num_retries = 0; + struct rdbg_data *rdbgdata = filp->private_data; + + if (!rdbgdata) { + pr_err("Invalid argument"); + err = -EINVAL; + goto bail; + } + + do { + err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size); + dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.\n", + __func__, err); + } while (err != 0 && num_retries++ < MAX_RETRIES); + + if (err != 0) { + pr_err("rdbg: send_interrupt_to_subsystem failed\n"); + err = -ECOMM; + goto bail; + } + + send_interrupt_to_subsystem(rdbgdata); + + err = size; + +bail: + return err; +} + +static const struct file_operations rdbg_fops = { + .open = rdbg_open, + .read = rdbg_read, + .write = rdbg_write, + .release = rdbg_release, +}; + +static int register_smp2p_out(struct device *dev, char *node_name, + struct gpio_info *gpio_info_ptr) +{ + struct device_node *node = dev->of_node; + + if (gpio_info_ptr) { + if (of_find_property(node, "qcom,smem-states", NULL)) { + gpio_info_ptr->smem_state = + qcom_smem_state_get(dev, "rdbg-smp2p-out", + &gpio_info_ptr->smem_bit); + if (IS_ERR_OR_NULL(gpio_info_ptr->smem_state)) + pr_err("rdbg: failed get smem state\n"); + } + return 0; + } + return -EINVAL; +} + +static int register_smp2p_in(struct device *dev, char *node_name, + struct gpio_info *gpio_info_ptr) +{ + int id = 0; + struct device_node *node = dev->of_node; + + if (gpio_info_ptr) { + id = of_irq_get_byname(node, "rdbg-smp2p-in"); + gpio_info_ptr->gpio_base_id = id; + gpio_info_ptr->irq_base_id = id; + return 0; + } + return -EINVAL; +} + +static int rdbg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + int minor = 0; + int err = 0; + char *rdbg_compatible_string = "qcom,smp2p-interrupt-rdbg-"; + int max_len = strlen(rdbg_compatible_string) + strlen("xx-out"); + char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL); + + if (!node_name) { + err = -ENOMEM; + goto bail; + } + for (minor = 0; minor < rdbgdevice->num_devices; minor++) { + if (!proc_info[minor].name) + continue; + if (snprintf(node_name, max_len, "%s%d-out", + rdbg_compatible_string, minor) <= 0) { + pr_err("Error in snprintf\n"); + err = -ENOMEM; + goto bail; + } + + if (of_device_is_compatible(dev->of_node, node_name)) { + if (register_smp2p_out(dev, node_name, + &rdbgdevice->rdbg_data[minor].out)) { + pr_err("register_smp2p_out failed for %s\n", + proc_info[minor].name); + err = -EINVAL; + goto bail; + } + } + if (snprintf(node_name, max_len, "%s%d-in", + rdbg_compatible_string, minor) <= 0) { + pr_err("Error in snprintf\n"); + err = -ENOMEM; + goto bail; + } + + if (of_device_is_compatible(dev->of_node, node_name)) { + if (register_smp2p_in(dev, node_name, + &rdbgdevice->rdbg_data[minor].in)) { + pr_err("register_smp2p_in failed for %s\n", + proc_info[minor].name); + } + } + } +bail: + kfree(node_name); + return err; +} + +static const struct of_device_id rdbg_match_table[] = { + { .compatible = "qcom,smp2p-interrupt-rdbg-2-out", }, + { .compatible = "qcom,smp2p-interrupt-rdbg-2-in", }, + { .compatible = "qcom,smp2p-interrupt-rdbg-5-out", }, + { .compatible = 
"qcom,smp2p-interrupt-rdbg-5-in", }, + {} +}; + +static struct platform_driver rdbg_driver = { + .probe = rdbg_probe, + .driver = { + .name = "rdbg", + .owner = THIS_MODULE, + .of_match_table = rdbg_match_table, + }, +}; + +static int __init rdbg_init(void) +{ + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + int minor = 0; + int major = 0; + int minor_nodes_created = 0; + int err = 0; + + if (rdbgdevice->num_devices < 1 || + rdbgdevice->num_devices > SMP2P_NUM_PROCS) { + pr_err("rgdb: invalid num_devices\n"); + err = -EDOM; + goto bail; + } + rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices, + sizeof(struct rdbg_data), GFP_KERNEL); + if (!rdbgdevice->rdbg_data) { + err = -ENOMEM; + goto bail; + } + err = platform_driver_register(&rdbg_driver); + if (err) + goto bail; + err = alloc_chrdev_region(&rdbgdevice->dev_no, 0, + rdbgdevice->num_devices, "rdbgctl"); + if (err) { + pr_err("Error in alloc_chrdev_region.\n"); + goto data_bail; + } + major = MAJOR(rdbgdevice->dev_no); + + cdev_init(&rdbgdevice->cdev, &rdbg_fops); + rdbgdevice->cdev.owner = THIS_MODULE; + err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0), + rdbgdevice->num_devices); + if (err) { + pr_err("Error in cdev_add\n"); + goto chrdev_bail; + } + rdbgdevice->class = class_create(THIS_MODULE, "rdbg"); + if (IS_ERR(rdbgdevice->class)) { + err = PTR_ERR(rdbgdevice->class); + pr_err("Error in class_create\n"); + goto cdev_bail; + } + for (minor = 0; minor < rdbgdevice->num_devices; minor++) { + if (!proc_info[minor].name) + continue; + rdbgdevice->rdbg_data[minor].device = device_create( + rdbgdevice->class, NULL, MKDEV(major, minor), + NULL, "%s", proc_info[minor].name); + if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) { + err = PTR_ERR(rdbgdevice->rdbg_data[minor].device); + pr_err("Error in device_create\n"); + goto device_bail; + } + rdbgdevice->rdbg_data[minor].device_initialized = 1; + minor_nodes_created++; + dev_dbg(rdbgdevice->rdbg_data[minor].device, + "%s: created /dev/%s c %d %d'\n", __func__, + proc_info[minor].name, major, minor); + } + if (!minor_nodes_created) { + pr_err("No device tree entries found\n"); + err = -EINVAL; + goto class_bail; + } + + goto bail; + +device_bail: + for (--minor; minor >= 0; minor--) { + if (rdbgdevice->rdbg_data[minor].device_initialized) + device_destroy(rdbgdevice->class, + MKDEV(MAJOR(rdbgdevice->dev_no), minor)); + } +class_bail: + class_destroy(rdbgdevice->class); +cdev_bail: + cdev_del(&rdbgdevice->cdev); +chrdev_bail: + unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices); +data_bail: + kfree(rdbgdevice->rdbg_data); +bail: + return err; +} +module_init(rdbg_init); + +static void __exit rdbg_exit(void) +{ + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + int minor; + + for (minor = 0; minor < rdbgdevice->num_devices; minor++) { + if (rdbgdevice->rdbg_data[minor].device_initialized) { + device_destroy(rdbgdevice->class, + MKDEV(MAJOR(rdbgdevice->dev_no), minor)); + } + } + class_destroy(rdbgdevice->class); + cdev_del(&rdbgdevice->cdev); + unregister_chrdev_region(rdbgdevice->dev_no, 1); + kfree(rdbgdevice->rdbg_data); +} +module_exit(rdbg_exit); + +MODULE_DESCRIPTION("rdbg module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 461bf0b8a09473dbadc89b9259473dc36be2cd77..98cf36fb068d2cffdbd52cba1bfbb46db42dc2f4 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work) struct file_priv 
*priv = container_of(work, struct file_priv, work); mutex_lock(&priv->buffer_mutex); - atomic_set(&priv->data_pending, 0); + priv->data_pending = 0; memset(priv->data_buffer, 0, sizeof(priv->data_buffer)); mutex_unlock(&priv->buffer_mutex); } @@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip, struct file_priv *priv) { priv->chip = chip; - atomic_set(&priv->data_pending, 0); mutex_init(&priv->buffer_mutex); setup_timer(&priv->user_read_timer, user_reader_timeout, (unsigned long)priv); @@ -59,29 +58,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf, size_t size, loff_t *off) { struct file_priv *priv = file->private_data; - ssize_t ret_size; - ssize_t orig_ret_size; + ssize_t ret_size = 0; int rc; del_singleshot_timer_sync(&priv->user_read_timer); flush_work(&priv->work); - ret_size = atomic_read(&priv->data_pending); - if (ret_size > 0) { /* relay data */ - orig_ret_size = ret_size; - if (size < ret_size) - ret_size = size; + mutex_lock(&priv->buffer_mutex); - mutex_lock(&priv->buffer_mutex); + if (priv->data_pending) { + ret_size = min_t(ssize_t, size, priv->data_pending); rc = copy_to_user(buf, priv->data_buffer, ret_size); - memset(priv->data_buffer, 0, orig_ret_size); + memset(priv->data_buffer, 0, priv->data_pending); if (rc) ret_size = -EFAULT; - mutex_unlock(&priv->buffer_mutex); + priv->data_pending = 0; } - atomic_set(&priv->data_pending, 0); - + mutex_unlock(&priv->buffer_mutex); return ret_size; } @@ -92,17 +86,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, size_t in_size = size; ssize_t out_size; + if (in_size > TPM_BUFSIZE) + return -E2BIG; + + mutex_lock(&priv->buffer_mutex); + /* Cannot perform a write until the read has cleared either via * tpm_read or a user_read_timer timeout. This also prevents split * buffered writes from blocking here. 
*/ - if (atomic_read(&priv->data_pending) != 0) + if (priv->data_pending != 0) { + mutex_unlock(&priv->buffer_mutex); return -EBUSY; - - if (in_size > TPM_BUFSIZE) - return -E2BIG; - - mutex_lock(&priv->buffer_mutex); + } if (copy_from_user (priv->data_buffer, (void __user *) buf, in_size)) { @@ -133,7 +129,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return out_size; } - atomic_set(&priv->data_pending, out_size); + priv->data_pending = out_size; mutex_unlock(&priv->buffer_mutex); /* Set a timeout by which the reader must come claim the result */ @@ -150,5 +146,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv) del_singleshot_timer_sync(&priv->user_read_timer); flush_work(&priv->work); file->private_data = NULL; - atomic_set(&priv->data_pending, 0); + priv->data_pending = 0; } diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h index ba3b6f9dacf7c6a0f55bca4de024ea736f3304b9..b24cfb4d3ee1e7c7569580f69b2f8146eaa3bcbc 100644 --- a/drivers/char/tpm/tpm-dev.h +++ b/drivers/char/tpm/tpm-dev.h @@ -8,7 +8,7 @@ struct file_priv { struct tpm_chip *chip; /* Data passed to and from the tpm via the read/write calls */ - atomic_t data_pending; + size_t data_pending; struct mutex buffer_mutex; struct timer_list user_read_timer; /* user needs to claim result */ diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index e2e059d8ffec0c16ec9e106d6c848a036857295f..d26ea7513226c991eb3eeef3dfdbebadee8fa298 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, * TPM_RC_REFERENCE_H0 means the session has been * flushed outside the space */ - rc = -ENOENT; + *handle = 0; tpm_buf_destroy(&tbuf); + return -ENOENT; } else if (rc > 0) { dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n", __func__, rc); diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 7d3223fc71619f41252c4d47772ece1f909afc45..72b6091eb7b944f50b6a2e3d9ceafa8077a851d2 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_pll *pll = to_clk_pll(hw); - unsigned int pllr; - u16 mul; - u8 div; - - regmap_read(pll->regmap, PLL_REG(pll->id), &pllr); - - div = PLL_DIV(pllr); - mul = PLL_MUL(pllr, pll->layout); - - if (!div || !mul) - return 0; - return (parent_rate / div) * (mul + 1); + return (parent_rate / pll->div) * (pll->mul + 1); } static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index b77a742fc63eb5273ffb002c1c912604fabd20a9..8beeb1bfffd0c6095bbc5b56cbeeb61f04186048 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -101,10 +101,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) return 0; } +static int clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_mux *mux = to_clk_mux(hw); + + return clk_mux_determine_rate_flags(hw, req, mux->flags); +} + const struct clk_ops clk_mux_ops = { .get_parent = clk_mux_get_parent, .set_parent = clk_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_mux_ops); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 23fdf3294e0d946060f1044fd080547f670e0356..508a4db60dd0e5067d4400a32dde2de050c937b2 100644 --- a/drivers/clk/clk.c 
+++ b/drivers/clk/clk.c @@ -367,9 +367,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, return now <= rate && now > best; } -static int -clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, - unsigned long flags) +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags) { struct clk_core *core = hw->core, *parent, *best_parent = NULL; int i, num_parents, ret; @@ -429,6 +429,7 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req, return 0; } +EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags); struct clk *__clk_lookup(const char *name) { diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 5e8c18afce9ad35742dd378cded8042abe80c922..41c08fc892b97456bf8c9d3d7740337ee051638d 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -461,7 +461,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ - clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]); + clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]); clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 646565f68e9ac935a01c3b067b6543b89d961149..61ed49c4eaa9dda17c12af851bcfdc71310677b7 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -318,6 +318,7 @@ config MSM_CAMCC_SDMSHRIKE config MDM_GCC_QCS405 tristate "QCS405 Global Clock Controller" + select QCOM_GDSC depends on COMMON_CLK_QCOM help Support for the global clock controller on Qualcomm Technologies, Inc @@ -341,3 +342,29 @@ config CLOCK_CPU_QCS405 based devices. Say Y if you want to support CPU clock scaling using CPUfreq drivers for dynamic power management. + +config MSM_GCC_SM6150 + tristate "SM6150 Global Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on Qualcomm Technologies, Inc + SM6150 devices. + Say Y if you want to use peripheral devices such as UART, SPI, I2C, + USB, UFS, SD/eMMC, PCIe, etc. + +config MSM_GPUCC_SM6150 + tristate "SM6150 graphics Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the graphics clock controller on Qualcomm Technologies, Inc. + SM6150 devices. + Say Y if you want to support graphics clocks. + +config MSM_VIDEOCC_SM6150 + tristate "SM6150 Video Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the video clock controller on Qualcomm Technologies, Inc. + SM6150 devices. + Say Y if you want to support video devices and functionality such as + video encode/decode. 
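
[Reviewer note, not part of the patch] The SM6150 controller drivers selected by the Kconfig entries above follow the descriptor-based registration pattern used elsewhere in drivers/clk/qcom, including the new hwclks/num_hwclks fields added to struct qcom_cc_desc later in this series. The sketch below is illustrative only: the example_* identifiers and the clock tables they reference are assumptions, while qcom_cc_map() and qcom_cc_really_probe() are the existing helpers from drivers/clk/qcom/common.h that the mdss_qcs405_probe() change further down open-codes with devm_regmap_init_mmio().

/* Minimal, hypothetical probe skeleton for an SM6150-style clock controller. */
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include "common.h"

/* example_gcc_regmap_config, example_gcc_clocks (clk_regmap table) and
 * example_gcc_hws (plain clk_hw table, supported via the new hwclks
 * fields) are assumed to be defined by the driver.
 */
static const struct qcom_cc_desc example_gcc_desc = {
	.config = &example_gcc_regmap_config,
	.clks = example_gcc_clocks,
	.num_clks = ARRAY_SIZE(example_gcc_clocks),
	.hwclks = example_gcc_hws,
	.num_hwclks = ARRAY_SIZE(example_gcc_hws),
};

static int example_gcc_probe(struct platform_device *pdev)
{
	struct regmap *regmap;

	/* Map the controller's MMIO region and build a regmap for it. */
	regmap = qcom_cc_map(pdev, &example_gcc_desc);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Registers both the clk_regmap and clk_hw tables, resets and GDSCs. */
	return qcom_cc_really_probe(pdev, &example_gcc_desc, regmap);
}

[End of reviewer note]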
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index b7efc3c1e3b0fb519fda0eaefc8c7f9a96f603da..6c6d5904964bb1aac99f529edf4ecda9cebf45bf 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -41,14 +41,17 @@ obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o +obj-$(CONFIG_MSM_GCC_SM6150) += gcc-sm6150.o obj-$(CONFIG_MSM_GCC_SM8150) += gcc-sm8150.o obj-$(CONFIG_MSM_GCC_SDMSHRIKE) += gcc-sdmshrike.o +obj-$(CONFIG_MSM_GPUCC_SM6150) += gpucc-sm6150.o obj-$(CONFIG_MSM_GPUCC_SM8150) += gpucc-sm8150.o obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o obj-$(CONFIG_MSM_NPUCC_SM8150) += npucc-sm8150.o +obj-$(CONFIG_MSM_VIDEOCC_SM6150) += videocc-sm6150.o obj-$(CONFIG_MSM_VIDEOCC_SM8150) += videocc-sm8150.o obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index f23f88a3a116f2899feaf63ffcf4317b17e6e215..3a115dfd625972423cb92d6b31f7e94be85250b2 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -181,15 +181,10 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, { u32 val, mask; - if (config->l) - regmap_write(regmap, pll->offset + PLL_L_VAL, - config->l); - if (config->alpha) - regmap_write(regmap, pll->offset + PLL_ALPHA_VAL, - config->alpha); - if (config->alpha_u) - regmap_write(regmap, pll->offset + PLL_ALPHA_VAL_U, - config->alpha_u); + regmap_write(regmap, pll->offset + PLL_L_VAL, config->l); + regmap_write(regmap, pll->offset + PLL_ALPHA_VAL, config->alpha); + regmap_write(regmap, pll->offset + PLL_ALPHA_VAL_U, config->alpha_u); + if (config->config_ctl_val) regmap_write(regmap, pll->offset + PLL_CONFIG_CTL, config->config_ctl_val); @@ -1487,6 +1482,8 @@ static int clk_alpha_pll_slew_update(struct clk_alpha_pll *pll) return ret; } +static int clk_alpha_pll_calibrate(struct clk_hw *hw); + static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -1495,7 +1492,13 @@ static int clk_alpha_pll_slew_set_rate(struct clk_hw *hw, unsigned long rate, const struct pll_vco *curr_vco = NULL, *vco; u32 l, ctl; u64 a; - int i = 0; + int i = 0, rc; + + if (!clk_hw_is_enabled(hw)) { + rc = clk_alpha_pll_calibrate(hw); + if (rc) + return rc; + } freq_hz = alpha_pll_round_rate(pll, rate, parent_rate, &l, &a); if (freq_hz != rate) { @@ -1594,7 +1597,6 @@ static int clk_alpha_pll_calibrate(struct clk_hw *hw) calibration_freq = (pll->vco_table[0].min_freq + pll->vco_table[0].max_freq)/2; - freq_hz = alpha_pll_round_rate(pll, calibration_freq, clk_hw_get_rate(parent), &l, &a); if (freq_hz != calibration_freq) { @@ -1648,15 +1650,7 @@ static int clk_alpha_pll_calibrate(struct clk_hw *hw) static int clk_alpha_pll_slew_enable(struct clk_hw *hw) { - int rc; - - rc = clk_alpha_pll_calibrate(hw); - if (rc) - return rc; - - rc = clk_alpha_pll_enable(hw); - - return rc; + return clk_alpha_pll_enable(hw); } const struct clk_ops clk_alpha_pll_slew_ops = { diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c index e6220a0b8d2e9581c3a214a1bf52281bd141cf3b..f8d46ae34ae1c40fbf13cf64e9cc24b45beab939 100644 --- a/drivers/clk/qcom/clk-pll.c +++ b/drivers/clk/qcom/clk-pll.c @@ -143,7 +143,7 @@ 
clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) * req->best_parent_rate; else - req->rate = req->rate = f->freq; + req->rate = f->freq; return 0; } diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index 1b9a5f13de18960bffe3fe0acca49bfef688100d..197ba2ce6f28900e9654922c532aa0e6e74ab7db 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -161,6 +161,7 @@ extern const struct clk_ops clk_dyn_rcg_ops; * @current_freq: last cached frequency when using branches with shared RCGs * @enable_safe_config: When set, the RCG is parked at CXO when it's disabled * @clkr: regmap clock handle + * @cfg_off: defines the cfg register offseted from the CMD_RCGR * @flags: additional flag parameters for the RCG */ struct clk_rcg2 { @@ -172,6 +173,7 @@ struct clk_rcg2 { unsigned long current_freq; bool enable_safe_config; struct clk_regmap clkr; + u8 cfg_off; u8 flags; #define FORCE_ENABLE_RCG BIT(0) #define DFS_ENABLE_RCG BIT(1) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 866d5aaf84a581be1da15998bc5729f16efb1412..41d40a955f98dfa15ce75d5241dafd737b84fed2 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -92,7 +92,8 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw) u32 cfg; int i, ret; - ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); + ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + rcg->cfg_off + + CFG_REG, &cfg); if (ret) goto err; @@ -147,13 +148,14 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index) u32 old_cfg; /* Read back the old configuration */ - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &old_cfg); + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + rcg->cfg_off + CFG_REG, + &old_cfg); if (rcg->flags & DFS_ENABLE_RCG) return 0; - ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, - CFG_SRC_SEL_MASK, cfg); + ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + + rcg->cfg_off + CFG_REG, CFG_SRC_SEL_MASK, cfg); if (ret) return ret; @@ -272,13 +274,16 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return rcg->current_freq; } - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + rcg->cfg_off + CFG_REG, + &cfg); if (rcg->mnd_width) { mask = BIT(rcg->mnd_width) - 1; - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m); + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + rcg->cfg_off + + M_REG, &m); m &= mask; - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n); + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + rcg->cfg_off + + N_REG, &n); n = ~n; n &= mask; n += m; @@ -389,22 +394,26 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) return index; /* Read back the old configuration */ - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &old_cfg); + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + rcg->cfg_off + CFG_REG, + &old_cfg); if (rcg->mnd_width && f->n) { mask = BIT(rcg->mnd_width) - 1; ret = regmap_update_bits(rcg->clkr.regmap, - rcg->cmd_rcgr + M_REG, mask, f->m); + rcg->cmd_rcgr + rcg->cfg_off + M_REG, + mask, f->m); if (ret) return ret; ret = regmap_update_bits(rcg->clkr.regmap, - rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m)); + rcg->cmd_rcgr + rcg->cfg_off + N_REG, + mask, ~(f->n - f->m)); if (ret) return ret; ret = regmap_update_bits(rcg->clkr.regmap, - rcg->cmd_rcgr + D_REG, mask, ~f->n); + rcg->cmd_rcgr + rcg->cfg_off + D_REG, + mask, ~f->n); if (ret) return ret; } @@ -416,7 +425,7 @@ 
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) if (rcg->mnd_width && f->n && (f->m != f->n)) cfg |= CFG_MODE_DUAL_EDGE; ret = regmap_update_bits(rcg->clkr.regmap, - rcg->cmd_rcgr + CFG_REG, mask, cfg); + rcg->cmd_rcgr + rcg->cfg_off + CFG_REG, mask, cfg); if (ret) return ret; @@ -445,7 +454,7 @@ static void clk_rcg2_list_registers(struct seq_file *f, struct clk_hw *hw) size = ARRAY_SIZE(data1); for (i = 0; i < size; i++) { regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr + - data1[i].offset), &val); + rcg->cfg_off + data1[i].offset), &val); clock_debug_output(f, false, "%20s: 0x%.8x\n", data1[i].name, val); } @@ -453,7 +462,7 @@ static void clk_rcg2_list_registers(struct seq_file *f, struct clk_hw *hw) size = ARRAY_SIZE(data); for (i = 0; i < size; i++) { regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr + - data[i].offset), &val); + rcg->cfg_off + data[i].offset), &val); clock_debug_output(f, false, "%20s: 0x%.8x\n", data[i].name, val); } diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index d3547a43c316dba3f91bebe08c0031480ed02c6f..93a27fb656a8abeeba8ed96f101712ef3938c854 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -327,6 +327,24 @@ static const struct clk_rpmh_desc clk_rpmh_sm8150 = { .num_clks = ARRAY_SIZE(sm8150_rpmh_clocks), }; +static struct clk_hw *sm6150_rpmh_clocks[] = { + [RPMH_CXO_CLK] = &sm8150_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &sm8150_bi_tcxo_ao.hw, + [RPMH_LN_BB_CLK2] = &sm8150_ln_bb_clk2.hw, + [RPMH_LN_BB_CLK2_A] = &sm8150_ln_bb_clk2_ao.hw, + [RPMH_LN_BB_CLK3] = &sm8150_ln_bb_clk3.hw, + [RPMH_LN_BB_CLK3_A] = &sm8150_ln_bb_clk3_ao.hw, + [RPMH_RF_CLK1] = &sm8150_rf_clk1.hw, + [RPMH_RF_CLK1_A] = &sm8150_rf_clk1_ao.hw, + [RPMH_RF_CLK2] = &sm8150_rf_clk2.hw, + [RPMH_RF_CLK2_A] = &sm8150_rf_clk2_ao.hw, +}; + +static const struct clk_rpmh_desc clk_rpmh_sm6150 = { + .clks = sm6150_rpmh_clocks, + .num_clks = ARRAY_SIZE(sm6150_rpmh_clocks), +}; + static struct clk_hw *sdmshrike_rpmh_clocks[] = { [RPMH_CXO_CLK] = &sm8150_bi_tcxo.hw, [RPMH_CXO_CLK_A] = &sm8150_bi_tcxo_ao.hw, @@ -352,6 +370,7 @@ static const struct clk_rpmh_desc clk_rpmh_sdmshrike = { static const struct of_device_id clk_rpmh_match_table[] = { { .compatible = "qcom,rpmh-clk-sm8150", .data = &clk_rpmh_sm8150}, { .compatible = "qcom,rpmh-clk-sdmshrike", .data = &clk_rpmh_sdmshrike}, + { .compatible = "qcom,rpmh-clk-sm6150", .data = &clk_rpmh_sm6150}, { } }; MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 79a6975ac5eec727ac0c8bd74927f4e6d9bf319e..2f1661d0f746440cf6a7777fcc4982b751f5b4f9 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -409,7 +409,7 @@ static int clk_smd_rpm_enable_scaling(void) .length = sizeof(value), }; - ret = msm_rpm_send_message_noirq(QCOM_SMD_RPM_SLEEP_STATE, + ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE, QCOM_SMD_RPM_MISC_CLK, QCOM_RPM_SCALING_ENABLE_ID, &req, 1); if (ret) { @@ -417,7 +417,7 @@ static int clk_smd_rpm_enable_scaling(void) return ret; } - ret = msm_rpm_send_message_noirq(QCOM_SMD_RPM_ACTIVE_STATE, + ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, QCOM_SMD_RPM_MISC_CLK, QCOM_RPM_SCALING_ENABLE_ID, &req, 1); if (ret) { @@ -439,7 +439,7 @@ static int clk_vote_bimc(struct clk_hw *hw, uint32_t rate) .length = sizeof(rate), }; - ret = msm_rpm_send_message_noirq(QCOM_SMD_RPM_ACTIVE_STATE, + ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, r->rpm_res_type, r->rpm_clk_id, &req, 1); 
if (ret < 0) { if (ret != -EPROBE_DEFER) @@ -610,7 +610,6 @@ DEFINE_CLK_SMD_RPM(qcs405, bimc_gpu_clk, bimc_gpu_a_clk, /* SMD_XO_BUFFER */ DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, ln_bb_clk, ln_bb_clk_a, 8); DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, rf_clk1, rf_clk1_a, 4); -DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs405, div_clk1, div_clk1_a, 0xb); DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs405, ln_bb_clk_pin, ln_bb_clk_a_pin, 8); DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs405, rf_clk1_pin, rf_clk1_a_pin, 4); @@ -667,8 +666,6 @@ static struct clk_hw *qcs405_clks[] = { [RPM_SMD_LN_BB_CLK_A] = &qcs405_ln_bb_clk_a.hw, [RPM_SMD_LN_BB_CLK_PIN] = &qcs405_ln_bb_clk_pin.hw, [RPM_SMD_LN_BB_CLK_A_PIN] = &qcs405_ln_bb_clk_a_pin.hw, - [RPM_SMD_DIV_CLK1] = &qcs405_div_clk1.hw, - [RPM_SMD_DIV_A_CLK1] = &qcs405_div_clk1_a.hw, [RPM_SMD_PNOC_CLK] = &qcs405_pnoc_clk.hw, [RPM_SMD_PNOC_A_CLK] = &qcs405_pnoc_a_clk.hw, [RPM_SMD_CE1_CLK] = &qcs405_ce1_clk.hw, @@ -708,7 +705,7 @@ static struct clk_hw *qcs405_clks[] = { static const struct rpm_smd_clk_desc rpm_clk_qcs405 = { .clks = qcs405_clks, - .num_rpm_clks = RPM_SMD_BIMC_GPU_A_CLK, + .num_rpm_clks = RPM_SMD_LN_BB_CLK_A_PIN, .num_clks = ARRAY_SIZE(qcs405_clks), }; diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 71fd4f2997e23f0ae83976d97376d13808f51819..6fa3aa56b5a3dc56e26d8a05917462e3cba78d24 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2014, 2017-2018, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -28,7 +28,9 @@ struct qcom_cc { struct qcom_reset_controller reset; struct clk_regmap **rclks; + struct clk_hw **hwclks; size_t num_rclks; + size_t num_hwclks; }; const @@ -205,11 +207,14 @@ static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec, struct qcom_cc *cc = data; unsigned int idx = clkspec->args[0]; - if (idx >= cc->num_rclks) { - pr_err("%s: invalid index %u\n", __func__, idx); + if (idx >= cc->num_rclks + cc->num_hwclks) { + pr_err("invalid index %u\n", idx); return ERR_PTR(-EINVAL); } + if (idx < cc->num_hwclks && cc->hwclks[idx]) + return cc->hwclks[idx]; + return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT); } @@ -222,7 +227,9 @@ int qcom_cc_really_probe(struct platform_device *pdev, struct qcom_cc *cc; struct gdsc_desc *scd; size_t num_clks = desc->num_clks; + size_t num_hwclks = desc->num_hwclks; struct clk_regmap **rclks = desc->clks; + struct clk_hw **hwclks = desc->hwclks; cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL); if (!cc) @@ -230,6 +237,17 @@ int qcom_cc_really_probe(struct platform_device *pdev, cc->rclks = rclks; cc->num_rclks = num_clks; + cc->hwclks = hwclks; + cc->num_hwclks = num_hwclks; + + for (i = 0; i < num_hwclks; i++) { + if (!hwclks[i]) + continue; + + ret = devm_clk_hw_register(dev, hwclks[i]); + if (ret) + return ret; + } for (i = 0; i < num_clks; i++) { if (!rclks[i]) diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index 210785ccf1a4810bf318d00819c97ad1aac030ce..92c067c6d64d4d4e97db12e19714f588c3935cfc 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -36,7 +36,9 @@ struct parent_map; struct qcom_cc_desc { const struct regmap_config *config; struct clk_regmap **clks; + struct clk_hw **hwclks; size_t num_clks; + size_t num_hwclks; const struct qcom_reset_map *resets; size_t num_resets; struct gdsc **gdscs; diff --git a/drivers/clk/qcom/debugcc-qcs405.c b/drivers/clk/qcom/debugcc-qcs405.c index b7b07f5e934e27064f4731beef969ea5ababacfe..1695bb3ace2f03a5ad26d637b6a4a9ba080d549b 100644 --- a/drivers/clk/qcom/debugcc-qcs405.c +++ b/drivers/clk/qcom/debugcc-qcs405.c @@ -110,6 +110,7 @@ static const char *const debug_mux_parent_names[] = { "gcc_usb_hs_inactivity_timers_clk", "gcc_usb_hs_phy_cfg_ahb_clk", "gcc_usb_hs_system_clk", + "gcc_dcc_clk", "apcs_mux_clk", }; @@ -278,6 +279,8 @@ static struct clk_debug_mux gcc_debug_mux = { 0x64, 0x1FF, 0, 0xF000, 12, 4, 0x74000, 0x74000, 0x74000 }, { "gcc_usb_hs_system_clk", 0x60, 4, GCC, 0x60, 0x1FF, 0, 0xF000, 12, 4, 0x74000, 0x74000, 0x74000 }, + { "gcc_dcc_clk", 0x1B9, 4, GCC, + 0x1B9, 0x1FF, 0, 0xF000, 12, 4, 0x74000, 0x74000, 0x74000 }, { "apcs_mux_clk", 0x16A, CPU_CC, 0x000, 0x3, 8, 0x0FF }, ), .hw.init = &(struct clk_init_data){ diff --git a/drivers/clk/qcom/gcc-qcs405.c b/drivers/clk/qcom/gcc-qcs405.c index b8d9501ca58a359ae75c1fc2ed9c5861ee084efe..4d0f2be22e53d6a0b5eac45d60049a0f3ce6f29d 100644 --- a/drivers/clk/qcom/gcc-qcs405.c +++ b/drivers/clk/qcom/gcc-qcs405.c @@ -823,7 +823,7 @@ static struct clk_rcg2 byte0_clk_src = { .name = "byte0_clk_src", .parent_names = gcc_parent_names_5, .num_parents = 4, - .flags = CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, .ops = &clk_byte2_ops, .vdd_class = &vdd_cx, .num_rate_max = VDD_NUM, @@ -917,7 +917,6 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = { F_SLEW(523200000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1046400000), F_SLEW(550000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1100000000), F_SLEW(598000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1196000000), - F_SLEW(650000000, P_GPLL3_OUT_MAIN, 1, 0, 0, 1300000000), { } }; @@ -927,17 +926,12 @@ static struct clk_rcg2 gfx3d_clk_src = { .hid_width = 5, .parent_map = gcc_parent_map_7, .freq_tbl = ftbl_gfx3d_clk_src, + .flags = FORCE_ENABLE_RCG, .clkr.hw.init = &(struct clk_init_data){ .name = "gfx3d_clk_src", .parent_names = gcc_parent_names_7, .num_parents = 6, .ops = &clk_rcg2_ops, - .vdd_class = &vdd_cx, - .num_rate_max = VDD_NUM, - .rate_max = (unsigned long[VDD_NUM]) { - [VDD_LOW] = 270000000, - [VDD_NOMINAL] = 484800000, - [VDD_HIGH] = 598000000}, }, }; @@ -1133,7 +1127,7 @@ static struct clk_rcg2 pclk0_clk_src = { .name = "pclk0_clk_src", .parent_names = gcc_parent_names_12, .num_parents = 4, - .flags = CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, .ops = &clk_pixel_ops, .vdd_class = &vdd_cx, .num_rate_max = VDD_NUM, @@ -1472,6 +1466,19 @@ static struct clk_branch gcc_blsp1_ahb_clk = { }, }; +static struct clk_branch gcc_dcc_clk = { + .halt_reg = 0x77004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x77004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_dcc_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_blsp1_qup0_i2c_apps_clk = { .halt_reg = 0x6028, .halt_check = BRANCH_HALT, @@ -2140,8 +2147,8 @@ static struct clk_branch gcc_mdss_mdp_clk = { }, }; -static DEFINE_CLK_VOTER(mdss_mdp_vote_clk, &gcc_mdss_mdp_clk.c, 0); -static 
DEFINE_CLK_VOTER(mdss_rotator_vote_clk, &gcc_mdss_mdp_clk.c, 0); +static DEFINE_CLK_VOTER(mdss_mdp_vote_clk, gcc_mdss_mdp_clk, 0); +static DEFINE_CLK_VOTER(mdss_rotator_vote_clk, gcc_mdss_mdp_clk, 0); static struct clk_branch gcc_mdss_pclk0_clk = { .halt_reg = 0x4d084, @@ -2206,6 +2213,12 @@ static struct clk_branch gcc_oxili_gfx3d_clk = { .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOW] = 270000000, + [VDD_NOMINAL] = 484800000, + [VDD_HIGH] = 598000000}, }, }, }; @@ -2639,8 +2652,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = { }; static struct clk_branch gcc_usb3_phy_pipe_clk = { - .halt_reg = 0x39018, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x39018, .enable_mask = BIT(0), @@ -2820,6 +2832,7 @@ static struct clk_regmap *gcc_qcs405_clocks[] = { [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr, [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr, [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, + [GCC_DCC_CLK] = &gcc_dcc_clk.clkr, }; static const struct qcom_reset_map gcc_qcs405_resets[] = { @@ -2827,8 +2840,8 @@ static const struct qcom_reset_map gcc_qcs405_resets[] = { [GCC_USB_HS_BCR] = {0x41000}, [GCC_USB2_HS_PHY_ONLY_BCR] = {0x41034}, [GCC_QUSB2_PHY_BCR] = {0x4103C}, - [GCC_USB_HS_PHY_CFG_AHB_BCR] = {0x41038}, - [GCC_USB2A_PHY_BCR] = {0x41028}, + [GCC_USB_HS_PHY_CFG_AHB_BCR] = {0x0000C, 0}, + [GCC_USB2A_PHY_BCR] = {0x0000C, 1}, [GCC_USB3_PHY_BCR] = {0x39004}, [GCC_USB_30_BCR] = {0x39000}, [GCC_USB3PHY_PHY_BCR] = {0x39008}, @@ -2945,6 +2958,8 @@ static const struct qcom_cc_desc mdss_qcs405_desc = { .config = &gcc_qcs405_regmap_config, .clks = mdss_qcs405_clocks, .num_clks = ARRAY_SIZE(mdss_qcs405_clocks), + .hwclks = mdss_qcs405_hws, + .num_hwclks = ARRAY_SIZE(mdss_qcs405_hws), }; static const struct of_device_id mdss_qcs405_match_table[] = { @@ -2957,7 +2972,10 @@ MODULE_DEVICE_TABLE(of, mdss_qcs405_match_table); static int mdss_qcs405_probe(struct platform_device *pdev) { struct clk *clk; - int ret = 0, i; + struct regmap *regmap; + struct resource *res; + void __iomem *base; + int ret = 0; clk = devm_clk_get(&pdev->dev, "pclk0_src"); if (IS_ERR(clk)) { @@ -2973,16 +2991,22 @@ static int mdss_qcs405_probe(struct platform_device *pdev) return PTR_ERR(clk); } - /* register hardware clocks */ - for (i = 0; i < ARRAY_SIZE(mdss_qcs405_hws); i++) { - clk = devm_clk_register(&pdev->dev, mdss_qcs405_hws[i]); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "Unable to register hardware clocks\n"); - return PTR_ERR(clk); - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "Failed to get resources\n"); + return -EINVAL; } - ret = qcom_cc_probe(pdev, &mdss_qcs405_desc); + base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio(&pdev->dev, base, + mdss_qcs405_desc.config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cc_really_probe(pdev, &mdss_qcs405_desc, regmap); if (ret) { dev_err(&pdev->dev, "Failed to register MDSS clocks\n"); return ret; diff --git a/drivers/clk/qcom/gcc-sm6150.c b/drivers/clk/qcom/gcc-sm6150.c new file mode 100644 index 0000000000000000000000000000000000000000..2a7c239375c3bc7cf2cbaef1cc703a489027202c --- /dev/null +++ b/drivers/clk/qcom/gcc-sm6150.c @@ -0,0 +1,3543 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "reset.h" +#include "vdd-level-sm6150.h" + +#define GCC_DISPLAY_MISC 0x0b080 +#define GCC_CAMERA_MISC 0x0b084 +#define GCC_VIDEO_MISC 0x9b000 +#define GCC_GPU_MISC 0x71028 +#define GCC_EMAC_MISC 0x06040 + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner); + +enum { + P_BI_TCXO, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_AUX2, + P_GPLL0_OUT_MAIN, + P_GPLL3_OUT_MAIN, + P_GPLL4_OUT_MAIN, + P_GPLL6_OUT_MAIN, + P_GPLL7_OUT_MAIN, + P_GPLL8_OUT_MAIN, + P_SLEEP_CLK, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_AUX2, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_0[] = { + "bi_tcxo", + "gpll0_out_main", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; +static const char * const gcc_parent_names_0_ao[] = { + "bi_tcxo_ao", + "gpll0_out_main", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL6_OUT_MAIN, 2 }, + { P_GPLL0_OUT_AUX2, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_1[] = { + "bi_tcxo", + "gpll0_out_main", + "gpll6_out_main", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GPLL0_OUT_AUX2, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_2[] = { + "bi_tcxo", + "gpll0_out_main", + "sleep_clk", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_3[] = { + "bi_tcxo", + "sleep_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_4[] = { + "bi_tcxo", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL7_OUT_MAIN, 3 }, + { P_GPLL4_OUT_MAIN, 5 }, + { P_GPLL0_OUT_AUX2, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_5[] = { + "bi_tcxo", + "gpll0_out_main", + "gpll7_out_main", + "gpll4_out_main", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL7_OUT_MAIN, 3 }, + { P_GPLL0_OUT_AUX2, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static 
const char * const gcc_parent_names_6[] = { + "bi_tcxo", + "gpll0_out_main", + "gpll7_out_main", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL8_OUT_MAIN, 2 }, + { P_GPLL4_OUT_MAIN, 5 }, + { P_GPLL0_OUT_AUX2, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_7[] = { + "bi_tcxo", + "gpll0_out_main", + "gpll8_out_main", + "gpll4_out_main", + "gpll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_8[] = { + "bi_tcxo", + "gpll0_out_main", + "core_bi_pll_test_se", +}; + +static const struct parent_map gcc_parent_map_9[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL3_OUT_MAIN, 4 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gcc_parent_names_9[] = { + "bi_tcxo", + "gpll0_out_main", + "gpll3_out_main", + "core_bi_pll_test_se", +}; + +static struct clk_alpha_pll gpll0_out_main = { + .offset = 0x0, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0_out_main", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_fixed_factor gpll0_out_aux2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "gpll0_out_aux2", + .parent_names = (const char *[]){ "gpll0_out_main" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll gpll6_out_main = { + .offset = 0x13000, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gpll6_out_main", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll7_out_main = { + .offset = 0x1a000, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gpll7_out_main", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll8_out_main = { + .offset = 0x1b000, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gpll8_out_main", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { + .cmd_rcgr = 0x48014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk_src", + .parent_names = gcc_parent_names_0_ao, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx_ao, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { + F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0), + F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0), + F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac_ptp_clk_src = { + .cmd_rcgr = 0x6038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = 
ftbl_gcc_emac_ptp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_emac_ptp_clk_src", + .parent_names = gcc_parent_names_5, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 230400000}, + }, +}; + +static struct clk_rcg2 gcc_emac_rgmii_clk_src = { + .cmd_rcgr = 0x601c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_emac_ptp_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_emac_rgmii_clk_src", + .parent_names = gcc_parent_names_6, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 125000000, + [VDD_NOMINAL] = 250000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0), + F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x64004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x65004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x66004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk_src", + .parent_names = gcc_parent_names_2, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0x6b02c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 9600000, + [VDD_LOW] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = { + .cmd_rcgr = 0x6f014, + .mnd_width = 0, + 
.hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_phy_refgen_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 100000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x33010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 60000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = { + F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qspi_core_clk_src = { + .cmd_rcgr = 0x4b008, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qspi_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 150000000, + [VDD_NOMINAL] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GPLL0_OUT_AUX2, 1, 384, 15625), + F(14745600, P_GPLL0_OUT_AUX2, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GPLL0_OUT_AUX2, 1, 1536, 15625), + F(32000000, P_GPLL0_OUT_AUX2, 1, 8, 75), + F(48000000, P_GPLL0_OUT_AUX2, 1, 4, 25), + F(64000000, P_GPLL0_OUT_AUX2, 1, 16, 75), + F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0), + F(80000000, P_GPLL0_OUT_AUX2, 1, 4, 15), + F(96000000, P_GPLL0_OUT_AUX2, 1, 8, 25), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(102400000, P_GPLL0_OUT_AUX2, 1, 128, 375), + F(112000000, P_GPLL0_OUT_AUX2, 1, 28, 75), + F(117964800, P_GPLL0_OUT_AUX2, 1, 6144, 15625), + F(120000000, P_GPLL0_OUT_AUX2, 2.5, 0, 0), + F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x17148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x17278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = 
&vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x173a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x174d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x17608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x17738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x18148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x18278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + 
[VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x183a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x184d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x18608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x18738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 128000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = { + F(144000, P_BI_TCXO, 16, 3, 25), + F(400000, P_BI_TCXO, 12, 1, 4), + F(20000000, P_GPLL0_OUT_AUX2, 5, 1, 3), + F(25000000, P_GPLL0_OUT_AUX2, 6, 1, 2), + F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0), + F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0), + F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc1_apps_clk_src = { + .cmd_rcgr = 0x12028, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk_src", + .parent_names = gcc_parent_names_1, + .num_parents = 5, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 100000000, + [VDD_NOMINAL] = 384000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = { + F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 
gcc_sdcc1_ice_core_clk_src = { + .cmd_rcgr = 0x12010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 150000000, + [VDD_NOMINAL] = 300000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0), + F(52000000, P_GPLL8_OUT_MAIN, 8, 0, 0), + F(208000000, P_GPLL8_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { + .cmd_rcgr = 0x1400c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk_src", + .parent_names = gcc_parent_names_7, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 52000000, + [VDD_LOW] = 208000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = { + F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0), + F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { + .cmd_rcgr = 0x77020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src, + .flags = FORCE_ENABLE_RCG, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 50000000, + [VDD_LOW] = 100000000, + [VDD_NOMINAL] = 200000000, + [VDD_HIGH] = 240000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0), + F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { + .cmd_rcgr = 0x77048, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src, + .flags = FORCE_ENABLE_RCG, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 75000000, + [VDD_LOW] = 150000000, + [VDD_NOMINAL] = 300000000}, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { + .cmd_rcgr = 0x7707c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .flags = FORCE_ENABLE_RCG, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk_src", + .parent_names = gcc_parent_names_4, + .num_parents = 2, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + 
[VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0), + F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { + .cmd_rcgr = 0x77060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src, + .flags = FORCE_ENABLE_RCG, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 37500000, + [VDD_LOW] = 75000000, + [VDD_NOMINAL] = 150000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb20_sec_master_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb20_sec_master_clk_src = { + .cmd_rcgr = 0xa601c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb20_sec_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_sec_master_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 60000000, + [VDD_NOMINAL] = 120000000}, + }, +}; + +static struct clk_rcg2 gcc_usb20_sec_mock_utmi_clk_src = { + .cmd_rcgr = 0xa6034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_sec_mock_utmi_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 60000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb2_sec_phy_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb2_sec_phy_aux_clk_src = { + .cmd_rcgr = 0xa6060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb2_sec_phy_aux_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(66666667, P_GPLL0_OUT_AUX2, 4.5, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0xf01c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 66666667, + [VDD_LOW] = 133333333, + [VDD_NOMINAL] = 200000000, + 
[VDD_HIGH] = 240000000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = { + F(20000000, P_GPLL0_OUT_AUX2, 15, 0, 0), + F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0xf034, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_names = gcc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 40000000, + [VDD_LOW] = 60000000}, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0xf060, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_names = gcc_parent_names_3, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_rcg2 gcc_vs_ctrl_clk_src = { + .cmd_rcgr = 0x7a030, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_8, + .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_vs_ctrl_clk_src", + .parent_names = gcc_parent_names_8, + .num_parents = 3, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_vsensor_clk_src = { + .cmd_rcgr = 0x7a018, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_9, + .freq_tbl = ftbl_gcc_vsensor_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_vsensor_clk_src", + .parent_names = gcc_parent_names_9, + .num_parents = 4, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000, + [VDD_LOW] = 600000000}, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { + .halt_reg = 0x770c0, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x770c0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770c0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_phy_axi_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = { + .halt_reg = 0x770c0, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x770c0, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770c0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk", + .parent_names = (const char *[]){ + "gcc_aggre_ufs_phy_axi_clk", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_hw_ctl_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb2_sec_axi_clk = { + .halt_reg = 0xa6084, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa6084, + 
.enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb2_sec_axi_clk", + .parent_names = (const char *[]){ + "gcc_usb20_sec_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0xf07c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf07c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy_east_clk = { + .halt_reg = 0x6a008, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x6a008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6a008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ahb2phy_east_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy_west_clk = { + .halt_reg = 0x6a004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x6a004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ahb2phy_west_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_apc_vs_clk = { + .halt_reg = 0x7a04c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a04c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_apc_vs_clk", + .parent_names = (const char *[]){ + "gcc_vsensor_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x38004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x38004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_ahb_clk = { + .halt_reg = 0xb008, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_hf_axi_clk = { + .halt_reg = 0xb030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_xo_clk = { + .halt_reg = 0xb044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_xo_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ce1_ahb_clk = { + .halt_reg = 0x4100c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4100c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ce1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ce1_axi_clk = { + .halt_reg = 0x41008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ce1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; 
+ +static struct clk_branch gcc_ce1_clk = { + .halt_reg = 0x41004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ce1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb2_sec_axi_clk = { + .halt_reg = 0xa609c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa609c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb2_sec_axi_clk", + .parent_names = (const char *[]){ + "gcc_usb20_sec_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0xf078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_ahb_clk = { + .halt_reg = 0x48000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk", + .parent_names = (const char *[]){ + "gcc_cpuss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_gnoc_clk = { + .halt_reg = 0x48004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x48004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_gnoc_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x71154, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x71154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_ahb_clk = { + .halt_reg = 0xb00c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb00c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_gpll0_div_clk_src", + .parent_names = (const char *[]){ + "gpll0_out_aux2", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0xb038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_xo_clk = { + .halt_reg = 0xb048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_xo_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_axi_clk = { + .halt_reg = 0x6010, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0x6010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_ptp_clk = { + .halt_reg = 0x6034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_ptp_clk", + .parent_names = (const char *[]){ + "gcc_emac_ptp_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_rgmii_clk = { + .halt_reg = 0x6018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_rgmii_clk", + .parent_names = (const char *[]){ + "gcc_emac_rgmii_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac_slv_ahb_clk = { + .halt_reg = 0x6014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x6014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_emac_slv_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x64000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x64000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_names = (const char *[]){ + "gcc_gp1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x65000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x65000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_names = (const char *[]){ + "gcc_gp2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x66000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x66000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_names = (const char *[]){ + "gcc_gp3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_cfg_ahb_clk = { + .halt_reg = 0x71004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x71004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x71004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_cfg_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_names = (const char *[]){ + "gpll0_out_main", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_names = (const char *[]){ + "gpll0_out_aux2", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_iref_clk = { + .halt_reg = 0x8c010, + .halt_check = BRANCH_HALT, + .clkr = { + 
.enable_reg = 0x8c010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_iref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x7100c, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x7100c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x71018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x71018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_axis2_clk = { + .halt_reg = 0x8a00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8a00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_axis2_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_cfg_ahb_clk = { + .halt_reg = 0x8a000, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x8a000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x8a000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_gpll0_div_clk_src", + .parent_names = (const char *[]){ + "gpll0_out_main", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_mfab_axis_clk = { + .halt_reg = 0x8a004, + .halt_check = BRANCH_VOTED, + .hwcg_reg = 0x8a004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x8a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_mfab_axis_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_q6_memnoc_axi_clk = { + .halt_reg = 0x8a154, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x8a154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_q6_memnoc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_snoc_axi_clk = { + .halt_reg = 0x8a150, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8a150, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_snoc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_vs_clk = { + .halt_reg = 0x7a048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_vs_clk", + .parent_names = (const char *[]){ + "gcc_vsensor_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_phy_refgen_clk = { + .halt_reg = 0x6f02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie0_phy_refgen_clk", + .parent_names = (const char *[]){ + "gcc_pcie_phy_refgen_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x6b020, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(3), + .hw.init = 
&(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk", + .parent_names = (const char *[]){ + "gcc_pcie_0_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0x6b01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_clkref_clk = { + .halt_reg = 0x8c00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0x6b018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0x6b014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0x6b010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_phy_aux_clk = { + .halt_reg = 0x6f004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_phy_aux_clk", + .parent_names = (const char *[]){ + "gcc_pcie_0_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x3300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_names = (const char *[]){ + "gcc_pdm2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x33004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x33004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x33004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x33008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x33008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x34004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x34004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52004, 
+ .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_prng_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0xb018, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_nrt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_ahb_clk = { + .halt_reg = 0xb020, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_disp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_pcie_ahb_clk = { + .halt_reg = 0x6b044, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b044, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(28), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_pcie_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0xb014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = { + .halt_reg = 0x4b000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4b000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_cnoc_periph_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qspi_core_clk = { + .halt_reg = 0x4b004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4b004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qspi_core_clk", + .parent_names = (const char *[]){ + "gcc_qspi_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x17014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x1700c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x17144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x17274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + 
}, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x173a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x174d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x17604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s4_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x17734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap0_s5_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = { + .halt_reg = 0x18014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_clk = { + .halt_reg = 0x1800c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x18144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap1_s0_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x18274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(23), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s1_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap1_s1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x183a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(24), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap1_s2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x184d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap1_s3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x18604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap1_s4_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x18734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_names = (const char *[]){ + "gcc_qupv3_wrap1_s5_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x17008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x18004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + .halt_reg = 0x18008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x18008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5200c, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ahb_clk = { + .halt_reg = 0x12008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x12004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x12004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_apps_clk", + .parent_names = (const char *[]){ + "gcc_sdcc1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_ice_core_clk = { + .halt_reg = 0x1200c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1200c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc1_ice_core_clk", + .parent_names = (const char *[]){ + "gcc_sdcc1_ice_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x14008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x14004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_names = (const char *[]){ + "gcc_sdcc2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { + .halt_reg = 0x4819c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sys_noc_cpuss_ahb_clk", + .parent_names = (const char *[]){ + "gcc_cpuss_ahb_clk_src", + }, + .num_parents = 1, + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_clkref_clk = { + .halt_reg = 0x8c004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_mem_clkref_clk = { + .halt_reg = 0x8c000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_mem_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ahb_clk = { + .halt_reg = 0x77014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_hw_ctl_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_axi_clk", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_hw_ctl_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_clk = { + .halt_reg = 0x77044, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77044, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_ice_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = { + .halt_reg = 0x77044, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77044, + .hwcg_bit = 1, + .clkr = { + 
.enable_reg = 0x77044, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_hw_ctl_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_ice_core_clk", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_hw_ctl_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_clk = { + .halt_reg = 0x77078, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77078, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { + .halt_reg = 0x77078, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77078, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77078, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_phy_aux_clk", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_hw_ctl_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x7701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x77018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_clk = { + .halt_reg = 0x77040, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77040, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_unipro_core_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = { + .halt_reg = 0x77040, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77040, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77040, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk", + .parent_names = (const char *[]){ + "gcc_ufs_phy_unipro_core_clk", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_hw_ctl_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_sec_master_clk = { + .halt_reg = 0xa6010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa6010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_sec_master_clk", + .parent_names = (const char *[]){ + "gcc_usb20_sec_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb20_sec_mock_utmi_clk = { + .halt_reg = 0xa6018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa6018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_sec_mock_utmi_clk", + .parent_names = (const char *[]){ + "gcc_usb20_sec_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch gcc_usb20_sec_sleep_clk = { + .halt_reg = 0xa6014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa6014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb20_sec_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_sec_phy_aux_clk = { + .halt_reg = 0xa6050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa6050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2_sec_phy_aux_clk", + .parent_names = (const char *[]){ + "gcc_usb2_sec_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_sec_phy_com_aux_clk = { + .halt_reg = 0xa6054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xa6054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2_sec_phy_com_aux_clk", + .parent_names = (const char *[]){ + "gcc_usb2_sec_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_sec_phy_pipe_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0xa6058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2_sec_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0xf010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0xf018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_names = (const char *[]){ + "gcc_usb30_prim_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0xf014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_clkref_clk = { + .halt_reg = 0x8c008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0xf050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf050, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_names = (const char *[]){ + "gcc_usb3_prim_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0xf054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_names = (const char *[]){ + 
"gcc_usb3_prim_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0xf058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_clkref_clk = { + .halt_reg = 0x8c014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vdda_vs_clk = { + .halt_reg = 0x7a00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vdda_vs_clk", + .parent_names = (const char *[]){ + "gcc_vsensor_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vddcx_vs_clk = { + .halt_reg = 0x7a004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vddcx_vs_clk", + .parent_names = (const char *[]){ + "gcc_vsensor_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vddmx_vs_clk = { + .halt_reg = 0x7a008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vddmx_vs_clk", + .parent_names = (const char *[]){ + "gcc_vsensor_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_ahb_clk = { + .halt_reg = 0xb004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_ahb_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0xb024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb024, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_xo_clk = { + .halt_reg = 0xb040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_xo_clk", + .flags = CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vs_ctrl_ahb_clk = { + .halt_reg = 0x7a014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7a014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7a014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vs_ctrl_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_vs_ctrl_clk = { + .halt_reg = 0x7a010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_vs_ctrl_clk", + .parent_names = (const char *[]){ + "gcc_vs_ctrl_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_wcss_vs_clk = { + .halt_reg = 0x7a054, + 
.halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x7a054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_wcss_vs_clk", + .parent_names = (const char *[]){ + "gcc_vsensor_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_rx1_usb2_clkref_clk = { + .halt_reg = 0x8c030, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8c030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_rx1_usb2_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_prim_clkref_clk = { + .halt_reg = 0x8c028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8c028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2_prim_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_sec_clkref_clk = { + .halt_reg = 0x8c018, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8c018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb2_sec_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +/* Measure-only clock for ddrss_gcc_debug_clk. */ +static struct clk_dummy measure_only_bimc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_bimc_clk", + .ops = &clk_dummy_ops, + }, +}; + +/* Measure-only clock for gcc_cfg_noc_ahb_clk. */ +static struct clk_dummy measure_only_cnoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_cnoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +/* Measure-only clock for gcc_ipa_2x_clk. */ +static struct clk_dummy measure_only_ipa_2x_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_ipa_2x_clk", + .ops = &clk_dummy_ops, + }, +}; + +/* Measure-only clock for gcc_sys_noc_axi_clk. 
*/ +static struct clk_dummy measure_only_snoc_clk = { + .rrate = 1000, + .hw.init = &(struct clk_init_data){ + .name = "measure_only_snoc_clk", + .ops = &clk_dummy_ops, + }, +}; + +struct clk_hw *gcc_sm6150_hws[] = { + [GPLL0_OUT_AUX2] = &gpll0_out_aux2.hw, + [MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw, + [MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw, + [MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw, + [MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw, +}; + +static struct clk_regmap *gcc_sm6150_clocks[] = { + [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = + &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr, + [GCC_AGGRE_USB2_SEC_AXI_CLK] = &gcc_aggre_usb2_sec_axi_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_AHB2PHY_EAST_CLK] = &gcc_ahb2phy_east_clk.clkr, + [GCC_AHB2PHY_WEST_CLK] = &gcc_ahb2phy_west_clk.clkr, + [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr, + [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, + [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr, + [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr, + [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr, + [GCC_CE1_CLK] = &gcc_ce1_clk.clkr, + [GCC_CFG_NOC_USB2_SEC_AXI_CLK] = &gcc_cfg_noc_usb2_sec_axi_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, + [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, + [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr, + [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr, + [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr, + [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr, + [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr, + [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr, + [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr, + [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr, + [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, + [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr, + [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr, + [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr, + [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, + [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr, + [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PIPE_CLK] = 
&gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr, + [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, + [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr, + [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr, + [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr, + [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr, + [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr, + [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr, + [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, + [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr, + 
[GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr, + [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr, + [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr, + [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr, + [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr, + [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = + &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, + [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = + &gcc_ufs_phy_unipro_core_clk_src.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = + &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr, + [GCC_USB20_SEC_MASTER_CLK] = &gcc_usb20_sec_master_clk.clkr, + [GCC_USB20_SEC_MASTER_CLK_SRC] = &gcc_usb20_sec_master_clk_src.clkr, + [GCC_USB20_SEC_MOCK_UTMI_CLK] = &gcc_usb20_sec_mock_utmi_clk.clkr, + [GCC_USB20_SEC_MOCK_UTMI_CLK_SRC] = + &gcc_usb20_sec_mock_utmi_clk_src.clkr, + [GCC_USB20_SEC_SLEEP_CLK] = &gcc_usb20_sec_sleep_clk.clkr, + [GCC_USB2_SEC_PHY_AUX_CLK] = &gcc_usb2_sec_phy_aux_clk.clkr, + [GCC_USB2_SEC_PHY_AUX_CLK_SRC] = &gcc_usb2_sec_phy_aux_clk_src.clkr, + [GCC_USB2_SEC_PHY_COM_AUX_CLK] = &gcc_usb2_sec_phy_com_aux_clk.clkr, + [GCC_USB2_SEC_PHY_PIPE_CLK] = &gcc_usb2_sec_phy_pipe_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = + &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr, + [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr, + [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr, + [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr, + [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr, + [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr, + [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr, + [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr, + [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr, + [GCC_WCSS_VS_CLK] = &gcc_wcss_vs_clk.clkr, + [GPLL0_OUT_MAIN] = &gpll0_out_main.clkr, + [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr, + [GPLL7_OUT_MAIN] = &gpll7_out_main.clkr, + [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr, + [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr, + [GCC_USB2_PRIM_CLKREF_CLK] = &gcc_usb2_prim_clkref_clk.clkr, + [GCC_USB2_SEC_CLKREF_CLK] = &gcc_usb2_sec_clkref_clk.clkr, +}; + +static const struct qcom_reset_map gcc_sm6150_resets[] = { + [GCC_QUSB2PHY_PRIM_BCR] = { 0xd000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0xd004 }, + [GCC_USB30_PRIM_BCR] = { 0xf000 }, + [GCC_USB2_PHY_SEC_BCR] = { 
0x50018 }, + [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 }, + [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c }, + [GCC_PCIE_0_BCR] = { 0x6b000 }, + [GCC_PCIE_0_PHY_BCR] = { 0x6c01c }, + [GCC_PCIE_PHY_BCR] = { 0x6f000 }, + [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 }, + [GCC_UFS_PHY_BCR] = { 0x77000 }, + [GCC_USB20_SEC_BCR] = { 0xa6000 }, +}; + +static struct clk_dfs gcc_dfs_clocks[] = { + { &gcc_qupv3_wrap0_s0_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap0_s1_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap0_s2_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap0_s3_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap0_s4_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap0_s5_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap1_s0_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap1_s1_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap1_s2_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap1_s3_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap1_s4_clk_src, DFS_ENABLE_RCG }, + { &gcc_qupv3_wrap1_s5_clk_src, DFS_ENABLE_RCG }, +}; + +static const struct qcom_cc_dfs_desc gcc_sm6150_dfs_desc = { + .clks = gcc_dfs_clocks, + .num_clks = ARRAY_SIZE(gcc_dfs_clocks), +}; + +static const struct regmap_config gcc_sm6150_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xa609c, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_sm6150_desc = { + .config = &gcc_sm6150_regmap_config, + .clks = gcc_sm6150_clocks, + .num_clks = ARRAY_SIZE(gcc_sm6150_clocks), + .hwclks = gcc_sm6150_hws, + .num_hwclks = ARRAY_SIZE(gcc_sm6150_hws), + .resets = gcc_sm6150_resets, + .num_resets = ARRAY_SIZE(gcc_sm6150_resets), +}; + +static const struct of_device_id gcc_sm6150_match_table[] = { + { .compatible = "qcom,gcc-sm6150" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sm6150_match_table); + +static int gcc_sm6150_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + vdd_cx_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx_ao"); + if (IS_ERR(vdd_cx_ao.regulator[0])) { + if (!(PTR_ERR(vdd_cx_ao.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_cx_ao regulator\n"); + return PTR_ERR(vdd_cx_ao.regulator[0]); + } + + regmap = qcom_cc_map(pdev, &gcc_sm6150_desc); + if (IS_ERR(regmap)) { + pr_err("Failed to map the gcc registers\n"); + return PTR_ERR(regmap); + } + + /* + * Disable the GPLL0 active input to MM blocks and GPU + * via MISC registers. 
+ */ + regmap_update_bits(regmap, GCC_DISPLAY_MISC, 0x3, 0x3); + regmap_update_bits(regmap, GCC_CAMERA_MISC, 0x3, 0x3); + regmap_update_bits(regmap, GCC_VIDEO_MISC, 0x3, 0x3); + regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3); + regmap_update_bits(regmap, GCC_EMAC_MISC, 0x3, 0x3); + + ret = qcom_cc_really_probe(pdev, &gcc_sm6150_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GCC clocks\n"); + return ret; + } + + /* DFS clock registration */ + ret = qcom_cc_register_rcg_dfs(pdev, &gcc_sm6150_dfs_desc); + if (ret) + dev_err(&pdev->dev, "Failed to register with DFS!\n"); + + dev_info(&pdev->dev, "Registered GCC clocks\n"); + + return 0; +} + +static struct platform_driver gcc_sm6150_driver = { + .probe = gcc_sm6150_probe, + .driver = { + .name = "gcc-sm6150", + .of_match_table = gcc_sm6150_match_table, + }, +}; + +static int __init gcc_sm6150_init(void) +{ + return platform_driver_register(&gcc_sm6150_driver); +} +subsys_initcall(gcc_sm6150_init); + +static void __exit gcc_sm6150_exit(void) +{ + platform_driver_unregister(&gcc_sm6150_driver); +} +module_exit(gcc_sm6150_exit); + +MODULE_DESCRIPTION("QTI GCC SM6150 Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:gcc-sm6150"); diff --git a/drivers/clk/qcom/gpucc-sm6150.c b/drivers/clk/qcom/gpucc-sm6150.c new file mode 100644 index 0000000000000000000000000000000000000000..d2f7f604908b335354e877bfcd527df2e95b9bbb --- /dev/null +++ b/drivers/clk/qcom/gpucc-sm6150.c @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "reset.h" +#include "vdd-level-sm6150.h" + +#define CX_GMU_CBCR_SLEEP_MASK 0xf +#define CX_GMU_CBCR_SLEEP_SHIFT 4 +#define CX_GMU_CBCR_WAKE_MASK 0xf +#define CX_GMU_CBCR_WAKE_SHIFT 8 + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); +static DEFINE_VDD_REGULATORS(vdd_mx, VDD_MX_NUM, 1, vdd_mx_corner); + +enum { + P_BI_TCXO, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL0_2X_CLK, + P_GPU_CC_PLL0_OUT_AUX2, + P_GPU_CC_PLL0_OUT_MAIN, + P_GPU_CC_PLL1_OUT_AUX, + P_GPU_CC_PLL1_OUT_AUX2, + P_GPU_CC_PLL1_OUT_MAIN, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_OUT_MAIN, 1 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gpu_cc_parent_names_0[] = { + "bi_tcxo", + "gpu_cc_pll0_out_main", + "gpu_cc_pll1_out_main", + "gpll0_out_main", + "gpll0_out_main_div", + "core_bi_pll_test_se", +}; + +static const struct parent_map gpu_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL0_2X_CLK, 1 }, + { P_GPU_CC_PLL0_OUT_AUX2, 2 }, + { P_GPU_CC_PLL1_OUT_AUX, 3 }, + { P_GPU_CC_PLL1_OUT_AUX2, 4 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gpu_cc_parent_names_1[] = { + "bi_tcxo", + "gpu_cc_pll0_out_aux", + "gpu_cc_pll0_out_aux2", + "gpu_cc_pll1_out_aux", + "gpu_cc_pll1_out_aux2", + "gpll0_out_main", + "core_bi_pll_test_se", +}; + +static struct pll_vco gpu_cc_pll_vco[] = { + { 1000000000, 2000000000, 0 }, + { 500000000, 1000000000, 2 }, +}; + +/* 1020MHz configuration */ +static const struct alpha_pll_config gpu_pll0_config = { + .l = 0x35, + .config_ctl_val = 0x4001055b, + .alpha_u = 0x20, + .alpha = 0x00, + .alpha_en_mask = BIT(24), + .vco_val = 0x0 << 20, + .vco_mask = 0x3 << 20, + .aux2_output_mask = BIT(2), +}; + +/* 930MHz configuration */ +static const struct alpha_pll_config gpu_pll1_config = { + .l = 0x30, + .config_ctl_val = 0x4001055b, + .alpha_u = 0x70, + .alpha = 0x00, + .alpha_en_mask = BIT(24), + .vco_val = 0x2 << 20, + .vco_mask = 0x3 << 20, + .aux2_output_mask = BIT(2), +}; + +static struct clk_alpha_pll gpu_cc_pll0_out_aux2 = { + .offset = 0x0, + .vco_table = gpu_cc_pll_vco, + .num_vco = ARRAY_SIZE(gpu_cc_pll_vco), + .flags = SUPPORTS_DYNAMIC_UPDATE, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll0_out_aux2", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_MX_NUM, + .rate_max = (unsigned long[VDD_MX_NUM]) { + [VDD_MX_MIN] = 1000000000, + [VDD_MX_NOMINAL] = 2000000000}, + }, + }, +}; + +static struct clk_alpha_pll gpu_cc_pll1_out_aux2 = { + .offset = 0x100, + .vco_table = gpu_cc_pll_vco, + .num_vco = ARRAY_SIZE(gpu_cc_pll_vco), + .flags = SUPPORTS_DYNAMIC_UPDATE, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll1_out_aux2", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + .vdd_class = &vdd_mx, + .num_rate_max = VDD_MX_NUM, + .rate_max = (unsigned long[VDD_MX_NUM]) { + [VDD_MX_MIN] = 
1000000000, + [VDD_MX_NOMINAL] = 2000000000}, + }, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x1120, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_gmu_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gmu_clk_src", + .parent_names = gpu_cc_parent_names_0, + .num_parents = 6, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 200000000}, + }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = { + F(290000000, P_GPU_CC_PLL1_OUT_AUX2, 2, 0, 0), + F(400000000, P_GPU_CC_PLL1_OUT_AUX2, 2, 0, 0), + F(513000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), + F(645000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), + F(706000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), + F(845000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), + F(895000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = { + .cmd_rcgr = 0x101c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_1, + .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src, + .flags = FORCE_ENABLE_RCG, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gfx3d_clk_src", + .parent_names = gpu_cc_parent_names_1, + .num_parents = 7, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 290000000, + [VDD_LOW] = 400000000, + [VDD_LOW_L1] = 513000000, + [VDD_NOMINAL] = 645000000, + [VDD_NOMINAL_L1] = 706000000, + [VDD_HIGH] = 845000000, + [VDD_HIGH_L1] = 895000000}, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x107c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x107c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_crc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_apb_clk = { + .halt_reg = 0x1088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_apb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gfx3d_clk = { + .halt_reg = 0x10a4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10a4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gfx3d_clk", + .parent_names = (const char *[]){ + "gpu_cc_gx_gfx3d_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = { + .halt_reg = 0x10a8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10a8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gfx3d_slv_clk", + .parent_names = (const char *[]){ + "gpu_cc_gx_gfx3d_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x1098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gmu_clk", + .parent_names = (const char *[]){ + "gpu_cc_gmu_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { + .halt_reg = 0x108c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x108c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_snoc_dvm_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_aon_clk = { + .halt_reg = 0x1004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_aon_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + .halt_reg = 0x109c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x109c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gfx3d_clk = { + .halt_reg = 0x1054, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x1054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gfx3d_clk", + .parent_names = (const char *[]){ + "gpu_cc_gx_gfx3d_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gmu_clk = { + .halt_reg = 0x1064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gmu_clk", + .parent_names = (const char *[]){ + "gpu_cc_gmu_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_sleep_clk = { + .halt_reg = 0x1090, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1090, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x1078, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gpu_cc_sm6150_clocks[] = { + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr, + [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr, + [GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr, + [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr, + [GPU_CC_PLL0_OUT_AUX2] = &gpu_cc_pll0_out_aux2.clkr, + [GPU_CC_PLL1_OUT_AUX2] = &gpu_cc_pll1_out_aux2.clkr, + [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr, + [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr, + [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr, + [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr, + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, +}; + +static const struct regmap_config gpu_cc_sm6150_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x7008, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_sm6150_desc = { + .config = &gpu_cc_sm6150_regmap_config, + .clks = gpu_cc_sm6150_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_sm6150_clocks), +}; + +static const struct of_device_id gpu_cc_sm6150_match_table[] = { + { .compatible = "qcom,gpucc-sm6150" }, + { } +}; +MODULE_DEVICE_TABLE(of, 
gpu_cc_sm6150_match_table); + +static int gpu_cc_sm6150_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + unsigned int value, mask; + + /* Get CX voltage regulator for CX and GMU clocks. */ + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + /* Get MX voltage regulator for GPU PLL graphic clock. */ + vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx"); + if (IS_ERR(vdd_mx.regulator[0])) { + if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_mx regulator\n"); + return PTR_ERR(vdd_mx.regulator[0]); + } + + regmap = qcom_cc_map(pdev, &gpu_cc_sm6150_desc); + if (IS_ERR(regmap)) { + pr_err("Failed to map the gpu_cc registers\n"); + return PTR_ERR(regmap); + } + + clk_alpha_pll_configure(&gpu_cc_pll0_out_aux2, regmap, + &gpu_pll0_config); + clk_alpha_pll_configure(&gpu_cc_pll1_out_aux2, regmap, + &gpu_pll1_config); + + ret = qcom_cc_really_probe(pdev, &gpu_cc_sm6150_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GPU CC clocks\n"); + return ret; + } + + /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */ + mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT; + mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT; + value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT; + regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg, + mask, value); + + dev_info(&pdev->dev, "Registered GPU CC clocks\n"); + + return ret; +} + +static struct platform_driver gpu_cc_sm6150_driver = { + .probe = gpu_cc_sm6150_probe, + .driver = { + .name = "gpu_cc-sm6150", + .of_match_table = gpu_cc_sm6150_match_table, + }, +}; + +static int __init gpu_cc_sm6150_init(void) +{ + return platform_driver_register(&gpu_cc_sm6150_driver); +} +subsys_initcall(gpu_cc_sm6150_init); + +static void __exit gpu_cc_sm6150_exit(void) +{ + platform_driver_unregister(&gpu_cc_sm6150_driver); +} +module_exit(gpu_cc_sm6150_exit); + +MODULE_DESCRIPTION("QTI GPU_CC SM6150 Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:gpu_cc-sm6150"); diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile index 6d100b5ed19755a016511a173c49813242dd9611..06c011f76a8ffb3c25b2fe83b21245805b3ea1d6 100644 --- a/drivers/clk/qcom/mdss/Makefile +++ b/drivers/clk/qcom/mdss/Makefile @@ -6,3 +6,5 @@ obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-7nm.o obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-7nm.o obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-7nm-util.o +obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28lpm.o +obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28nm-util.o diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c index 7290205a85cda224c9ddba10f8c2b143b62d8020..874c229910cf24f26ba271bb3a86eb44c7a36505 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c @@ -925,6 +925,11 @@ static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw, if (!vco->priv) pr_err("vco priv is null\n"); + if (!pll) { + pr_err("pll is null\n"); + return 0; + } + /* * Calculate the vco rate from HW registers only for handoff cases. 
* For other cases where a vco_10nm_set_rate() has already been diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c index f6c7e287b8caf80c76660e43429c4fb3310dc454..ae688cbd5d609d3ebb5bc964912e57db5fe9a81c 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,21 +16,20 @@ #include #include #include -#include -#include -#include -#include +#include #include "mdss-pll.h" #include "mdss-dsi-pll.h" +#include "mdss-dsi-pll-28nm.h" #define VCO_DELAY_USEC 1000 -static struct clk_div_ops fixed_2div_ops; -static const struct clk_ops byte_mux_clk_ops; -static const struct clk_ops pixel_clk_src_ops; -static const struct clk_ops byte_clk_src_ops; -static const struct clk_ops analog_postdiv_clk_ops; +enum { + DSI_PLL_0, + DSI_PLL_1, + DSI_PLL_MAX +}; + static struct lpfr_cfg lpfr_lut_struct[] = { {479500000, 8}, {480000000, 11}, @@ -44,263 +43,519 @@ static struct lpfr_cfg lpfr_lut_struct[] = { {750000000, 11}, }; -static int vco_set_rate_lpm(struct clk *c, unsigned long rate) +static void dsi_pll_sw_reset(struct mdss_pll_resources *rsc) { - int rc; - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - /* * DSI PLL software reset. Add HW recommended delays after toggling * the software reset bit off and back on. */ - MDSS_PLL_REG_W(dsi_pll_res->pll_base, + MDSS_PLL_REG_W(rsc->pll_base, DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01); - udelay(1000); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, + ndelay(500); + MDSS_PLL_REG_W(rsc->pll_base, DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00); - udelay(1000); +} + +static void dsi_pll_toggle_lock_detect( + struct mdss_pll_resources *rsc) +{ + /* DSI PLL toggle lock detect setting */ + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x04); + ndelay(500); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05); + udelay(512); +} - rc = vco_set_rate(vco, rate); +static int dsi_pll_check_lock_status( + struct mdss_pll_resources *rsc) +{ + int rc = 0; + + rc = dsi_pll_lock_status(rsc); + if (rc) + pr_debug("PLL Locked\n"); + else + pr_err("PLL failed to lock\n"); - mdss_pll_resource_enable(dsi_pll_res, false); return rc; } -static int dsi_pll_enable_seq_8916(struct mdss_pll_resources *dsi_pll_res) + +static int dsi_pll_enable_seq_gf2(struct mdss_pll_resources *rsc) { int pll_locked = 0; + dsi_pll_sw_reset(rsc); + /* - * DSI PLL software reset. Add HW recommended delays after toggling - * the software reset bit off and back on. + * GF PART 2 PLL power up sequence. + * Add necessary delays recommended by hardware. 
*/ - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01); - ndelay(500); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x04); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05); + udelay(3); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f); + udelay(500); + + dsi_pll_toggle_lock_detect(rsc); + + pll_locked = dsi_pll_check_lock_status(rsc); + return pll_locked ? 0 : -EINVAL; +} + +static int dsi_pll_enable_seq_gf1(struct mdss_pll_resources *rsc) +{ + int pll_locked = 0; + dsi_pll_sw_reset(rsc); /* - * PLL power up sequence. + * GF PART 1 PLL power up sequence. * Add necessary delays recommended by hardware. */ - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x34); - ndelay(500); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, + + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x14); + MDSS_PLL_REG_W(rsc->pll_base, DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01); - ndelay(500); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, + MDSS_PLL_REG_W(rsc->pll_base, DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05); - ndelay(500); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, + udelay(3); + MDSS_PLL_REG_W(rsc->pll_base, DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f); - ndelay(500); + udelay(500); - /* DSI PLL toggle lock detect setting */ - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x04); - ndelay(500); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05); - udelay(512); + dsi_pll_toggle_lock_detect(rsc); - pll_locked = dsi_pll_lock_status(dsi_pll_res); + pll_locked = dsi_pll_check_lock_status(rsc); + return pll_locked ? 0 : -EINVAL; +} - if (pll_locked) - pr_debug("PLL Locked\n"); - else - pr_err("PLL failed to lock\n"); +static int dsi_pll_enable_seq_tsmc(struct mdss_pll_resources *rsc) +{ + int pll_locked = 0; + dsi_pll_sw_reset(rsc); + /* + * TSMC PLL power up sequence. + * Add necessary delays recommended by hardware. + */ + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x34); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f); + udelay(500); + + dsi_pll_toggle_lock_detect(rsc); + + pll_locked = dsi_pll_check_lock_status(rsc); return pll_locked ? 
0 : -EINVAL; } -/* Op structures */ - -static const struct clk_ops clk_ops_dsi_vco = { - .set_rate = vco_set_rate_lpm, - .round_rate = vco_round_rate, - .handoff = vco_handoff, - .prepare = vco_prepare, - .unprepare = vco_unprepare, +static struct regmap_config dsi_pll_28lpm_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xF4, }; +static struct regmap_bus analog_postdiv_regmap_bus = { + .reg_write = analog_postdiv_reg_write, + .reg_read = analog_postdiv_reg_read, +}; -static struct clk_div_ops fixed_4div_ops = { - .set_div = fixed_4div_set_div, - .get_div = fixed_4div_get_div, +static struct regmap_bus byteclk_src_mux_regmap_bus = { + .reg_write = byteclk_mux_write_sel, + .reg_read = byteclk_mux_read_sel, }; -static struct clk_div_ops analog_postdiv_ops = { - .set_div = analog_set_div, - .get_div = analog_get_div, +static struct regmap_bus pclk_src_regmap_bus = { + .reg_write = pixel_clk_set_div, + .reg_read = pixel_clk_get_div, }; -static struct clk_div_ops digital_postdiv_ops = { - .set_div = digital_set_div, - .get_div = digital_get_div, +static const struct clk_ops clk_ops_vco_28lpm = { + .recalc_rate = vco_28nm_recalc_rate, + .set_rate = vco_28nm_set_rate, + .round_rate = vco_28nm_round_rate, + .prepare = vco_28nm_prepare, + .unprepare = vco_28nm_unprepare, }; -static struct clk_mux_ops byte_mux_ops = { - .set_mux_sel = set_byte_mux_sel, - .get_mux_sel = get_byte_mux_sel, +static struct dsi_pll_vco_clk dsi0pll_vco_clk = { + .ref_clk_rate = 19200000UL, + .min_rate = 350000000UL, + .max_rate = 750000000UL, + .pll_en_seq_cnt = 9, + .pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc, + .pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc, + .pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc, + .pll_enable_seqs[3] = dsi_pll_enable_seq_gf1, + .pll_enable_seqs[4] = dsi_pll_enable_seq_gf1, + .pll_enable_seqs[5] = dsi_pll_enable_seq_gf1, + .pll_enable_seqs[6] = dsi_pll_enable_seq_gf2, + .pll_enable_seqs[7] = dsi_pll_enable_seq_gf2, + .pll_enable_seqs[8] = dsi_pll_enable_seq_gf2, + .lpfr_lut_size = 10, + .lpfr_lut = lpfr_lut_struct, + .hw.init = &(struct clk_init_data){ + .name = "dsi0pll_vco_clk", + .parent_names = (const char *[]){"bi_tcxo"}, + .num_parents = 1, + .ops = &clk_ops_vco_28lpm, + .flags = CLK_GET_RATE_NOCACHE, + }, }; -static struct dsi_pll_vco_clk dsi_vco_clk_8916 = { - .ref_clk_rate = 19200000, - .min_rate = 350000000, - .max_rate = 750000000, - .pll_en_seq_cnt = 1, - .pll_enable_seqs[0] = dsi_pll_enable_seq_8916, +static struct dsi_pll_vco_clk dsi1pll_vco_clk = { + .ref_clk_rate = 19200000UL, + .min_rate = 350000000UL, + .max_rate = 750000000UL, + .pll_en_seq_cnt = 9, + .pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc, + .pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc, + .pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc, + .pll_enable_seqs[3] = dsi_pll_enable_seq_gf1, + .pll_enable_seqs[4] = dsi_pll_enable_seq_gf1, + .pll_enable_seqs[5] = dsi_pll_enable_seq_gf1, + .pll_enable_seqs[6] = dsi_pll_enable_seq_gf2, + .pll_enable_seqs[7] = dsi_pll_enable_seq_gf2, + .pll_enable_seqs[8] = dsi_pll_enable_seq_gf2, .lpfr_lut_size = 10, .lpfr_lut = lpfr_lut_struct, - .c = { - .dbg_name = "dsi_vco_clk_8916", - .ops = &clk_ops_dsi_vco, - CLK_INIT(dsi_vco_clk_8916.c), + .hw.init = &(struct clk_init_data){ + .name = "dsi1pll_vco_clk", + .parent_names = (const char *[]){"bi_tcxo"}, + .num_parents = 1, + .ops = &clk_ops_vco_28lpm, + .flags = CLK_GET_RATE_NOCACHE, }, }; -static struct div_clk analog_postdiv_clk_8916 = { - .data = { - .max_div = 255, - .min_div = 1, +static struct 
clk_regmap_div dsi0pll_analog_postdiv = { + .reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, + .shift = 0, + .width = 4, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "dsi0pll_analog_postdiv", + .parent_names = (const char *[]){"dsi0pll_vco_clk"}, + .num_parents = 1, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_regmap_div_ops, + }, }, - .ops = &analog_postdiv_ops, - .c = { - .parent = &dsi_vco_clk_8916.c, - .dbg_name = "analog_postdiv_clk", - .ops = &analog_postdiv_clk_ops, - .flags = CLKFLAG_NO_RATE_CACHE, - CLK_INIT(analog_postdiv_clk_8916.c), +}; + +static struct clk_regmap_div dsi1pll_analog_postdiv = { + .reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, + .shift = 0, + .width = 4, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "dsi1pll_analog_postdiv", + .parent_names = (const char *[]){"dsi1pll_vco_clk"}, + .num_parents = 1, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_regmap_div_ops, + }, }, }; -static struct div_clk indirect_path_div2_clk_8916 = { - .ops = &fixed_2div_ops, - .data = { - .div = 2, - .min_div = 2, - .max_div = 2, +static struct clk_fixed_factor dsi0pll_indirect_path_src = { + .div = 2, + .mult = 1, + .hw.init = &(struct clk_init_data){ + .name = "dsi0pll_indirect_path_src", + .parent_names = (const char *[]){"dsi0pll_analog_postdiv"}, + .num_parents = 1, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_fixed_factor_ops, }, - .c = { - .parent = &analog_postdiv_clk_8916.c, - .dbg_name = "indirect_path_div2_clk", - .ops = &clk_ops_div, - .flags = CLKFLAG_NO_RATE_CACHE, - CLK_INIT(indirect_path_div2_clk_8916.c), +}; + +static struct clk_fixed_factor dsi1pll_indirect_path_src = { + .div = 2, + .mult = 1, + .hw.init = &(struct clk_init_data){ + .name = "dsi1pll_indirect_path_src", + .parent_names = (const char *[]){"dsi1pll_analog_postdiv"}, + .num_parents = 1, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_fixed_factor_ops, }, }; -static struct div_clk pixel_clk_src = { - .data = { - .max_div = 255, - .min_div = 1, +static struct clk_regmap_mux dsi0pll_byteclk_src_mux = { + .reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, + .shift = 1, + .width = 1, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "dsi0pll_byteclk_src_mux", + .parent_names = (const char *[]){ + "dsi0pll_vco_clk", + "dsi0pll_indirect_path_src"}, + .num_parents = 2, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_regmap_mux_closest_ops, + }, }, - .ops = &digital_postdiv_ops, - .c = { - .parent = &dsi_vco_clk_8916.c, - .dbg_name = "pixel_clk_src_8916", - .ops = &pixel_clk_src_ops, - .flags = CLKFLAG_NO_RATE_CACHE, - CLK_INIT(pixel_clk_src.c), +}; + +static struct clk_regmap_mux dsi1pll_byteclk_src_mux = { + .reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, + .shift = 1, + .width = 1, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "dsi1pll_byteclk_src_mux", + .parent_names = (const char *[]){ + "dsi1pll_vco_clk", + "dsi1pll_indirect_path_src"}, + .num_parents = 2, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_regmap_mux_closest_ops, + }, }, }; -static struct mux_clk byte_mux_8916 = { - .num_parents = 2, - .parents = (struct clk_src[]){ - {&dsi_vco_clk_8916.c, 0}, - {&indirect_path_div2_clk_8916.c, 1}, +static struct clk_fixed_factor dsi0pll_byteclk_src = { + .div = 4, + .mult = 1, + .hw.init = &(struct clk_init_data){ + .name = "dsi0pll_byteclk_src", + .parent_names = (const char *[]){ + "dsi0pll_byteclk_src_mux"}, + .num_parents = 1, + .flags = 
(CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_fixed_factor_ops, }, - .ops = &byte_mux_ops, - .c = { - .parent = &dsi_vco_clk_8916.c, - .dbg_name = "byte_mux_8916", - .ops = &byte_mux_clk_ops, - CLK_INIT(byte_mux_8916.c), +}; + +static struct clk_fixed_factor dsi1pll_byteclk_src = { + .div = 4, + .mult = 1, + .hw.init = &(struct clk_init_data){ + .name = "dsi1pll_byteclk_src", + .parent_names = (const char *[]){ + "dsi1pll_byteclk_src_mux"}, + .num_parents = 1, + .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT), + .ops = &clk_fixed_factor_ops, }, }; -static struct div_clk byte_clk_src = { - .ops = &fixed_4div_ops, - .data = { - .min_div = 4, - .max_div = 4, +static struct clk_regmap_div dsi0pll_pclk_src = { + .reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, + .shift = 0, + .width = 8, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "dsi0pll_pclk_src", + .parent_names = (const char *[]){"dsi0pll_vco_clk"}, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + .ops = &clk_regmap_div_ops, + }, }, - .c = { - .parent = &byte_mux_8916.c, - .dbg_name = "byte_clk_src_8916", - .ops = &byte_clk_src_ops, - CLK_INIT(byte_clk_src.c), +}; + +static struct clk_regmap_div dsi1pll_pclk_src = { + .reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, + .shift = 0, + .width = 8, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "dsi1pll_pclk_src", + .parent_names = (const char *[]){"dsi1pll_vco_clk"}, + .num_parents = 1, + .flags = CLK_GET_RATE_NOCACHE, + .ops = &clk_regmap_div_ops, + }, }, }; -static struct clk_lookup mdss_dsi_pllcc_8916[] = { - CLK_LIST(pixel_clk_src), - CLK_LIST(byte_clk_src), +static struct clk_hw *mdss_dsi_pllcc_28lpm[] = { + [VCO_CLK_0] = &dsi0pll_vco_clk.hw, + [ANALOG_POSTDIV_0_CLK] = &dsi0pll_analog_postdiv.clkr.hw, + [INDIRECT_PATH_SRC_0_CLK] = &dsi0pll_indirect_path_src.hw, + [BYTECLK_SRC_MUX_0_CLK] = &dsi0pll_byteclk_src_mux.clkr.hw, + [BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw, + [PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw, + [VCO_CLK_1] = &dsi1pll_vco_clk.hw, + [ANALOG_POSTDIV_1_CLK] = &dsi1pll_analog_postdiv.clkr.hw, + [INDIRECT_PATH_SRC_1_CLK] = &dsi1pll_indirect_path_src.hw, + [BYTECLK_SRC_MUX_1_CLK] = &dsi1pll_byteclk_src_mux.clkr.hw, + [BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw, + [PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw, }; -int dsi_pll_clock_register_lpm(struct platform_device *pdev, +int dsi_pll_clock_register_28lpm(struct platform_device *pdev, struct mdss_pll_resources *pll_res) { - int rc; - - if (!pdev || !pdev->dev.of_node) { - pr_err("Invalid input parameters\n"); + int rc = 0, ndx, i; + struct clk *clk; + struct clk_onecell_data *clk_data; + int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_28lpm); + struct regmap *rmap; + + int const ssc_freq_min = 30000; /* min. recommended freq. value */ + int const ssc_freq_max = 33000; /* max. recommended freq. value */ + int const ssc_ppm_max = 5000; /* max. 
recommended ppm */ + + if (!pdev || !pdev->dev.of_node || + !pll_res || !pll_res->pll_base) { + pr_err("Invalid params\n"); return -EINVAL; } - if (!pll_res || !pll_res->pll_base) { - pr_err("Invalid PLL resources\n"); - return -EPROBE_DEFER; + ndx = pll_res->index; + + if (ndx >= DSI_PLL_MAX) { + pr_err("pll index(%d) NOT supported\n", ndx); + return -EINVAL; } - /* Set client data to mux, div and vco clocks */ - byte_clk_src.priv = pll_res; - pixel_clk_src.priv = pll_res; - byte_mux_8916.priv = pll_res; - indirect_path_div2_clk_8916.priv = pll_res; - analog_postdiv_clk_8916.priv = pll_res; - dsi_vco_clk_8916.priv = pll_res; pll_res->vco_delay = VCO_DELAY_USEC; - /* Set clock source operations */ - pixel_clk_src_ops = clk_ops_slave_div; - pixel_clk_src_ops.prepare = dsi_pll_div_prepare; + if (pll_res->ssc_en) { + if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) || + (pll_res->ssc_freq > ssc_freq_max)) { + pll_res->ssc_freq = ssc_freq_min; + pr_debug("SSC frequency out of recommended range. Set to default=%d\n", + pll_res->ssc_freq); + } - analog_postdiv_clk_ops = clk_ops_div; - analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare; + if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) { + pll_res->ssc_ppm = ssc_ppm_max; + pr_debug("SSC PPM out of recommended range. Set to default=%d\n", + pll_res->ssc_ppm); + } + } - byte_clk_src_ops = clk_ops_div; - byte_clk_src_ops.prepare = dsi_pll_div_prepare; + clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data), + GFP_KERNEL); + if (!clk_data) + return -ENOMEM; - byte_mux_clk_ops = clk_ops_gen_mux; - byte_mux_clk_ops.prepare = dsi_pll_mux_prepare; + clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks * + sizeof(struct clk *)), GFP_KERNEL); + if (!clk_data->clks) { + devm_kfree(&pdev->dev, clk_data); + return -ENOMEM; + } + clk_data->clk_num = num_clks; + + /* Establish client data */ + if (ndx == 0) { + rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus, + pll_res, &dsi_pll_28lpm_config); + if (IS_ERR(rmap)) { + pr_err("regmap init failed for DSI clock:%d\n", + pll_res->index); + return -EINVAL; + } + dsi0pll_byteclk_src_mux.clkr.regmap = rmap; + + rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus, + pll_res, &dsi_pll_28lpm_config); + if (IS_ERR(rmap)) { + pr_err("regmap init failed for DSI clock:%d\n", + pll_res->index); + return -EINVAL; + } + dsi0pll_analog_postdiv.clkr.regmap = rmap; + + rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus, + pll_res, &dsi_pll_28lpm_config); + if (IS_ERR(rmap)) { + pr_err("regmap init failed for DSI clock:%d\n", + pll_res->index); + return -EINVAL; + } + dsi0pll_pclk_src.clkr.regmap = rmap; + + dsi0pll_vco_clk.priv = pll_res; + for (i = VCO_CLK_0; i <= PCLK_SRC_0_CLK; i++) { + clk = devm_clk_register(&pdev->dev, + mdss_dsi_pllcc_28lpm[i]); + if (IS_ERR(clk)) { + pr_err("clk registration failed for DSI clock:%d\n", + pll_res->index); + rc = -EINVAL; + goto clk_register_fail; + } + clk_data->clks[i] = clk; - if (pll_res->target_id == MDSS_PLL_TARGET_8916 || - pll_res->target_id == MDSS_PLL_TARGET_8939 || - pll_res->target_id == MDSS_PLL_TARGET_8909) { - rc = of_msm_clock_register(pdev->dev.of_node, - mdss_dsi_pllcc_8916, ARRAY_SIZE(mdss_dsi_pllcc_8916)); - if (rc) { - pr_err("Clock register failed\n"); - rc = -EPROBE_DEFER; } + + rc = of_clk_add_provider(pdev->dev.of_node, + of_clk_src_onecell_get, clk_data); + } else { - pr_err("Invalid target ID\n"); - rc = -EINVAL; + rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus, + pll_res, 
&dsi_pll_28lpm_config); + if (IS_ERR(rmap)) { + pr_err("regmap init failed for DSI clock:%d\n", + pll_res->index); + return -EINVAL; + } + dsi1pll_byteclk_src_mux.clkr.regmap = rmap; + + rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus, + pll_res, &dsi_pll_28lpm_config); + if (IS_ERR(rmap)) { + pr_err("regmap init failed for DSI clock:%d\n", + pll_res->index); + return -EINVAL; + } + dsi1pll_analog_postdiv.clkr.regmap = rmap; + + rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus, + pll_res, &dsi_pll_28lpm_config); + if (IS_ERR(rmap)) { + pr_err("regmap init failed for DSI clock:%d\n", + pll_res->index); + return -EINVAL; + } + dsi1pll_pclk_src.clkr.regmap = rmap; + + dsi1pll_vco_clk.priv = pll_res; + for (i = VCO_CLK_1; i <= PCLK_SRC_1_CLK; i++) { + clk = devm_clk_register(&pdev->dev, + mdss_dsi_pllcc_28lpm[i]); + if (IS_ERR(clk)) { + pr_err("clk registration failed for DSI clock:%d\n", + pll_res->index); + rc = -EINVAL; + goto clk_register_fail; + } + clk_data->clks[i] = clk; + + } + + rc = of_clk_add_provider(pdev->dev.of_node, + of_clk_src_onecell_get, clk_data); } + if (!rc) { + pr_info("Registered DSI PLL ndx=%d, clocks successfully", ndx); - if (!rc) - pr_info("Registered DSI PLL clocks successfully\n"); + return rc; + } +clk_register_fail: + devm_kfree(&pdev->dev, clk_data->clks); + devm_kfree(&pdev->dev, clk_data); return rc; } diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c new file mode 100644 index 0000000000000000000000000000000000000000..4e23eb939c77be5f4507b67bdbc5282681bcc394 --- /dev/null +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c @@ -0,0 +1,693 @@ +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include +#include +#include +#include + +#include "mdss-pll.h" +#include "mdss-dsi-pll.h" +#include "mdss-dsi-pll-28nm.h" + +#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG (0x0) +#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG (0x0008) +#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG (0x000C) +#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG (0x0014) +#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG (0x0024) +#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG (0x002C) +#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG (0x0030) +#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG (0x0034) +#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0 (0x0038) +#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1 (0x003C) +#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2 (0x0040) +#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3 (0x0044) +#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4 (0x0048) +#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0 (0x004C) +#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1 (0x0050) +#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2 (0x0054) +#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3 (0x0058) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0 (0x006C) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2 (0x0074) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3 (0x0078) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4 (0x007C) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5 (0x0080) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6 (0x0084) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7 (0x0088) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8 (0x008C) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9 (0x0090) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10 (0x0094) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11 (0x0098) +#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG (0x009C) +#define DSI_PHY_PLL_UNIPHY_PLL_STATUS (0x00C0) + +#define DSI_PLL_POLL_DELAY_US 50 +#define DSI_PLL_POLL_TIMEOUT_US 500 +
+int analog_postdiv_reg_read(void *context, unsigned int reg, + unsigned int *div) +{ + int rc = 0; + struct mdss_pll_resources *rsc = context; + + if (is_gdsc_disabled(rsc)) + return 0; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable dsi pll resources, rc=%d\n", rc); + return rc; + } + + *div = MDSS_PLL_REG_R(rsc->pll_base, reg); + + /* + * In the common clock framework the divider value is interpreted as + * one less, hence return one less for all dividers except when zero. + */ + if (*div != 0) + *div -= 1; + + pr_debug("analog_postdiv div = %d\n", *div); + + (void)mdss_pll_resource_enable(rsc, false); + return rc; +} +
+int analog_postdiv_reg_write(void *context, unsigned int reg, + unsigned int div) +{ + int rc = 0; + struct mdss_pll_resources *rsc = context; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable dsi pll resources, rc=%d\n", rc); + return rc; + } + + pr_debug("analog_postdiv div = %d\n", div); + + /* + * In the common clock framework the divider value provided is one less, + * hence adjust the divider value by one prior to writing it to + * hardware. + */ + div++; + + MDSS_PLL_REG_W(rsc->pll_base, reg, div); + + (void)mdss_pll_resource_enable(rsc, false); + return rc; +} +
+int byteclk_mux_read_sel(void *context, unsigned int reg, + unsigned int *val) +{ + int rc = 0; + struct mdss_pll_resources *rsc = context; + + if (is_gdsc_disabled(rsc)) + return 0; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable dsi pll resources, rc=%d\n", rc); + return rc; + } + + *val = ((MDSS_PLL_REG_R(rsc->pll_base, reg) & BIT(1)) >> 1); + pr_debug("byteclk mux mode = %s\n", *val ? "indirect" : "direct"); + + (void)mdss_pll_resource_enable(rsc, false); + return rc; +} +
+int byteclk_mux_write_sel(void *context, unsigned int reg, + unsigned int val) +{ + int rc = 0; + u32 reg_val = 0; + struct mdss_pll_resources *rsc = context; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable dsi pll resources, rc=%d\n", rc); + return rc; + } + + pr_debug("byteclk mux set to %s mode\n", val ? "indirect" : "direct"); + + reg_val = MDSS_PLL_REG_R(rsc->pll_base, reg); + reg_val &= ~0x02; + reg_val |= (val << 1); + + MDSS_PLL_REG_W(rsc->pll_base, reg, reg_val); + + (void)mdss_pll_resource_enable(rsc, false); + + return rc; +} +
+int pixel_clk_get_div(void *context, unsigned int reg, + unsigned int *div) +{ + int rc = 0; + struct mdss_pll_resources *rsc = context; + + if (is_gdsc_disabled(rsc)) + return 0; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable dsi pll resources, rc=%d\n", rc); + return rc; + } + + *div = MDSS_PLL_REG_R(rsc->pll_base, reg); + + /* + * In the common clock framework the divider value is interpreted as + * one less, hence return one less for all dividers except when zero. + */ + if (*div != 0) + *div -= 1; + + pr_debug("pclk_src div = %d\n", *div); + + (void)mdss_pll_resource_enable(rsc, false); + return rc; +} +
+int pixel_clk_set_div(void *context, unsigned int reg, + unsigned int div) +{ + int rc = 0; + struct mdss_pll_resources *rsc = context; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable dsi pll resources, rc=%d\n", rc); + return rc; + } + + pr_debug("pclk_src div = %d\n", div); + + /* + * In the common clock framework the divider value provided is one less, + * hence adjust the divider value by one prior to writing it to + * hardware. + */ + div++; + + MDSS_PLL_REG_W(rsc->pll_base, reg, div); + + (void)mdss_pll_resource_enable(rsc, false); + return rc; +} +
+int dsi_pll_lock_status(struct mdss_pll_resources *rsc) +{ + u32 status; + int pll_locked; + + /* poll for PLL ready status */ + if (readl_poll_timeout_atomic((rsc->pll_base + + DSI_PHY_PLL_UNIPHY_PLL_STATUS), + status, + ((status & BIT(0)) == 1), + DSI_PLL_POLL_DELAY_US, + DSI_PLL_POLL_TIMEOUT_US)) { + pr_debug("DSI PLL status=%x failed to Lock\n", status); + pll_locked = 0; + } else { + pll_locked = 1; + } + + return pll_locked; +} +
+static int pll_28nm_vco_rate_calc(struct dsi_pll_vco_clk *vco, + struct mdss_dsi_vco_calc *vco_calc, unsigned long vco_clk_rate) +{ + s32 rem; + s64 frac_n_mode, ref_doubler_en_b; + s64 ref_clk_to_pll, div_fb, frac_n_value; + int i; + + /* Configure the Loop filter resistance */ + for (i = 0; i < vco->lpfr_lut_size; i++) + if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate) + break; + if (i == vco->lpfr_lut_size) { + pr_err("unable to get loop filter resistance. 
vco=%ld\n", + vco_clk_rate); + return -EINVAL; + } + vco_calc->lpfr_lut_res = vco->lpfr_lut[i].r; + + div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem); + if (rem) { + vco_calc->refclk_cfg = 0x1; + frac_n_mode = 1; + ref_doubler_en_b = 0; + } else { + vco_calc->refclk_cfg = 0x0; + frac_n_mode = 0; + ref_doubler_en_b = 1; + } + + pr_debug("refclk_cfg = %lld\n", vco_calc->refclk_cfg); + + ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (vco_calc->refclk_cfg)) + + (ref_doubler_en_b * vco->ref_clk_rate)); + + div_fb = div_s64_rem(vco_clk_rate, ref_clk_to_pll, &rem); + frac_n_value = div_s64(((s64)rem * (1 << 16)), ref_clk_to_pll); + vco_calc->gen_vco_clk = vco_clk_rate; + + pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll); + pr_debug("div_fb = %lld\n", div_fb); + pr_debug("frac_n_value = %lld\n", frac_n_value); + + pr_debug("Generated VCO Clock: %lld\n", vco_calc->gen_vco_clk); + rem = 0; + if (frac_n_mode) { + vco_calc->sdm_cfg0 = 0; + vco_calc->sdm_cfg1 = (div_fb & 0x3f) - 1; + vco_calc->sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem); + vco_calc->sdm_cfg2 = rem; + } else { + vco_calc->sdm_cfg0 = (0x1 << 5); + vco_calc->sdm_cfg0 |= (div_fb & 0x3f) - 1; + vco_calc->sdm_cfg1 = 0; + vco_calc->sdm_cfg2 = 0; + vco_calc->sdm_cfg3 = 0; + } + + pr_debug("sdm_cfg0=%lld\n", vco_calc->sdm_cfg0); + pr_debug("sdm_cfg1=%lld\n", vco_calc->sdm_cfg1); + pr_debug("sdm_cfg2=%lld\n", vco_calc->sdm_cfg2); + pr_debug("sdm_cfg3=%lld\n", vco_calc->sdm_cfg3); + + vco_calc->cal_cfg11 = div_s64_rem(vco_calc->gen_vco_clk, + 256 * 1000000, &rem); + vco_calc->cal_cfg10 = rem / 1000000; + pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n", + vco_calc->cal_cfg10, vco_calc->cal_cfg11); + + return 0; +} + +static void pll_28nm_ssc_param_calc(struct dsi_pll_vco_clk *vco, + struct mdss_dsi_vco_calc *vco_calc) +{ + struct mdss_pll_resources *rsc = vco->priv; + s64 ppm_freq, incr, spread_freq, div_rf, frac_n_value; + s32 rem; + + if (!rsc->ssc_en) { + pr_debug("DSI PLL SSC not enabled\n"); + return; + } + + vco_calc->ssc.kdiv = DIV_ROUND_CLOSEST(vco->ref_clk_rate, + 1000000) - 1; + vco_calc->ssc.triang_steps = DIV_ROUND_CLOSEST(vco->ref_clk_rate, + rsc->ssc_freq * (vco_calc->ssc.kdiv + 1)); + ppm_freq = div_s64(vco_calc->gen_vco_clk * rsc->ssc_ppm, + 1000000); + incr = div64_s64(ppm_freq * 65536, vco->ref_clk_rate * 2 * + vco_calc->ssc.triang_steps); + + vco_calc->ssc.triang_inc_7_0 = incr & 0xff; + vco_calc->ssc.triang_inc_9_8 = (incr >> 8) & 0x3; + + if (!rsc->ssc_center) + spread_freq = vco_calc->gen_vco_clk - ppm_freq; + else + spread_freq = vco_calc->gen_vco_clk - (ppm_freq / 2); + + div_rf = div_s64(spread_freq, 2 * vco->ref_clk_rate); + vco_calc->ssc.dc_offset = (div_rf - 1); + + div_s64_rem(spread_freq, 2 * vco->ref_clk_rate, &rem); + frac_n_value = div_s64((s64)rem * 65536, 2 * vco->ref_clk_rate); + + vco_calc->ssc.freq_seed_7_0 = frac_n_value & 0xff; + vco_calc->ssc.freq_seed_15_8 = (frac_n_value >> 8) & 0xff; +} + +static void pll_28nm_vco_config(struct dsi_pll_vco_clk *vco, + struct mdss_dsi_vco_calc *vco_calc) +{ + struct mdss_pll_resources *rsc = vco->priv; + void __iomem *pll_base = rsc->pll_base; + u32 vco_delay_us = rsc->vco_delay; + bool ssc_en = rsc->ssc_en; + + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG, + vco_calc->lpfr_lut_res); + + /* Loop filter capacitance values : c1 and c2 */ + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15); + + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02); + 
MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d); + + if (!ssc_en) { + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1, + (u32)(vco_calc->sdm_cfg1 & 0xff)); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2, + (u32)(vco_calc->sdm_cfg2 & 0xff)); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3, + (u32)(vco_calc->sdm_cfg3 & 0xff)); + } else { + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1, + (u32)vco_calc->ssc.dc_offset); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2, + (u32)vco_calc->ssc.freq_seed_7_0); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3, + (u32)vco_calc->ssc.freq_seed_15_8); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0, + (u32)vco_calc->ssc.kdiv); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1, + (u32)vco_calc->ssc.triang_inc_7_0); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2, + (u32)vco_calc->ssc.triang_inc_9_8); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3, + (u32)vco_calc->ssc.triang_steps); + } + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00); + + /* Add hardware recommended delay for correct PLL configuration */ + if (vco_delay_us) + udelay(vco_delay_us); + + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, + (u32)vco_calc->refclk_cfg); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0, + (u32)vco_calc->sdm_cfg0); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10, + (u32)(vco_calc->cal_cfg10 & 0xff)); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11, + (u32)(vco_calc->cal_cfg11 & 0xff)); + MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20); +} + +static int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate) +{ + struct mdss_dsi_vco_calc vco_calc = {0}; + int rc = 0; + + rc = pll_28nm_vco_rate_calc(vco, &vco_calc, rate); + if (rc) { + pr_err("vco rate calculation failed\n"); + return rc; + } + + pll_28nm_ssc_param_calc(vco, &vco_calc); + pll_28nm_vco_config(vco, &vco_calc); + + return 0; +} + +static unsigned long vco_get_rate(struct dsi_pll_vco_clk *vco) +{ + struct mdss_pll_resources *rsc = vco->priv; + int rc; + u32 sdm0, doubler, sdm_byp_div; + u64 vco_rate; + u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3; + u64 ref_clk = vco->ref_clk_rate; + + if (is_gdsc_disabled(rsc)) + return 0; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("Failed to enable mdss dsi pll resources\n"); + return rc; + } + + /* Check to see if the ref clk doubler is enabled */ + doubler = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0); + ref_clk += (doubler * vco->ref_clk_rate); + + /* see if it is integer mode or sdm mode */ + sdm0 = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0); + if (sdm0 & BIT(6)) { + /* integer mode */ + sdm_byp_div = (MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1; + vco_rate 
= ref_clk * sdm_byp_div; + } else { + /* sdm mode */ + sdm_dc_off = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF; + pr_debug("sdm_dc_off = %d\n", sdm_dc_off); + sdm2 = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF; + sdm3 = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF; + sdm_freq_seed = (sdm3 << 8) | sdm2; + pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed); + + vco_rate = (ref_clk * (sdm_dc_off + 1)) + + mult_frac(ref_clk, sdm_freq_seed, BIT(16)); + pr_debug("vco rate = %lld", vco_rate); + } + + pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate); + + mdss_pll_resource_enable(rsc, false); + + return (unsigned long)vco_rate; +} + +static int dsi_pll_enable(struct dsi_pll_vco_clk *vco) +{ + int i, rc; + struct mdss_pll_resources *rsc = vco->priv; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("failed to enable dsi pll(%d) resources\n", + rsc->index); + return rc; + } + + /* Try all enable sequences until one succeeds */ + for (i = 0; i < vco->pll_en_seq_cnt; i++) { + rc = vco->pll_enable_seqs[i](rsc); + pr_debug("DSI PLL %s after sequence #%d\n", + rc ? "unlocked" : "locked", i + 1); + if (!rc) + break; + } + + if (rc) { + mdss_pll_resource_enable(rsc, false); + pr_err("DSI PLL failed to lock\n"); + } + rsc->pll_on = true; + + return rc; +} + +static void dsi_pll_disable(struct dsi_pll_vco_clk *vco) +{ + struct mdss_pll_resources *rsc = vco->priv; + + if (!rsc->pll_on && + mdss_pll_resource_enable(rsc, true)) { + pr_err("failed to enable dsi pll(%d) resources\n", + rsc->index); + return; + } + + rsc->handoff_resources = false; + + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00); + + mdss_pll_resource_enable(rsc, false); + rsc->pll_on = false; + + pr_debug("DSI PLL Disabled\n"); +} + +int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw); + struct mdss_pll_resources *rsc = vco->priv; + int rc; + + if (!rsc) { + pr_err("pll resource not found\n"); + return -EINVAL; + } + + if (rsc->pll_on) + return 0; + + pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate); + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("failed to enable mdss dsi pll(%d), rc=%d\n", + rsc->index, rc); + return rc; + } + + /* + * DSI PLL software reset. Add HW recommended delays after toggling + * the software reset bit off and back on. 
+ */ + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01); + udelay(1000); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00); + udelay(1000); + + rc = vco_set_rate(vco, rate); + + mdss_pll_resource_enable(rsc, false); + + return 0; +} + +long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long rrate = rate; + struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw); + + if (rate < vco->min_rate) + rrate = vco->min_rate; + if (rate > vco->max_rate) + rrate = vco->max_rate; + + *parent_rate = rrate; + + return rrate; +} + +unsigned long vco_28nm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw); + struct mdss_pll_resources *rsc = vco->priv; + int rc; + u64 vco_rate = 0; + + if (!rsc) { + pr_err("dsi pll resources not available\n"); + return 0; + } + + if (is_gdsc_disabled(rsc)) + return 0; + + rc = mdss_pll_resource_enable(rsc, true); + if (rc) { + pr_err("failed to enable dsi pll(%d) resources\n", + rsc->index); + return 0; + } + + if (dsi_pll_lock_status(rsc)) { + rsc->handoff_resources = true; + rsc->pll_on = true; + vco_rate = vco_get_rate(vco); + } else { + mdss_pll_resource_enable(rsc, false); + } + + return (unsigned long)vco_rate; +} + +int vco_28nm_prepare(struct clk_hw *hw) +{ + int rc = 0; + struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw); + struct mdss_pll_resources *rsc = vco->priv; + + if (!rsc) { + pr_err("dsi pll resources not available\n"); + return -EINVAL; + } + + if ((rsc->vco_cached_rate != 0) + && (rsc->vco_cached_rate == clk_hw_get_rate(hw))) { + rc = hw->init->ops->set_rate(hw, rsc->vco_cached_rate, + rsc->vco_cached_rate); + if (rc) { + pr_err("pll(%d ) set_rate failed. rc=%d\n", + rsc->index, rc); + goto error; + } + + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, + rsc->cached_postdiv1); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, + rsc->cached_postdiv3); + MDSS_PLL_REG_W(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, + rsc->cached_vreg_cfg); + } + + rc = dsi_pll_enable(vco); + +error: + return rc; +} + +void vco_28nm_unprepare(struct clk_hw *hw) +{ + struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw); + struct mdss_pll_resources *rsc = vco->priv; + + if (!rsc) { + pr_err("dsi pll resources not available\n"); + return; + } + + rsc->cached_postdiv1 = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG); + rsc->cached_postdiv3 = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG); + rsc->cached_vreg_cfg = MDSS_PLL_REG_R(rsc->pll_base, + DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG); + + rsc->vco_cached_rate = clk_hw_get_rate(hw); + + dsi_pll_disable(vco); +} diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h new file mode 100644 index 0000000000000000000000000000000000000000..b2ab4196ce472388280e932bce3c60f6c4c8768e --- /dev/null +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h @@ -0,0 +1,72 @@ +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#ifndef __MDSS_DSI_PLL_28NM_H +#define __MDSS_DSI_PLL_28NM_H + +#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG (0x0020) +#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2 (0x0064) +#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG (0x0068) +#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1 (0x0070) + +#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG (0x0004) +#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG (0x0028) +#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG (0x0010) + +struct ssc_params { + s32 kdiv; + s64 triang_inc_7_0; + s64 triang_inc_9_8; + s64 triang_steps; + s64 dc_offset; + s64 freq_seed_7_0; + s64 freq_seed_15_8; +}; + +struct mdss_dsi_vco_calc { + s64 sdm_cfg0; + s64 sdm_cfg1; + s64 sdm_cfg2; + s64 sdm_cfg3; + s64 cal_cfg10; + s64 cal_cfg11; + s64 refclk_cfg; + s64 gen_vco_clk; + u32 lpfr_lut_res; + struct ssc_params ssc; +}; + +unsigned long vco_28nm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate); +int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); +long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate); +int vco_28nm_prepare(struct clk_hw *hw); +void vco_28nm_unprepare(struct clk_hw *hw); + +int analog_postdiv_reg_write(void *context, + unsigned int reg, unsigned int div); +int analog_postdiv_reg_read(void *context, + unsigned int reg, unsigned int *div); +int byteclk_mux_write_sel(void *context, + unsigned int reg, unsigned int val); +int byteclk_mux_read_sel(void *context, + unsigned int reg, unsigned int *val); +int pixel_clk_set_div(void *context, + unsigned int reg, unsigned int div); +int pixel_clk_get_div(void *context, + unsigned int reg, unsigned int *div); + +int dsi_pll_lock_status(struct mdss_pll_resources *rsc); +#endif /* __MDSS_DSI_PLL_28NM_H */ diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c deleted file mode 100644 index 4e66b7837eb85bb368f133671a2d75c7b0557fb6..0000000000000000000000000000000000000000 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c +++ /dev/null @@ -1,587 +0,0 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#define pr_fmt(fmt) "%s: " fmt, __func__ - -#include -#include -#include -#include -#include - -#include "mdss-pll.h" -#include "mdss-dsi-pll.h" - -#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG (0x0) -#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG (0x0004) -#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG (0x0008) -#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG (0x000C) -#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG (0x0010) -#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG (0x0014) -#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG (0x0024) -#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG (0x0028) -#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG (0x002C) -#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG (0x0030) -#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG (0x0034) -#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0 (0x0038) -#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1 (0x003C) -#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2 (0x0040) -#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3 (0x0044) -#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4 (0x0048) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0 (0x006C) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2 (0x0074) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3 (0x0078) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4 (0x007C) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5 (0x0080) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6 (0x0084) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7 (0x0088) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8 (0x008C) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9 (0x0090) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10 (0x0094) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11 (0x0098) -#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG (0x009C) -#define DSI_PHY_PLL_UNIPHY_PLL_STATUS (0x00C0) - -#define DSI_PLL_POLL_DELAY_US 50 -#define DSI_PLL_POLL_TIMEOUT_US 500 - -int set_byte_mux_sel(struct mux_clk *clk, int sel) -{ - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - pr_debug("byte mux set to %s mode\n", sel ? "indirect" : "direct"); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, (sel << 1)); - - return 0; -} - -int get_byte_mux_sel(struct mux_clk *clk) -{ - int mux_mode, rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - if (is_gdsc_disabled(dsi_pll_res)) - return 0; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - mux_mode = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG) & BIT(1); - - pr_debug("byte mux mode = %s", mux_mode ? 
"indirect" : "direct"); - mdss_pll_resource_enable(dsi_pll_res, false); - - return !!mux_mode; -} - -int dsi_pll_div_prepare(struct clk *c) -{ - struct div_clk *div = to_div_clk(c); - /* Restore the divider's value */ - return div->ops->set_div(div, div->data.div); -} - -int dsi_pll_mux_prepare(struct clk *c) -{ - struct mux_clk *mux = to_mux_clk(c); - int i, rc, sel = 0; - struct mdss_pll_resources *dsi_pll_res = mux->priv; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - for (i = 0; i < mux->num_parents; i++) - if (mux->parents[i].src == c->parent) { - sel = mux->parents[i].sel; - break; - } - - if (i == mux->num_parents) { - pr_err("Failed to select the parent clock\n"); - rc = -EINVAL; - goto error; - } - - /* Restore the mux source select value */ - rc = mux->ops->set_mux_sel(mux, sel); - -error: - mdss_pll_resource_enable(dsi_pll_res, false); - return rc; -} - -int fixed_4div_set_div(struct div_clk *clk, int div) -{ - int rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, (div - 1)); - - mdss_pll_resource_enable(dsi_pll_res, false); - return rc; -} - -int fixed_4div_get_div(struct div_clk *clk) -{ - int div = 0, rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - if (is_gdsc_disabled(dsi_pll_res)) - return 0; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - div = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG); - - mdss_pll_resource_enable(dsi_pll_res, false); - return div + 1; -} - -int digital_set_div(struct div_clk *clk, int div) -{ - int rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, (div - 1)); - - mdss_pll_resource_enable(dsi_pll_res, false); - return rc; -} - -int digital_get_div(struct div_clk *clk) -{ - int div = 0, rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - if (is_gdsc_disabled(dsi_pll_res)) - return 0; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - div = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG); - - mdss_pll_resource_enable(dsi_pll_res, false); - return div + 1; -} - -int analog_set_div(struct div_clk *clk, int div) -{ - int rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, div - 1); - - mdss_pll_resource_enable(dsi_pll_res, false); - return rc; -} - -int analog_get_div(struct div_clk *clk) -{ - int div = 0, rc; - struct mdss_pll_resources *dsi_pll_res = clk->priv; - - if (is_gdsc_disabled(dsi_pll_res)) - return 0; - - rc = mdss_pll_resource_enable(clk->priv, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - div = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG) + 
1; - - mdss_pll_resource_enable(dsi_pll_res, false); - - return div; -} - -int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res) -{ - u32 status; - int pll_locked; - - /* poll for PLL ready status */ - if (readl_poll_timeout_atomic((dsi_pll_res->pll_base + - DSI_PHY_PLL_UNIPHY_PLL_STATUS), - status, - ((status & BIT(0)) == 1), - DSI_PLL_POLL_DELAY_US, - DSI_PLL_POLL_TIMEOUT_US)) { - pr_debug("DSI PLL status=%x failed to Lock\n", status); - pll_locked = 0; - } else { - pll_locked = 1; - } - - return pll_locked; -} - -int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate) -{ - s64 vco_clk_rate = rate; - s32 rem; - s64 refclk_cfg, frac_n_mode, ref_doubler_en_b; - s64 ref_clk_to_pll, div_fbx1000, frac_n_value; - s64 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3; - s64 gen_vco_clk, cal_cfg10, cal_cfg11; - u32 res; - int i; - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - /* Configure the Loop filter resistance */ - for (i = 0; i < vco->lpfr_lut_size; i++) - if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate) - break; - if (i == vco->lpfr_lut_size) { - pr_err("unable to get loop filter resistance. vco=%ld\n", rate); - return -EINVAL; - } - res = vco->lpfr_lut[i].r; - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG, res); - - /* Loop filter capacitance values : c1 and c2 */ - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15); - - div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem); - if (rem) { - refclk_cfg = 0x1; - frac_n_mode = 1; - ref_doubler_en_b = 0; - } else { - refclk_cfg = 0x0; - frac_n_mode = 0; - ref_doubler_en_b = 1; - } - - pr_debug("refclk_cfg = %lld\n", refclk_cfg); - - ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (refclk_cfg)) - + (ref_doubler_en_b * vco->ref_clk_rate)); - div_fbx1000 = div_s64((vco_clk_rate * 1000), ref_clk_to_pll); - - div_s64_rem(div_fbx1000, 1000, &rem); - frac_n_value = div_s64((rem * (1 << 16)), 1000); - gen_vco_clk = div_s64(div_fbx1000 * ref_clk_to_pll, 1000); - - pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll); - pr_debug("div_fb = %lld\n", div_fbx1000); - pr_debug("frac_n_value = %lld\n", frac_n_value); - - pr_debug("Generated VCO Clock: %lld\n", gen_vco_clk); - rem = 0; - if (frac_n_mode) { - sdm_cfg0 = (0x0 << 5); - sdm_cfg0 |= (0x0 & 0x3f); - sdm_cfg1 = (div_s64(div_fbx1000, 1000) & 0x3f) - 1; - sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem); - sdm_cfg2 = rem; - } else { - sdm_cfg0 = (0x1 << 5); - sdm_cfg0 |= (div_s64(div_fbx1000, 1000) & 0x3f) - 1; - sdm_cfg1 = (0x0 & 0x3f); - sdm_cfg2 = 0; - sdm_cfg3 = 0; - } - - pr_debug("sdm_cfg0=%lld\n", sdm_cfg0); - pr_debug("sdm_cfg1=%lld\n", sdm_cfg1); - pr_debug("sdm_cfg2=%lld\n", sdm_cfg2); - pr_debug("sdm_cfg3=%lld\n", sdm_cfg3); - - cal_cfg11 = div_s64_rem(gen_vco_clk, 256 * 1000000, &rem); - cal_cfg10 = rem / 1000000; - pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n", cal_cfg10, cal_cfg11); - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d); - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1, (u32)(sdm_cfg1 & 0xff)); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2, (u32)(sdm_cfg2 & 0xff)); - 
MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3, (u32)(sdm_cfg3 & 0xff)); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00); - - /* Add hardware recommended delay for correct PLL configuration */ - if (dsi_pll_res->vco_delay) - udelay(dsi_pll_res->vco_delay); - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, (u32)refclk_cfg); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0, (u32)sdm_cfg0); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10, (u32)(cal_cfg10 & 0xff)); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11, (u32)(cal_cfg11 & 0xff)); - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20); - - return 0; -} - -unsigned long vco_get_rate(struct clk *c) -{ - u32 sdm0, doubler, sdm_byp_div; - u64 vco_rate; - u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3; - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - u64 ref_clk = vco->ref_clk_rate; - int rc; - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - if (is_gdsc_disabled(dsi_pll_res)) - return 0; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - /* Check to see if the ref clk doubler is enabled */ - doubler = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0); - ref_clk += (doubler * vco->ref_clk_rate); - - /* see if it is integer mode or sdm mode */ - sdm0 = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0); - if (sdm0 & BIT(6)) { - /* integer mode */ - sdm_byp_div = (MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1; - vco_rate = ref_clk * sdm_byp_div; - } else { - /* sdm mode */ - sdm_dc_off = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF; - pr_debug("sdm_dc_off = %d\n", sdm_dc_off); - sdm2 = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF; - sdm3 = MDSS_PLL_REG_R(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF; - sdm_freq_seed = (sdm3 << 8) | sdm2; - pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed); - - vco_rate = (ref_clk * (sdm_dc_off + 1)) + - mult_frac(ref_clk, sdm_freq_seed, BIT(16)); - pr_debug("vco rate = %lld", vco_rate); - } - - pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate); - - mdss_pll_resource_enable(dsi_pll_res, false); - - return (unsigned long)vco_rate; -} - -static int dsi_pll_enable(struct clk *c) -{ - int i, rc; - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return rc; - } - - /* Try all enable sequences until one succeeds */ - for (i = 0; i < vco->pll_en_seq_cnt; i++) { - rc = vco->pll_enable_seqs[i](dsi_pll_res); - 
pr_debug("DSI PLL %s after sequence #%d\n", - rc ? "unlocked" : "locked", i + 1); - if (!rc) - break; - } - - if (rc) { - mdss_pll_resource_enable(dsi_pll_res, false); - pr_err("DSI PLL failed to lock\n"); - } - dsi_pll_res->pll_on = true; - - return rc; -} - -static void dsi_pll_disable(struct clk *c) -{ - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - if (!dsi_pll_res->pll_on && - mdss_pll_resource_enable(dsi_pll_res, true)) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return; - } - - dsi_pll_res->handoff_resources = false; - - MDSS_PLL_REG_W(dsi_pll_res->pll_base, - DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00); - - mdss_pll_resource_enable(dsi_pll_res, false); - dsi_pll_res->pll_on = false; - - pr_debug("DSI PLL Disabled\n"); -} - -long vco_round_rate(struct clk *c, unsigned long rate) -{ - unsigned long rrate = rate; - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - - if (rate < vco->min_rate) - rrate = vco->min_rate; - if (rate > vco->max_rate) - rrate = vco->max_rate; - - return rrate; -} - -enum handoff vco_handoff(struct clk *c) -{ - int rc; - enum handoff ret = HANDOFF_DISABLED_CLK; - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - if (is_gdsc_disabled(dsi_pll_res)) - return HANDOFF_DISABLED_CLK; - - rc = mdss_pll_resource_enable(dsi_pll_res, true); - if (rc) { - pr_err("Failed to enable mdss dsi pll resources\n"); - return ret; - } - - if (dsi_pll_lock_status(dsi_pll_res)) { - dsi_pll_res->handoff_resources = true; - dsi_pll_res->pll_on = true; - c->rate = vco_get_rate(c); - ret = HANDOFF_ENABLED_CLK; - } else { - mdss_pll_resource_enable(dsi_pll_res, false); - } - - return ret; -} - -int vco_prepare(struct clk *c) -{ - int rc = 0; - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - if (!dsi_pll_res) { - pr_err("Dsi pll resources are not available\n"); - return -EINVAL; - } - - if ((dsi_pll_res->vco_cached_rate != 0) - && (dsi_pll_res->vco_cached_rate == c->rate)) { - rc = c->ops->set_rate(c, dsi_pll_res->vco_cached_rate); - if (rc) { - pr_err("vco_set_rate failed. 
rc=%d\n", rc); - goto error; - } - } - - rc = dsi_pll_enable(c); - -error: - return rc; -} - -void vco_unprepare(struct clk *c) -{ - struct dsi_pll_vco_clk *vco = to_vco_clk(c); - struct mdss_pll_resources *dsi_pll_res = vco->priv; - - if (!dsi_pll_res) { - pr_err("Dsi pll resources are not available\n"); - return; - } - - dsi_pll_res->vco_cached_rate = c->rate; - dsi_pll_disable(c); -} - diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h index 1edcb8afc11c78ac01b6e40a007650c722c50f76..42d3a1188ccc4c197da1b545ab3bc90cdad6c6aa 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h @@ -17,11 +17,6 @@ #include "mdss-pll.h" #define MAX_DSI_PLL_EN_SEQS 10 -#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG (0x0020) -#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2 (0x0064) -#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG (0x0068) -#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1 (0x0070) - /* Register offsets for 20nm PHY PLL */ #define MMSS_DSI_PHY_PLL_PLL_CNTRL (0x0014) #define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN (0x002C) @@ -51,6 +46,8 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev, int dsi_pll_clock_register_7nm(struct platform_device *pdev, struct mdss_pll_resources *pll_res); +int dsi_pll_clock_register_28lpm(struct platform_device *pdev, + struct mdss_pll_resources *pll_res); static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw) { diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c index d31d7c2a7f0a3c4100e20fbb92e7a7d2486d5783..8ff3567f1f15803d132a8225f877803dc25b175e 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.c +++ b/drivers/clk/qcom/mdss/mdss-pll.c @@ -133,6 +133,8 @@ static int mdss_pll_resource_parse(struct platform_device *pdev, pll_res->pll_interface_type = MDSS_DP_PLL_7NM; else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm")) pll_res->pll_interface_type = MDSS_DSI_PLL_7NM; + else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm")) + pll_res->pll_interface_type = MDSS_DSI_PLL_28LPM; else goto err; @@ -167,6 +169,9 @@ static int mdss_pll_clock_register(struct platform_device *pdev, case MDSS_DP_PLL_7NM: rc = dp_pll_clock_register_7nm(pdev, pll_res); break; + case MDSS_DSI_PLL_28LPM: + rc = dsi_pll_clock_register_28lpm(pdev, pll_res); + break; case MDSS_UNKNOWN_PLL: default: rc = -EINVAL; @@ -397,6 +402,7 @@ static const struct of_device_id mdss_pll_dt_match[] = { {.compatible = "qcom,mdss_dp_pll_10nm"}, {.compatible = "qcom,mdss_dsi_pll_7nm"}, {.compatible = "qcom,mdss_dp_pll_7nm"}, + {.compatible = "qcom,mdss_dsi_pll_28lpm"}, {} }; diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h index 2c27ddaa1a31f794e72c00ea535a9011cb2b3731..48c57f79eb5efef2866a254837a20d91e238c9d7 100644 --- a/drivers/clk/qcom/mdss/mdss-pll.h +++ b/drivers/clk/qcom/mdss/mdss-pll.h @@ -12,7 +12,7 @@ #ifndef __MDSS_PLL_H #define __MDSS_PLL_H -#include + #include #include #include @@ -22,6 +22,11 @@ #include "../clk-regmap-divider.h" #include "../clk-regmap-mux.h" +#if defined(CONFIG_DRM) +#include +#else +#include +#endif #define MDSS_PLL_REG_W(base, offset, data) \ writel_relaxed((data), (base) + (offset)) @@ -40,6 +45,7 @@ enum { MDSS_DP_PLL_10NM, MDSS_DSI_PLL_7NM, MDSS_DP_PLL_7NM, + MDSS_DSI_PLL_28LPM, MDSS_UNKNOWN_PLL, }; @@ -103,6 +109,10 @@ struct mdss_pll_resources { u32 cached_cfg1; u32 cached_outdiv; + u32 cached_postdiv1; + u32 cached_postdiv3; + u32 cached_vreg_cfg; + /* dsi/edp/hmdi pll interface type */ u32 pll_interface_type; diff --git 
a/drivers/clk/qcom/vdd-level-sm6150.h b/drivers/clk/qcom/vdd-level-sm6150.h new file mode 100644 index 0000000000000000000000000000000000000000..88397238c7c16b746d371e78bdf604b691432deb --- /dev/null +++ b/drivers/clk/qcom/vdd-level-sm6150.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_SM6150_H +#define __DRIVERS_CLK_QCOM_VDD_LEVEL_SM6150_H + +#include +#include + +enum vdd_mx_levels { + VDD_MX_NONE, + VDD_MX_MIN, /* MIN SVS */ + VDD_MX_LOWER, /* SVS2 */ + VDD_MX_LOW, /* SVS */ + VDD_MX_LOW_L1, /* SVSL1 */ + VDD_MX_NOMINAL, /* NOM */ + VDD_MX_HIGH, /* TURBO */ + VDD_MX_NUM, +}; + +static int vdd_mx_corner[] = { + RPMH_REGULATOR_LEVEL_OFF, /* VDD_NONE */ + RPMH_REGULATOR_LEVEL_MIN_SVS, /* VDD_MIN */ + RPMH_REGULATOR_LEVEL_LOW_SVS, /* VDD_LOWER */ + RPMH_REGULATOR_LEVEL_SVS, /* VDD_LOW */ + RPMH_REGULATOR_LEVEL_SVS_L1, /* VDD_LOW_L1 */ + RPMH_REGULATOR_LEVEL_NOM, /* VDD_NOMINAL */ + RPMH_REGULATOR_LEVEL_TURBO, /* VDD_HIGH */ +}; + +enum vdd_levels { + VDD_NONE, + VDD_MIN, /* MIN SVS */ + VDD_LOWER, /* SVS2 */ + VDD_LOW, /* SVS */ + VDD_LOW_L1, /* SVSL1 */ + VDD_NOMINAL, /* NOM */ + VDD_NOMINAL_L1, /* NOM1 */ + VDD_HIGH, /* TURBO */ + VDD_HIGH_L1, /* TURBO1 */ + VDD_NUM, +}; + +static int vdd_corner[] = { + RPMH_REGULATOR_LEVEL_OFF, /* VDD_NONE */ + RPMH_REGULATOR_LEVEL_MIN_SVS, /* VDD_MIN */ + RPMH_REGULATOR_LEVEL_LOW_SVS, /* VDD_LOWER */ + RPMH_REGULATOR_LEVEL_SVS, /* VDD_LOW */ + RPMH_REGULATOR_LEVEL_SVS_L1, /* VDD_LOW_L1 */ + RPMH_REGULATOR_LEVEL_NOM, /* VDD_NOMINAL */ + RPMH_REGULATOR_LEVEL_NOM_L1, /* VDD_NOMINAL_L1 */ + RPMH_REGULATOR_LEVEL_TURBO, /* VDD_HIGH */ + RPMH_REGULATOR_LEVEL_TURBO_L1, /* VDD_HIGH_L1 */ + RPMH_REGULATOR_LEVEL_MAX, /* VDD_MAX */ +}; + +#endif diff --git a/drivers/clk/qcom/videocc-sm6150.c b/drivers/clk/qcom/videocc-sm6150.c new file mode 100644 index 0000000000000000000000000000000000000000..0e3d9825f50e2f753e988431fb1137a96ac78af5 --- /dev/null +++ b/drivers/clk/qcom/videocc-sm6150.c @@ -0,0 +1,433 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "clk: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "vdd-level-sm6150.h" + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner); + +enum { + P_BI_TCXO, + P_CHIP_SLEEP_CLK, + P_CORE_BI_PLL_TEST_SE, + P_VIDEO_PLL0_OUT_AUX, + P_VIDEO_PLL0_OUT_AUX2, + P_VIDEO_PLL0_OUT_MAIN, +}; + +static const struct parent_map video_cc_parent_map_0[] = { + { P_CHIP_SLEEP_CLK, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const video_cc_parent_names_0[] = { + "chip_sleep_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map video_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_VIDEO_PLL0_OUT_MAIN, 1 }, + { P_VIDEO_PLL0_OUT_AUX, 2 }, + { P_VIDEO_PLL0_OUT_AUX2, 3 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const video_cc_parent_names_1[] = { + "bi_tcxo", + "video_pll0_out_main", + "video_pll0_out_aux", + "video_pll0_out_aux2", + "core_bi_pll_test_se", +}; + +static const struct parent_map video_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const video_cc_parent_names_2[] = { + "bi_tcxo", + "core_bi_pll_test_se", +}; + +static struct pll_vco video_cc_pll_vco[] = { + { 500000000, 1000000000, 2 }, +}; + +/* 600MHz configuration */ +static const struct alpha_pll_config video_pll0_config = { + .l = 0x1F, + .alpha_u = 0x40, + .alpha = 0x00, + .alpha_en_mask = BIT(24), + .vco_val = 0x2 << 20, + .vco_mask = 0x3 << 20, + .main_output_mask = BIT(0), + .config_ctl_val = 0x4001055b, + .user_ctl_val = 0x00000001, + .user_ctl_hi_val = 0x00000004, +}; + +static struct clk_alpha_pll video_pll0_out_main = { + .offset = 0x42c, + .vco_table = video_cc_pll_vco, + .num_vco = ARRAY_SIZE(video_cc_pll_vco), + .flags = SUPPORTS_DYNAMIC_UPDATE, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "video_pll0_out_main", + .parent_names = (const char *[]){ "bi_tcxo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_MIN] = 1000000000, + [VDD_NOMINAL] = 2000000000}, + }, + }, +}; + +/* chip_sleep_clk is external clocks running at 32000Hz */ +static struct clk_fixed_rate chip_sleep_clk = { + .fixed_rate = 32000, + .hw.init = &(struct clk_init_data){ + .name = "chip_sleep_clk", + .ops = &clk_fixed_rate_ops, + }, +}; + +static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = { + F(32000, P_CHIP_SLEEP_CLK, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_sleep_clk_src = { + .cmd_rcgr = 0xaf8, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_0, + .freq_tbl = ftbl_video_cc_sleep_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "video_cc_sleep_clk_src", + .parent_names = video_cc_parent_names_0, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 32000}, + }, +}; + +static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(133333333, P_VIDEO_PLL0_OUT_MAIN, 4.5, 0, 0), + F(240000000, P_VIDEO_PLL0_OUT_MAIN, 2.5, 0, 0), + F(300000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + F(380000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + 
F(410000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + F(460000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_venus_clk_src = { + .cmd_rcgr = 0x7f0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_1, + .freq_tbl = ftbl_video_cc_venus_clk_src, + .enable_safe_config = true, + .clkr.hw.init = &(struct clk_init_data){ + .name = "video_cc_venus_clk_src", + .parent_names = video_cc_parent_names_1, + .num_parents = 5, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 133333333, + [VDD_LOW] = 240000000, + [VDD_LOW_L1] = 300000000, + [VDD_NOMINAL] = 380000000, + [VDD_NOMINAL_L1] = 410000000, + [VDD_HIGH] = 460000000}, + }, +}; + +static const struct freq_tbl ftbl_video_cc_xo_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 video_cc_xo_clk_src = { + .cmd_rcgr = 0xa98, + .mnd_width = 0, + .hid_width = 5, + .parent_map = video_cc_parent_map_2, + .freq_tbl = ftbl_video_cc_xo_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "video_cc_xo_clk_src", + .parent_names = video_cc_parent_names_2, + .num_parents = 2, + .ops = &clk_rcg2_ops, + .vdd_class = &vdd_cx, + .num_rate_max = VDD_NUM, + .rate_max = (unsigned long[VDD_NUM]) { + [VDD_LOWER] = 19200000}, + }, +}; + +static struct clk_branch video_cc_apb_clk = { + .halt_reg = 0x990, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x990, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_apb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_sleep_clk = { + .halt_reg = 0xb18, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb18, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_sleep_clk", + .parent_names = (const char *[]){ + "video_cc_sleep_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_vcodec0_axi_clk = { + .halt_reg = 0x8f0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8f0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_vcodec0_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_vcodec0_core_clk = { + .halt_reg = 0x890, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x890, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_vcodec0_core_clk", + .parent_names = (const char *[]){ + "video_cc_venus_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_venus_ahb_clk = { + .halt_reg = 0x9b0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9b0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_venus_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_venus_ctl_axi_clk = { + .halt_reg = 0x8d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8d0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_venus_ctl_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_venus_ctl_core_clk = { + .halt_reg = 0x850, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x850, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_venus_ctl_core_clk", + 
.parent_names = (const char *[]){ + "video_cc_venus_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch video_cc_xo_clk = { + .halt_reg = 0xab8, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xab8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "video_cc_xo_clk", + .parent_names = (const char *[]){ + "video_cc_xo_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +struct clk_hw *video_cc_sm6150_hws[] = { + [CHIP_SLEEP_CLK] = &chip_sleep_clk.hw, +}; + +static struct clk_regmap *video_cc_sm6150_clocks[] = { + [VIDEO_CC_APB_CLK] = &video_cc_apb_clk.clkr, + [VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr, + [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr, + [VIDEO_CC_VCODEC0_AXI_CLK] = &video_cc_vcodec0_axi_clk.clkr, + [VIDEO_CC_VCODEC0_CORE_CLK] = &video_cc_vcodec0_core_clk.clkr, + [VIDEO_CC_VENUS_AHB_CLK] = &video_cc_venus_ahb_clk.clkr, + [VIDEO_CC_VENUS_CLK_SRC] = &video_cc_venus_clk_src.clkr, + [VIDEO_CC_VENUS_CTL_AXI_CLK] = &video_cc_venus_ctl_axi_clk.clkr, + [VIDEO_CC_VENUS_CTL_CORE_CLK] = &video_cc_venus_ctl_core_clk.clkr, + [VIDEO_CC_XO_CLK] = &video_cc_xo_clk.clkr, + [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr, + [VIDEO_PLL0_OUT_MAIN] = &video_pll0_out_main.clkr, +}; + +static const struct regmap_config video_cc_sm6150_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xb94, + .fast_io = true, +}; + +static const struct qcom_cc_desc video_cc_sm6150_desc = { + .config = &video_cc_sm6150_regmap_config, + .clks = video_cc_sm6150_clocks, + .num_clks = ARRAY_SIZE(video_cc_sm6150_clocks), + .hwclks = video_cc_sm6150_hws, + .num_hwclks = ARRAY_SIZE(video_cc_sm6150_hws), +}; + +static const struct of_device_id video_cc_sm6150_match_table[] = { + { .compatible = "qcom,videocc-sm6150" }, + { } +}; +MODULE_DEVICE_TABLE(of, video_cc_sm6150_match_table); + +static int video_cc_sm6150_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret = 0; + + vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx"); + if (IS_ERR(vdd_cx.regulator[0])) { + if (!(PTR_ERR(vdd_cx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_cx regulator\n"); + return PTR_ERR(vdd_cx.regulator[0]); + } + + regmap = qcom_cc_map(pdev, &video_cc_sm6150_desc); + if (IS_ERR(regmap)) { + pr_err("Failed to map the video_cc registers\n"); + return PTR_ERR(regmap); + } + + clk_alpha_pll_configure(&video_pll0_out_main, regmap, + &video_pll0_config); + + ret = qcom_cc_really_probe(pdev, &video_cc_sm6150_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register Video CC clocks\n"); + return ret; + } + + dev_info(&pdev->dev, "Registered Video CC clocks\n"); + + return ret; +} + +static struct platform_driver video_cc_sm6150_driver = { + .probe = video_cc_sm6150_probe, + .driver = { + .name = "video_cc-sm6150", + .of_match_table = video_cc_sm6150_match_table, + }, +}; + +static int __init video_cc_sm6150_init(void) +{ + return platform_driver_register(&video_cc_sm6150_driver); +} +subsys_initcall(video_cc_sm6150_init); + +static void __exit video_cc_sm6150_exit(void) +{ + platform_driver_unregister(&video_cc_sm6150_driver); +} +module_exit(video_cc_sm6150_exit); + +MODULE_DESCRIPTION("QTI VIDEO_CC sm6150 Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:video_cc-sm6150"); diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c 
b/drivers/clk/renesas/renesas-cpg-mssr.c index e580a5e6346c2533ab34dd12cda0dcabe7369476..30c23b882675a18e38ca099238bc3e35cb71c635 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -248,8 +248,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec, dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx, PTR_ERR(clk)); else - dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n", - clkspec->args[0], clkspec->args[1], clk, clk); + dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n", + clkspec->args[0], clkspec->args[1], clk, + clk_get_rate(clk)); return clk; } @@ -314,7 +315,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core, if (IS_ERR_OR_NULL(clk)) goto fail; - dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk); + dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); priv->clks[id] = clk; return; @@ -380,7 +381,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod, if (IS_ERR(clk)) goto fail; - dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk); + dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); priv->clks[id] = clk; return; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index cc6062049170eb0b5a8001955497ed59cfa8c639..4bb47be12c9e42b949bb9f6a20181ca910a31833 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -374,6 +374,14 @@ config ARM64_ERRATUM_858921 The workaround will be dynamically enabled when an affected core is detected. +config ARM_ARCH_TIMER_VCT_ACCESS + bool "Support for ARM architected timer virtual counter access in userspace" + default !ARM64 + depends on ARM_ARCH_TIMER + help + This option enables support for reading the ARM architected timer's + virtual counter in userspace. 
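For illustration only (not part of the patch): with CONFIG_ARM_ARCH_TIMER_VCT_ACCESS enabled, arch_counter_set_user_access() above leaves the CNTKCTL virtual-counter access bit set, so EL0 code can read the counter directly instead of trapping. A minimal userspace sketch of that access is below, assuming an AArch64 target; the inline asm, register names and timing loop are illustrative assumptions, not something this series adds.

/*
 * Minimal sketch: read the ARM architected timer virtual counter from
 * userspace. Faults (SIGILL) if EL0 counter access is trapped/disabled.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_cntvct(void)
{
	uint64_t val;

	/* Direct EL0 read of the virtual counter (CNTVCT_EL0) */
	asm volatile("mrs %0, cntvct_el0" : "=r" (val));
	return val;
}

static inline uint64_t read_cntfrq(void)
{
	uint64_t val;

	/* Counter frequency in Hz (CNTFRQ_EL0) */
	asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
	return val;
}

int main(void)
{
	uint64_t t0 = read_cntvct();
	/* ... workload being timed ... */
	uint64_t t1 = read_cntvct();

	printf("elapsed: %llu ticks at %llu Hz\n",
	       (unsigned long long)(t1 - t0),
	       (unsigned long long)read_cntfrq());
	return 0;
}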
+ config ARM_GLOBAL_TIMER bool "Support for the ARM global timer" if COMPILE_TEST select TIMER_OF if OF diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index f8df0d8bb3014323426e60e452fbf43f0b4f9db2..dd9614496f536e0558e5842cd6551208b6517022 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -777,7 +777,7 @@ static void arch_counter_set_user_access(void) */ if (arch_timer_this_cpu_has_cntvct_wa()) pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id()); - else + else if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS)) cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; arch_timer_set_cntkctl(cntkctl); diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 557ed25b42e3b77581a6862de92c2eeeb8d2a291..d175b9545581c25f0a24d96bc4b41a5c924b9e9e 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -20,6 +20,7 @@ #define TPM_SC 0x10 #define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3) #define TPM_SC_CMOD_DIV_DEFAULT 0x3 +#define TPM_SC_TOF_MASK (0x1 << 7) #define TPM_CNT 0x14 #define TPM_MOD 0x18 #define TPM_STATUS 0x1c @@ -29,6 +30,7 @@ #define TPM_C0SC_MODE_SHIFT 2 #define TPM_C0SC_MODE_MASK 0x3c #define TPM_C0SC_MODE_SW_COMPARE 0x4 +#define TPM_C0SC_CHF_MASK (0x1 << 7) #define TPM_C0V 0x24 static void __iomem *timer_base; @@ -205,9 +207,13 @@ static int __init tpm_timer_init(struct device_node *np) * 4) Channel0 disabled * 5) DMA transfers disabled */ + /* make sure counter is disabled */ writel(0, timer_base + TPM_SC); + /* TOF is W1C */ + writel(TPM_SC_TOF_MASK, timer_base + TPM_SC); writel(0, timer_base + TPM_CNT); - writel(0, timer_base + TPM_C0SC); + /* CHF is W1C */ + writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC); /* increase per cnt, div 8 by default */ writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index becc3cd0923eee3ad63adc0fb12ea01b34f2a6c7..55abc5474756d9c1f17526b79e1c29c65312208a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -697,6 +697,8 @@ static ssize_t store_##file_name \ struct cpufreq_policy new_policy; \ \ memcpy(&new_policy, policy, sizeof(*policy)); \ + new_policy.min = policy->user_policy.min; \ + new_policy.max = policy->user_policy.max; \ \ new_policy.min = new_policy.user_policy.min; \ new_policy.max = new_policy.user_policy.max; \ diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index ca38229b045ab288a2f250dddaf1b174e8c0572f..43e14bb512c8da4cd2c0f8a73e37a1fe1205a170 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. */ load = j_cdbs->prev_load; - } else if (unlikely(time_elapsed > 2 * sampling_rate && + } else if (unlikely((int)idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * clear prev_load to guarantee that the load will be * computed again next time. * - * Detecting this situation is easy: the governor's - * utilization update handler would not have run during - * CPU-idle periods. 
Hence, an unusually large - * 'time_elapsed' (as compared to the sampling rate) + * Detecting this situation is easy: an unusually large + * 'idle_time' (as compared to the sampling rate) * indicates this scenario. */ load = j_cdbs->prev_load; @@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) j_cdbs->prev_load = load; } - if (time_elapsed > 2 * sampling_rate) { - unsigned int periods = time_elapsed / sampling_rate; + if (unlikely((int)idle_time > 2 * sampling_rate)) { + unsigned int periods = idle_time / sampling_rate; if (periods < idle_periods) idle_periods = periods; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 20226d4243f2342dd83dd17ab49fa9a0a72d9e42..a905bbb45667b55ed0ba9e1fa614e6e26c2c5ddb 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -285,6 +285,7 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; static int hwp_active __read_mostly; +static int hwp_mode_bdw __read_mostly; static bool per_cpu_limits __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; @@ -1371,7 +1372,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); cpu->pstate.scaling = pstate_funcs.get_scaling(); cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + + if (hwp_active && !hwp_mode_bdw) { + unsigned int phy_max, current_max; + + intel_pstate_get_hwp_max(cpu->cpu, &phy_max, ¤t_max); + cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + } else { + cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + } if (pstate_funcs.get_aperf_mperf_shift) cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); @@ -2261,28 +2270,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; } static inline void intel_pstate_request_control_from_smm(void) {} #endif /* CONFIG_ACPI */ +#define INTEL_PSTATE_HWP_BROADWELL 0x01 + +#define ICPU_HWP(model, hwp_mode) \ + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode } + static const struct x86_cpu_id hwp_support_ids[] __initconst = { - { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, + ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL), + ICPU_HWP(X86_MODEL_ANY, 0), {} }; static int __init intel_pstate_init(void) { + const struct x86_cpu_id *id; int rc; if (no_load) return -ENODEV; - if (x86_match_cpu(hwp_support_ids)) { + id = x86_match_cpu(hwp_support_ids); + if (id) { copy_cpu_funcs(&core_funcs); if (!no_hwp) { hwp_active++; + hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; goto hwp_cpu_matched; } } else { - const struct x86_cpu_id *id; - id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) return -ENODEV; diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c index 7f7626f29a792e4eee4be1076cc15f50ca914fb9..735db9250a16bf12768085b831112c2410cb861c 100644 --- a/drivers/cpufreq/qcom-cpufreq.c +++ b/drivers/cpufreq/qcom-cpufreq.c @@ -457,7 +457,7 @@ static int msm_cpufreq_probe(struct platform_device *pdev) if (!IS_ERR(ftbl)) { for_each_possible_cpu(cpu) per_cpu(freq_table, cpu) = ftbl; - return 0; + goto out_register; } /* @@ -497,6 +497,7 @@ static int msm_cpufreq_probe(struct platform_device *pdev) per_cpu(freq_table, cpu) = ftbl; } +out_register: ret = 
register_pm_notifier(&msm_cpufreq_pm_notifier); if (ret) return ret; diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index e06605b21841acae0d1d4b5d5d6ea963a7d732b8..1d7d5d121d55bbd3cf70cae9904223c4831ee1bc 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -43,9 +43,31 @@ struct stop_psscr_table { static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly; -static u64 snooze_timeout __read_mostly; +static u64 default_snooze_timeout __read_mostly; static bool snooze_timeout_en __read_mostly; +static u64 get_snooze_timeout(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + int i; + + if (unlikely(!snooze_timeout_en)) + return default_snooze_timeout; + + for (i = index + 1; i < drv->state_count; i++) { + struct cpuidle_state *s = &drv->states[i]; + struct cpuidle_state_usage *su = &dev->states_usage[i]; + + if (s->disabled || su->disable) + continue; + + return s->target_residency * tb_ticks_per_usec; + } + + return default_snooze_timeout; +} + static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -56,7 +78,7 @@ static int snooze_loop(struct cpuidle_device *dev, local_irq_enable(); - snooze_exit_time = get_tb() + snooze_timeout; + snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index); ppc64_runlatch_off(); HMT_very_low(); while (!need_resched()) { @@ -463,11 +485,9 @@ static int powernv_idle_probe(void) cpuidle_state_table = powernv_states; /* Device tree can indicate more idle states */ max_idle_state = powernv_add_idle_states(); - if (max_idle_state > 1) { + default_snooze_timeout = TICK_USEC * tb_ticks_per_usec; + if (max_idle_state > 1) snooze_timeout_en = true; - snooze_timeout = powernv_states[1].target_residency * - tb_ticks_per_usec; - } } else return -ENODEV; diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index c7865bc273c60b7459e4e2632d8e8ed74b7caa1e..ced301631e3a4074ce5c1167afcd51247f75fd09 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -156,7 +156,7 @@ static uint32_t least_cluster_latency(struct lpm_cluster *cluster, uint32_t latency = 0; int i; - if (!cluster->list.next) { + if (list_empty(&cluster->list)) { for (i = 0; i < cluster->nlevels; i++) { level = &cluster->levels[i]; pwr_params = &level->pwr; diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8effad0e7d269fcf1835125f8107f0c4d0af3730..0359f0c484fcb2157b538720db267675fef1a229 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -770,4 +770,7 @@ config CRYPTO_DEV_ARTPEC6 To compile this driver as a module, choose M here. 
+if ARCH_QCOM +source drivers/crypto/msm/Kconfig +endif endif # CRYPTO_HW diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 54f3b375a453bbcfba6b9013020e2993004b24a1..a8a2a271b63d21224214e153545c09b25576953c 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -735,15 +735,18 @@ struct aead_edesc { * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist * @iv_dma: dma address of iv for checking continuity and link table + * @iv_dir: DMA mapping direction for IV * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables + * and IV */ struct ablkcipher_edesc { int src_nents; int dst_nents; dma_addr_t iv_dma; + enum dma_data_direction iv_dir; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; @@ -753,7 +756,8 @@ struct ablkcipher_edesc { static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, - dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, + dma_addr_t iv_dma, int ivsize, + enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma, int sec4_sg_bytes) { if (dst != src) { @@ -765,7 +769,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, } if (iv_dma) - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); + dma_unmap_single(dev, iv_dma, ivsize, iv_dir); if (sec4_sg_bytes) dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, DMA_TO_DEVICE); @@ -776,7 +780,7 @@ static void aead_unmap(struct device *dev, struct aead_request *req) { caam_unmap(dev, req->src, req->dst, - edesc->src_nents, edesc->dst_nents, 0, 0, + edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } @@ -789,7 +793,7 @@ static void ablkcipher_unmap(struct device *dev, caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, - edesc->iv_dma, ivsize, + edesc->iv_dma, ivsize, edesc->iv_dir, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } @@ -878,6 +882,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, ivsize, 0); + /* In case initial IV was generated, copy it in GIVCIPHER request */ + if (edesc->iv_dir == DMA_FROM_DEVICE) { + u8 *iv; + struct skcipher_givcrypt_request *greq; + + greq = container_of(req, struct skcipher_givcrypt_request, + creq); + iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) + + edesc->sec4_sg_bytes; + memcpy(greq->giv, iv, ivsize); + } + kfree(edesc); ablkcipher_request_complete(req, err); @@ -888,10 +904,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; +#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); -#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -909,14 +925,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, edesc->dst_nents > 1 ? 100 : req->nbytes, 1); ablkcipher_unmap(jrdev, edesc, req); - - /* - * The crypto API expects us to set the IV (req->info) to the last - * ciphertext block. 
- */ - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, - ivsize, 0); - kfree(edesc); ablkcipher_request_complete(req, err); @@ -1057,15 +1065,14 @@ static void init_authenc_job(struct aead_request *req, */ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req, - bool iv_contig) + struct ablkcipher_request *req) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); u32 *desc = edesc->hw_desc; - u32 out_options = 0, in_options; - dma_addr_t dst_dma, src_dma; - int len, sec4_sg_index = 0; + u32 out_options = 0; + dma_addr_t dst_dma; + int len; #ifdef DEBUG print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", @@ -1081,30 +1088,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - if (iv_contig) { - src_dma = edesc->iv_dma; - in_options = 0; - } else { - src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents + 1; - in_options = LDST_SGF; - } - append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); + append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize, + LDST_SGF); if (likely(req->src == req->dst)) { - if (edesc->src_nents == 1 && iv_contig) { - dst_dma = sg_dma_address(req->src); - } else { - dst_dma = edesc->sec4_sg_dma + - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } + dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; } else { if (edesc->dst_nents == 1) { dst_dma = sg_dma_address(req->dst); } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * sizeof(struct sec4_sg_entry); + dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) * + sizeof(struct sec4_sg_entry); out_options = LDST_SGF; } } @@ -1116,13 +1111,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, */ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, struct ablkcipher_edesc *edesc, - struct ablkcipher_request *req, - bool iv_contig) + struct ablkcipher_request *req) { struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); u32 *desc = edesc->hw_desc; - u32 out_options, in_options; + u32 in_options; dma_addr_t dst_dma, src_dma; int len, sec4_sg_index = 0; @@ -1148,15 +1142,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, } append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); - if (iv_contig) { - dst_dma = edesc->iv_dma; - out_options = 0; - } else { - dst_dma = edesc->sec4_sg_dma + - sec4_sg_index * sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } - append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); + dst_dma = edesc->sec4_sg_dma + sec4_sg_index * + sizeof(struct sec4_sg_entry); + append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF); } /* @@ -1245,7 +1233,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, GFP_DMA | flags); if (!edesc) { caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1449,8 +1437,7 @@ static int aead_decrypt(struct aead_request *req) * allocate and map the ablkcipher extended descriptor for ablkcipher */ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request - *req, int desc_bytes, - bool *iv_contig_out) + *req, int desc_bytes) { struct crypto_ablkcipher 
*ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); @@ -1459,8 +1446,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool in_contig; + dma_addr_t iv_dma; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1504,33 +1491,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } } - iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - if (mapped_src_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->src)) { - in_contig = true; - sec4_sg_ents = 0; - } else { - in_contig = false; - sec4_sg_ents = 1 + mapped_src_nents; - } + sec4_sg_ents = 1 + mapped_src_nents; dst_sg_idx = sec4_sg_ents; sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); - /* allocate space for base edesc and hw desc commands, link tables */ - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1539,13 +1513,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; + edesc->iv_dir = DMA_TO_DEVICE; - if (!in_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->src, mapped_src_nents, - edesc->sec4_sg + 1, 0); + /* Make sure IV is located in a DMAable area */ + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + memcpy(iv, req->info, ivsize); + + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); } + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0); + if (mapped_dst_nents > 1) { sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + dst_sg_idx, 0); @@ -1556,7 +1541,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } @@ -1569,7 +1554,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request sec4_sg_bytes, 1); #endif - *iv_contig_out = in_contig; return edesc; } @@ -1579,19 +1563,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req) struct crypto_ablkcipher 
*ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_enc, - ctx->sh_desc_enc_dma, edesc, req, iv_contig); + init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -1615,20 +1596,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) struct ablkcipher_edesc *edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, + ivsize, 0); + /* Create and submit job descriptor*/ - init_ablkcipher_job(ctx->sh_desc_dec, - ctx->sh_desc_dec_dma, edesc, req, iv_contig); + init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req); desc = edesc->hw_desc; #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", @@ -1653,8 +1639,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req) */ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( struct skcipher_givcrypt_request *greq, - int desc_bytes, - bool *iv_contig_out) + int desc_bytes) { struct ablkcipher_request *req = &greq->creq; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); @@ -1664,8 +1649,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; struct ablkcipher_edesc *edesc; - dma_addr_t iv_dma = 0; - bool out_contig; + dma_addr_t iv_dma; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1710,36 +1695,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( } } - /* - * Check if iv can be contiguous with source and destination. - * If so, include it. If not, create scatterlist. - */ - iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - sec4_sg_ents = mapped_src_nents > 1 ? 
mapped_src_nents : 0; dst_sg_idx = sec4_sg_ents; - if (mapped_dst_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->dst)) { - out_contig = true; - } else { - out_contig = false; - sec4_sg_ents += 1 + mapped_dst_nents; - } + sec4_sg_ents += 1 + mapped_dst_nents; - /* allocate space for base edesc and hw desc commands, link tables */ + /* + * allocate space for base edesc and hw desc commands, link tables, IV + */ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, GFP_DMA | flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } @@ -1748,24 +1717,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + desc_bytes; + edesc->iv_dir = DMA_FROM_DEVICE; + + /* Make sure IV is located in a DMAable area */ + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, + 0, DMA_NONE, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } if (mapped_src_nents > 1) sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg, 0); - if (!out_contig) { - dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, - iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->dst, mapped_dst_nents, - edesc->sec4_sg + dst_sg_idx + 1, 0); - } + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0); + sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + + dst_sg_idx + 1, 0); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); + iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } @@ -1778,7 +1756,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( sec4_sg_bytes, 1); #endif - *iv_contig_out = out_contig; return edesc; } @@ -1789,19 +1766,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - bool iv_contig = false; u32 *desc; int ret = 0; /* allocate extended descriptor */ - edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &iv_contig); + edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, - edesc, req, iv_contig); + edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "ablkcipher jobdesc@" __stringify(__LINE__) ": ", diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index b648e31673f905e7a3758991a82a017628a8f083..e7966e37a5aaeeb9fe259b5a2c3d460892b14e85 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -401,7 +401,7 @@ static int 
xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, * @assoclen: associated data length, in CAAM endianness * @assoclen_dma: bus physical mapped address of req->assoclen * @drv_req: driver-specific request structure - * @sgt: the h/w link table + * @sgt: the h/w link table, followed by IV */ struct aead_edesc { int src_nents; @@ -412,9 +412,6 @@ struct aead_edesc { unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; -#define CAAM_QI_MAX_AEAD_SG \ - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \ - sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -426,7 +423,7 @@ struct aead_edesc { * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table * @drv_req: driver-specific request structure - * @sgt: the h/w link table + * @sgt: the h/w link table, followed by IV */ struct ablkcipher_edesc { int src_nents; @@ -435,9 +432,6 @@ struct ablkcipher_edesc { int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; -#define CAAM_QI_MAX_ABLKCIPHER_SG \ - ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \ - sizeof(struct qm_sg_entry)) struct qm_sg_entry sgt[0]; }; @@ -649,17 +643,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, } } - if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) { + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) ivsize = crypto_aead_ivsize(aead); - iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, - dst_nents, 0, 0, op_type, 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } - } /* * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. @@ -667,16 +652,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, */ qm_sg_ents = 1 + !!ivsize + mapped_src_nents + (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); - if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", - qm_sg_ents, CAAM_QI_MAX_AEAD_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > + CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } - sg_table = &edesc->sgt[0]; - qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + + if (ivsize) { + u8 *iv = (u8 *)(sg_table + qm_sg_ents); + + /* Make sure IV is located in a DMAable area */ + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, + dst_nents, 0, 0, 0, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; @@ -813,15 +815,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) #endif ablkcipher_unmap(qidev, edesc, req); - qi_cache_free(edesc); + + /* In case initial IV was generated, copy it in GIVCIPHER request */ + if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) { + u8 *iv; + struct skcipher_givcrypt_request *greq; + + greq = container_of(req, struct skcipher_givcrypt_request, + creq); + iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes; + memcpy(greq->giv, iv, ivsize); + } /* * The crypto API expects us to set the IV (req->info) to the last * ciphertext block. This is used e.g. by the CTS mode. */ - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, - ivsize, 0); + if (edesc->drv_req.drv_ctx->op_type != DECRYPT) + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - + ivsize, ivsize, 0); + qi_cache_free(edesc); ablkcipher_request_complete(req, status); } @@ -836,9 +850,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; dma_addr_t iv_dma; - bool in_contig; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); - int dst_sg_idx, qm_sg_ents; + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; enum optype op_type = encrypt ? ENCRYPT : DECRYPT; @@ -885,55 +899,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request } } - iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - - if (mapped_src_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->src)) { - in_contig = true; - qm_sg_ents = 0; - } else { - in_contig = false; - qm_sg_ents = 1 + mapped_src_nents; - } + qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; qm_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } - /* allocate space for base edesc and link tables */ + /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(GFP_DMA | flags); if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, op_type, 0, 0); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + memcpy(iv, req->info, ivsize); + + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - sg_table = &edesc->sgt[0]; - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = ablkcipher_done; edesc->drv_req.drv_ctx = drv_ctx; - if (!in_contig) { - dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); - } + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); if (mapped_dst_nents > 1) sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + @@ -951,20 +963,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request fd_sgt = &edesc->drv_req.fd_sgt[0]; - if (!in_contig) - dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, - ivsize + req->nbytes, 0); - else - dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes, - 0); + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, + ivsize + req->nbytes, 0); if (req->src == req->dst) { - if (!in_contig) - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + - sizeof(*sg_table), req->nbytes, 0); - else - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), - req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + + sizeof(*sg_table), req->nbytes, 0); } else if (mapped_dst_nents > 1) { dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * sizeof(*sg_table), req->nbytes, 0); @@ -988,10 +992,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; struct ablkcipher_edesc *edesc; dma_addr_t iv_dma; - bool out_contig; + u8 *iv; int ivsize = crypto_ablkcipher_ivsize(ablkcipher); struct qm_sg_entry *sg_table, *fd_sgt; - int dst_sg_idx, qm_sg_ents; + int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, GIVENCRYPT); @@ -1039,46 +1043,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( 
mapped_dst_nents = src_nents; } - iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE); - if (dma_mapping_error(qidev, iv_dma)) { - dev_err(qidev, "unable to map IV\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0, 0); - return ERR_PTR(-ENOMEM); - } - qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0; dst_sg_idx = qm_sg_ents; - if (mapped_dst_nents == 1 && - iv_dma + ivsize == sg_dma_address(req->dst)) { - out_contig = true; - } else { - out_contig = false; - qm_sg_ents += 1 + mapped_dst_nents; - } - if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { - dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", - qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); + qm_sg_ents += 1 + mapped_dst_nents; + qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); + if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes + + ivsize > CAAM_QI_MEMCACHE_SIZE)) { + dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", + qm_sg_ents, ivsize); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } - /* allocate space for base edesc and link tables */ + /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(GFP_DMA | flags); if (!edesc) { dev_err(qidev, "could not allocate extended descriptor\n"); - caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, GIVENCRYPT, 0, 0); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + return ERR_PTR(-ENOMEM); + } + + /* Make sure IV is located in a DMAable area */ + sg_table = &edesc->sgt[0]; + iv = (u8 *)(sg_table + qm_sg_ents); + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE); + if (dma_mapping_error(qidev, iv_dma)) { + dev_err(qidev, "unable to map IV\n"); + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, + 0, 0, 0, 0); + qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; - sg_table = &edesc->sgt[0]; - edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = ablkcipher_done; edesc->drv_req.drv_ctx = drv_ctx; @@ -1086,11 +1089,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( if (mapped_src_nents > 1) sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0); - if (!out_contig) { - dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); - sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + - dst_sg_idx + 1, 0); - } + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0); + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1, + 0); edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, DMA_TO_DEVICE); @@ -1111,13 +1112,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src), req->nbytes, 0); - if (!out_contig) - dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * - sizeof(*sg_table), ivsize + req->nbytes, - 0); - else - dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), - ivsize + req->nbytes, 0); + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * + sizeof(*sg_table), ivsize + req->nbytes, 0); return edesc; } @@ -1127,6 +1123,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) struct ablkcipher_edesc 
*edesc; struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); int ret; if (unlikely(caam_congested)) @@ -1137,6 +1134,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt) if (IS_ERR(edesc)) return PTR_ERR(edesc); + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + if (!encrypt) + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - + ivsize, ivsize, 0); + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); if (!ret) { ret = -EINPROGRESS; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 7a897209f181383e7e0ae7ece652ffa25b709877..7ff4a25440acd9813d4fb19b9ef7355d6765451d 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); @@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); @@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, akcipher_request_complete(req, err); } +static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, + unsigned int nbytes, + unsigned int flags) +{ + struct sg_mapping_iter miter; + int lzeros, ents; + unsigned int len; + unsigned int tbytes = nbytes; + const u8 *buff; + + ents = sg_nents_for_len(sgl, nbytes); + if (ents < 0) + return ents; + + sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags); + + lzeros = 0; + len = 0; + while (nbytes > 0) { + while (len && !*buff) { + lzeros++; + len--; + buff++; + } + + if (len && *buff) + break; + + sg_miter_next(&miter); + buff = miter.addr; + len = miter.length; + + nbytes -= lzeros; + lzeros = 0; + } + + miter.consumed = lzeros; + sg_miter_stop(&miter); + nbytes -= lzeros; + + return tbytes - nbytes; +} + static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, size_t desclen) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = ctx->dev; + struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; + int sg_flags = (flags == GFP_ATOMIC) ? 
SG_MITER_ATOMIC : 0; int sgc; int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; int src_nents, dst_nents; + int lzeros; + + lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); + if (lzeros < 0) + return ERR_PTR(lzeros); + + req->src_len -= lzeros; + req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); src_nents = sg_nents_for_len(req->src, req->src_len); dst_nents = sg_nents_for_len(req->dst, req->dst_len); @@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; int sec4_sg_index = 0; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->d_dma)) { @@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; int sec4_sg_index = 0; size_t p_sz = key->p_sz; - size_t q_sz = key->p_sz; + size_t q_sz = key->q_sz; pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->p_dma)) { @@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = { .max_size = caam_rsa_max_size, .init = caam_rsa_init_tfm, .exit = caam_rsa_exit_tfm, + .reqsize = sizeof(struct caam_rsa_req_ctx), .base = { .cra_name = "rsa", .cra_driver_name = "rsa-caam", diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index fd145c46eae17a8928749df2b4e3bcf26a400730..82645bcf8b27e0ba094a8d7ff5f1628b84e05f77 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h @@ -95,6 +95,14 @@ struct caam_rsa_ctx { struct device *dev; }; +/** + * caam_rsa_req_ctx - per request context. + * @src: input scatterlist (stripped of leading zeros) + */ +struct caam_rsa_req_ctx { + struct scatterlist src[2]; +}; + /** * rsa_edesc - s/w-extended rsa descriptor * @src_nents : number of segments in input scatterlist diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h index dc451e0a43c5158d8658425956e6ba01276192d9..58fb3ed6e64424766ac6907b62f77a934bedcc78 100644 --- a/drivers/crypto/cavium/zip/common.h +++ b/drivers/crypto/cavium/zip/common.h @@ -46,8 +46,10 @@ #ifndef __COMMON_H__ #define __COMMON_H__ +#include #include #include +#include #include #include #include @@ -149,6 +151,25 @@ struct zip_operation { u32 sizeofzops; }; +static inline int zip_poll_result(union zip_zres_s *result) +{ + int retries = 1000; + + while (!result->s.compcode) { + if (!--retries) { + pr_err("ZIP ERR: request timed out"); + return -ETIMEDOUT; + } + udelay(10); + /* + * Force re-reading of compcode which is updated + * by the ZIP coprocessor. + */ + rmb(); + } + return 0; +} + /* error messages */ #define zip_err(fmt, args...) 
pr_err("ZIP ERR:%s():%d: " \ fmt "\n", __func__, __LINE__, ## args) diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 8df4d26cf9d46894e68c86f2032fb3f9b4ca899d..b92b6e7e100f307e4481f35912511df5cf3cebe6 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; - struct zip_state zip_state; + struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; @@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen, if (!zip) return -ENODEV; - memset(&zip_state, 0, sizeof(struct zip_state)); + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); + if (!zip_state) + return -ENOMEM; + zip_ops = &zip_ctx->zip_comp; zip_ops->input_len = slen; zip_ops->output_len = *dlen; memcpy(zip_ops->input, src, slen); - ret = zip_deflate(zip_ops, &zip_state, zip); + ret = zip_deflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } - + kfree(zip_state); return ret; } @@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; - struct zip_state zip_state; + struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; @@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen, if (!zip) return -ENODEV; - memset(&zip_state, 0, sizeof(struct zip_state)); + zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); + if (!zip_state) + return -ENOMEM; + zip_ops = &zip_ctx->zip_decomp; memcpy(zip_ops->input, src, slen); @@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen, zip_ops->input_len = slen; zip_ops->output_len = *dlen; - ret = zip_inflate(zip_ops, &zip_state, zip); + ret = zip_inflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } - + kfree(zip_state); return ret; } diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c index 9a944b8c1e29808b755baecd4ab526efc89fdf32..d7133f857d67198f551db5f39136e141aec8cd55 100644 --- a/drivers/crypto/cavium/zip/zip_deflate.c +++ b/drivers/crypto/cavium/zip/zip_deflate.c @@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, /* Stats update for compression requests submitted */ atomic64_inc(&zip_dev->stats.comp_req_submit); - while (!result_ptr->s.compcode) - continue; + /* Wait for completion or error */ + zip_poll_result(result_ptr); /* Stats update for compression requests completed */ atomic64_inc(&zip_dev->stats.comp_req_complete); diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c index 50cbdd83dbf21dd8c036213b3bdc0ea730e98429..7e0d73e2f89e1296ef4e36c5de26609e46a74410 100644 --- a/drivers/crypto/cavium/zip/zip_inflate.c +++ b/drivers/crypto/cavium/zip/zip_inflate.c @@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, /* Decompression requests submitted stats update */ atomic64_inc(&zip_dev->stats.decomp_req_submit); - while (!result_ptr->s.compcode) - continue; + /* Wait for completion or error */ + zip_poll_result(result_ptr); /* Decompression requests completed stats update */ atomic64_inc(&zip_dev->stats.decomp_req_complete); diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 
0b648217e8cbc5e1353b57319c4e7c461063a772..e342d2af11094d314dc1b5f1c567603b800ebba0 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -1397,8 +1397,8 @@ static void qcom_ice_debug(struct platform_device *pdev) qcom_ice_dump_test_bus(ice_dev); pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n", ice_dev->ice_instance_type, - (unsigned long long)ice_dev->ice_reset_start_time.tv64, - (unsigned long long)ice_dev->ice_reset_complete_time.tv64); + (unsigned long long)ice_dev->ice_reset_start_time, + (unsigned long long)ice_dev->ice_reset_complete_time); if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time, ice_dev->ice_reset_start_time)) > 0) @@ -1430,9 +1430,7 @@ static int qcom_ice_config_start(struct platform_device *pdev, struct request *req, struct ice_data_setting *setting, bool async) { - struct ice_crypto_setting *crypto_data; struct ice_crypto_setting pfk_crypto_data = {0}; - union map_info *info; int ret = 0; bool is_pfe = false; @@ -1455,7 +1453,6 @@ static int qcom_ice_config_start(struct platform_device *pdev, /* It is not an error to have a request with no bio */ return 0; } - //pr_err("%s bio is %pK\n", __func__, req->bio); ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async); if (is_pfe) { @@ -1470,30 +1467,6 @@ static int qcom_ice_config_start(struct platform_device *pdev, &pfk_crypto_data, setting); } - /* - * info field in req->end_io_data could be used by mulitple dm or - * non-dm entities. To ensure that we are running operation on dm - * based request, check BIO_DONT_FREE flag - */ - if (bio_flagged(req->bio, BIO_INLINECRYPT)) { - info = dm_get_rq_mapinfo(req); - if (!info) { - pr_debug("%s info not available in request\n", - __func__); - return 0; - } - - crypto_data = (struct ice_crypto_setting *)info->ptr; - if (!crypto_data) { - pr_err("%s crypto_data not available in request\n", - __func__); - return -EINVAL; - } - - return qti_ice_setting_config(req, pdev, - crypto_data, setting); - } - /* * It is not an error. 
If target is not req-crypt based, all request * from storage driver would come here to check if there is any ICE diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index c40ac30ec00262c247b06e1f07c2a7aab815a445..c1f8da958c78b1c87dd083b729bd4832ab9f907b 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1082,7 +1082,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) free_pages((unsigned long)sg_virt(ctx->sg), - get_order(ctx->sg->length)); + get_order(ctx->sg->length + ctx->bufcnt)); if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) kfree(ctx->sg); diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index 96072b9b55c43ee2d134f1451a25d56c2130ab8d..d7316f7a3a696177b1c5168e8226dccb2c912d40 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); crypto_cipher_set_flags(fallback, crypto_cipher_get_flags((struct diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 7394d35d5936aa403821b0a29bed5ee70e27668f..5285ece4f33a36df39bfd18068dd14ce6a1213db 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); - crypto_skcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 17d84217dd765a29d8d9e69921820ebd52823a65..02ba5f2aa0e6e8d09b431d9ad0019fa4c1549f94 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -48,8 +48,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); crypto_blkcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 8cd6e62e4c909b95373a60e0bfc8063002b5dda1..8bd9aff0f55fba6639b67147cf97c2fcfce12bfc 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_skcipher_driver_name(fallback)); crypto_skcipher_set_flags( fallback, diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 27a94a11900926d9614d5a747f7e95d07f21afb1..1c4b5b889fbacf181c074baf21d7ae0d65d64c3e 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); crypto_shash_set_flags(fallback, crypto_shash_get_flags((struct crypto_shash diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 52c2706df55602137e11648098aac154c1aa5f9d..70d8bcb2a93e8d2473d74529e0f815077cfa913c 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -255,6 +255,13 @@ config DEVFREQ_SPDM This driver 
allows any SPDM based client to vote for bandwidth. Used with the QTI SPDM Hypervisor Governor. +config DEVFREQ_GOV_CDSPL3 + bool "QTI DEVFREQ governor for CDSP L3 requests" + depends on QCOM_CDSP_RM + help + The CDSP resource manager uses this governor to vote for the L3 clock + on behalf of IO-coherent traffic generated from the CDSP. + source "drivers/devfreq/event/Kconfig" endif # PM_DEVFREQ diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 0f669c91334e7880c9ff486110f2403f0dbf16af..f819efc03a1e6b7e29dd629e99721dda0ed5e121 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON) += governor_bw_hwmon.o obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON) += governor_cache_hwmon.o obj-$(CONFIG_DEVFREQ_GOV_SPDM_HYP) += governor_spdm_bw_hyp.o obj-$(CONFIG_DEVFREQ_GOV_MEMLAT) += governor_memlat.o +obj-$(CONFIG_DEVFREQ_GOV_CDSPL3) += governor_cdsp_l3.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c index defd6ab97476e84745d78267151837ee797c7b0d..d8193289a32ee11b28ef94539a9bf1f3ac178bb9 100644 --- a/drivers/devfreq/arm-memlat-mon.c +++ b/drivers/devfreq/arm-memlat-mon.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -77,7 +77,7 @@ static unsigned long compute_freq(struct cpu_pmu_stats *cpustats, { ktime_t ts; unsigned int diff; - unsigned long freq = 0; + uint64_t freq = 0; ts = ktime_get(); diff = ktime_to_us(ktime_sub(ts, cpustats->prev_ts)); diff --git a/drivers/devfreq/governor_cdsp_l3.c b/drivers/devfreq/governor_cdsp_l3.c new file mode 100644 index 0000000000000000000000000000000000000000..a5ece4b43741ca5d50338c61b345eef22b357098 --- /dev/null +++ b/drivers/devfreq/governor_cdsp_l3.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) "governor_cdspl3: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "governor.h" + +struct cdspl3 { + struct device_node *of_node; + struct devfreq *df; + unsigned int l3_freq_hz; +}; + +static struct cdspl3 p_me; + +static int cdsp_l3_request_callback(unsigned int freq_khz) +{ + if (p_me.df) { + mutex_lock(&p_me.df->lock); + p_me.l3_freq_hz = freq_khz * 1000; + update_devfreq(p_me.df); + mutex_unlock(&p_me.df->lock); + } else { + pr_err("CDSP L3 request for %dKHz not served", freq_khz); + return -ENODEV; + } + return 0; +} + +static struct cdsprm_l3 cdsprm = { + .set_l3_freq = cdsp_l3_request_callback, +}; + +static int devfreq_get_target_freq(struct devfreq *df, + unsigned long *freq) +{ + if (freq) + *freq = (unsigned long)p_me.l3_freq_hz; + return 0; +} + +static int gov_start(struct devfreq *df) +{ + if (p_me.of_node != df->dev.parent->of_node) { + dev_err(df->dev.parent, + "Device match error in CDSP L3 frequency governor\n"); + return -ENODEV; + } + p_me.df = df; + p_me.l3_freq_hz = 0; + /* + * Send governor start message to CDSP RM driver + */ + cdsprm_register_cdspl3gov(&cdsprm); + return 0; +} + +static int gov_stop(struct devfreq *df) +{ + p_me.df = 0; + p_me.l3_freq_hz = 0; + /* + * Send governor stop message to CDSP RM driver + */ + cdsprm_unregister_cdspl3gov(); + return 0; +} + +static int devfreq_event_handler(struct devfreq *df, + unsigned int event, void *data) +{ + int ret; + + switch (event) { + case DEVFREQ_GOV_START: + ret = gov_start(df); + if (ret) + return ret; + dev_info(df->dev.parent, + "Successfully started CDSP L3 governor\n"); + break; + case DEVFREQ_GOV_STOP: + dev_info(df->dev.parent, + "Received stop CDSP L3 governor event\n"); + ret = gov_stop(df); + if (ret) + return ret; + break; + default: + break; + } + return 0; +} + +static struct devfreq_governor cdsp_l3_gov = { + .name = "cdspl3", + .get_target_freq = devfreq_get_target_freq, + .event_handler = devfreq_event_handler, +}; + +static int cdsp_l3_driver_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + p_me.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0); + if (!p_me.of_node) { + dev_err(dev, "Couldn't find a target device\n"); + return -ENODEV; + } + ret = devfreq_add_governor(&cdsp_l3_gov); + if (ret) + dev_err(dev, "Failed registering CDSP L3 requests %d\n", + ret); + return ret; +} + +static const struct of_device_id cdsp_l3_match_table[] = { + { .compatible = "qcom,cdsp-l3" }, + {} +}; + +static struct platform_driver cdsp_l3 = { + .probe = cdsp_l3_driver_probe, + .driver = { + .name = "cdsp-l3", + .of_match_table = cdsp_l3_match_table, + } +}; + +static int __init cdsp_l3_gov_module_init(void) +{ + return platform_driver_register(&cdsp_l3); + +} +module_init(cdsp_l3_gov_module_init); + +static void __exit cdsp_l3_gov_module_exit(void) +{ + devfreq_remove_governor(&cdsp_l3_gov); + platform_driver_unregister(&cdsp_l3); +} +module_exit(cdsp_l3_gov_module_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c index 0fa9a0a090bca10098653dc998cef653549c1b25..6132c9a4a1a92038aab1e50c7736bc72d84c6742 100644 --- a/drivers/esoc/esoc-mdm-4x.c +++ b/drivers/esoc/esoc-mdm-4x.c @@ -55,6 +55,8 @@ static const int required_gpios[] = { AP2MDM_STATUS, }; +void *ipc_log; + static void mdm_debug_gpio_show(struct mdm_ctrl *mdm) { struct device *dev = mdm->dev; @@ -83,10 +85,25 @@ static void 
mdm_debug_gpio_show(struct mdm_ctrl *mdm) __func__, MDM_GPIO(mdm, MDM2AP_VDDMIN)); } +static void mdm_debug_gpio_ipc_log(struct mdm_ctrl *mdm) +{ + esoc_mdm_log("MDM2AP_ERRFATAL gpio = %d\n", + MDM_GPIO(mdm, MDM2AP_ERRFATAL)); + esoc_mdm_log("AP2MDM_ERRFATAL gpio = %d\n", + MDM_GPIO(mdm, AP2MDM_ERRFATAL)); + esoc_mdm_log("MDM2AP_STATUS gpio = %d\n", + MDM_GPIO(mdm, MDM2AP_STATUS)); + esoc_mdm_log("AP2MDM_STATUS gpio = %d\n", + MDM_GPIO(mdm, AP2MDM_STATUS)); + esoc_mdm_log("AP2MDM_SOFT_RESET gpio = %d\n", + MDM_GPIO(mdm, AP2MDM_SOFT_RESET)); +} + static void mdm_enable_irqs(struct mdm_ctrl *mdm) { if (!mdm) return; + esoc_mdm_log("Enabling the interrupts\n"); if (mdm->irq_mask & IRQ_ERRFATAL) { enable_irq(mdm->errfatal_irq); mdm->irq_mask &= ~IRQ_ERRFATAL; @@ -105,6 +122,7 @@ void mdm_disable_irqs(struct mdm_ctrl *mdm) { if (!mdm) return; + esoc_mdm_log("Disabling the interrupts\n"); if (!(mdm->irq_mask & IRQ_ERRFATAL)) { disable_irq_nosync(mdm->errfatal_irq); mdm->irq_mask |= IRQ_ERRFATAL; @@ -192,6 +210,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) esoc->clink_ops->notify(ESOC_BOOT_DONE, esoc); } } + esoc_mdm_log("ESOC_PWR_ON: Setting AP2MDM_ERRFATAL = 0\n"); gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0); mdm->init = 1; mdm_do_first_power_on(mdm); @@ -206,11 +225,17 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) break; graceful_shutdown = true; if (!esoc->userspace_handle_shutdown) { + esoc_mdm_log( + "ESOC_PWR_OFF: sending sysmon-shutdown to modem\n"); ret = sysmon_send_shutdown(&esoc->subsys); if (ret) { + esoc_mdm_log( + "ESOC_PWR_OFF: sysmon-shutdown failed: %d\n", ret); dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n", ret); graceful_shutdown = false; + esoc_mdm_log( + "ESOC_PWR_OFF: forcefully powering-off modem\n"); } } else { esoc_clink_queue_request(ESOC_REQ_SEND_SHUTDOWN, esoc); @@ -236,13 +261,19 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) * monitored by multiple mdms(might be wrongly interpreted as * a primary crash). */ - if (esoc->statusline_not_a_powersource == false) + if (esoc->statusline_not_a_powersource == false) { + esoc_mdm_log( + "ESOC_FORCE_PWR_OFF: setting AP2MDM_STATUS = 0\n"); gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0); + } + esoc_mdm_log( + "ESOC_FORCE_PWR_OFF: Queueing request: ESOC_REQ_SHUTDOWN\n"); esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc); mdm_power_down(mdm); mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG); break; case ESOC_RESET: + esoc_mdm_log("ESOC_RESET: Resetting the modem\n"); mdm_toggle_soft_reset(mdm, false); break; case ESOC_PREPARE_DEBUG: @@ -252,8 +283,12 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) * an APQ crash, wait till mdm is ready for ramdumps. 
*/ mdm->ready = false; + esoc_mdm_log( + "ESOC_PREPARE_DEBUG: Cancelling the status check work\n"); cancel_delayed_work(&mdm->mdm2ap_status_check_work); if (!mdm->esoc->auto_boot) { + esoc_mdm_log( + "ESOC_PREPARE_DEBUG: setting AP2MDM_ERRFATAL = 1\n"); gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1); dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n"); @@ -266,6 +301,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) if (mdm->skip_restart_for_mdm_crash) break; + esoc_mdm_log("ESOC_EXE_DEBUG: Resetting the modem\n"); mdm->debug = 1; mdm_toggle_soft_reset(mdm, false); /* @@ -273,8 +309,12 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) * then power down the mdm and switch gpios to booting * config */ + esoc_mdm_log( + "ESOC_EXE_DEBUG: Waiting for ramdumps to be collected\n"); wait_for_completion(&mdm->debug_done); if (mdm->debug_fail) { + esoc_mdm_log( + "ESOC_EXE_DEBUG: Failed to collect the ramdumps\n"); dev_err(mdm->dev, "unable to collect ramdumps\n"); mdm->debug = 0; return -EIO; @@ -288,11 +328,13 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) * Deassert APQ to mdm err fatal * Power on the mdm */ + esoc_mdm_log("ESOC_EXIT_DEBUG: Setting AP2MDM_ERRFATAL = 0\n"); gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0); dev_dbg(mdm->dev, "exiting debug state after power on\n"); mdm->get_restart_reason = true; break; default: + esoc_mdm_log("Invalid command\n"); return -EINVAL; }; return 0; @@ -306,7 +348,11 @@ static void mdm2ap_status_check(struct work_struct *work) struct device *dev = mdm->dev; struct esoc_clink *esoc = mdm->esoc; + esoc_mdm_log( + "Powerup timer expired after images are transferred to modem\n"); + if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) { + esoc_mdm_log("MDM2AP_STATUS did not go high\n"); dev_dbg(dev, "MDM2AP_STATUS did not go high\n"); esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc); } @@ -336,14 +382,17 @@ static void mdm_get_restart_reason(struct work_struct *work) ret = sysmon_get_reason(&mdm->esoc->subsys, sfr_buf, sizeof(sfr_buf)); if (!ret) { + esoc_mdm_log("restart reason is %s\n", sfr_buf); dev_err(dev, "mdm restart reason is %s\n", sfr_buf); break; } msleep(SFR_RETRY_INTERVAL); } while (++ntries < SFR_MAX_RETRIES); - if (ntries == SFR_MAX_RETRIES) + if (ntries == SFR_MAX_RETRIES) { + esoc_mdm_log("restart reason not obtained. 
err: %d\n", ret); dev_dbg(dev, "%s: Error retrieving restart reason: %d\n", __func__, ret); + } mdm->get_restart_reason = false; } @@ -355,36 +404,53 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc) struct mdm_ctrl *mdm = get_esoc_clink_data(esoc); struct device *dev = mdm->dev; + esoc_mdm_log("Notification: %d\n", notify); + switch (notify) { case ESOC_IMG_XFER_DONE: - if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) + if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) { + esoc_mdm_log( + "ESOC_IMG_XFER_DONE: Begin timeout of %lu ms for modem_status\n", + MDM2AP_STATUS_TIMEOUT_MS); schedule_delayed_work(&mdm->mdm2ap_status_check_work, msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS)); + } break; case ESOC_BOOT_DONE: + esoc_mdm_log( + "ESOC_BOOT_DONE: Sending the notification: ESOC_RUN_STATE\n"); esoc_clink_evt_notify(ESOC_RUN_STATE, esoc); break; case ESOC_IMG_XFER_RETRY: + esoc_mdm_log("ESOC_IMG_XFER_RETRY: Resetting the device\n"); mdm->init = 1; mdm_toggle_soft_reset(mdm, false); break; case ESOC_IMG_XFER_FAIL: + esoc_mdm_log( + "ESOC_IMG_XFER_FAIL: Send notification: ESOC_INVALID_STATE\n"); esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc); break; case ESOC_BOOT_FAIL: + esoc_mdm_log( + "ESOC_BOOT_FAIL: Send notification: ESOC_INVALID_STATE\n"); esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc); break; case ESOC_PON_RETRY: + esoc_mdm_log( + "ESOC_PON_RETRY: Send notification: ESOC_RETRY_PON_EVT\n"); esoc_clink_evt_notify(ESOC_RETRY_PON_EVT, esoc); break; case ESOC_UPGRADE_AVAILABLE: break; case ESOC_DEBUG_DONE: + esoc_mdm_log("ESOC_DEBUG_DONE: Ramdumps collection done\n"); mdm->debug_fail = false; mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG); complete(&mdm->debug_done); break; case ESOC_DEBUG_FAIL: + esoc_mdm_log("ESOC_DEBUG_FAIL: Ramdumps collection failed\n"); mdm->debug_fail = true; complete(&mdm->debug_done); break; @@ -392,9 +458,13 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc) mdm_disable_irqs(mdm); status_down = false; dev_dbg(dev, "signal apq err fatal for graceful restart\n"); + esoc_mdm_log( + "ESOC_PRIMARY_CRASH: Setting AP2MDM_ERRFATAL = 1\n"); gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1); if (esoc->primary) break; + esoc_mdm_log( + "ESOC_PRIMARY_CRASH: Waiting for MDM2AP_STATUS to go LOW\n"); timeout = local_clock(); do_div(timeout, NSEC_PER_MSEC); timeout += MDM_MODEM_TIMEOUT; @@ -409,6 +479,8 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc) } while (!time_after64(now, timeout)); if (!status_down) { + esoc_mdm_log( + "ESOC_PRIMARY_CRASH: MDM2AP_STATUS didn't go LOW. Resetting modem\n"); dev_err(mdm->dev, "%s MDM2AP status did not go low\n", __func__); mdm_toggle_soft_reset(mdm, true); @@ -418,6 +490,8 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc) mdm_disable_irqs(mdm); mdm->debug = 0; mdm->ready = false; + esoc_mdm_log( + "ESOC_PRIMARY_REBOOT: Powering down the modem\n"); mdm_power_down(mdm); break; }; @@ -435,6 +509,7 @@ static irqreturn_t mdm_errfatal(int irq, void *dev_id) if (!mdm->ready) goto mdm_pwroff_irq; esoc = mdm->esoc; + esoc_mdm_log("MDM2AP_ERRFATAL IRQ received!\n"); dev_err(dev, "%s: mdm sent errfatal interrupt\n", __func__); subsys_set_crash_status(esoc->subsys_dev, true); @@ -442,6 +517,8 @@ static irqreturn_t mdm_errfatal(int irq, void *dev_id) esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc); return IRQ_HANDLED; mdm_pwroff_irq: + esoc_mdm_log( + "MDM2AP_ERRFATAL IRQ received before modem booted. 
Ignoring.\n"); dev_info(dev, "errfatal irq when in pwroff\n"); no_mdm_irq: return IRQ_HANDLED; @@ -458,15 +535,19 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id) return IRQ_HANDLED; dev = mdm->dev; esoc = mdm->esoc; + esoc_mdm_log("MDM2AP_STATUS IRQ received!\n"); /* * On auto boot devices, there is a possibility of receiving * status change interrupt before esoc_clink structure is * initialized. Ignore them. */ - if (!esoc) + if (!esoc) { + esoc_mdm_log("Unexpected IRQ. Ignoring.\n"); return IRQ_HANDLED; + } value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)); if (value == 0 && mdm->ready) { + esoc_mdm_log("Unexpected reset of external modem\n"); dev_err(dev, "unexpected reset external modem\n"); subsys_set_crash_status(esoc->subsys_dev, true); esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc); @@ -478,6 +559,8 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id) if (esoc->auto_boot && mdm->ready) return IRQ_HANDLED; + esoc_mdm_log( + "Modem ready. Cancelling the the status_check work\n"); cancel_delayed_work(&mdm->mdm2ap_status_check_work); dev_dbg(dev, "status = 1: mdm is now ready\n"); mdm->ready = true; @@ -887,30 +970,35 @@ static int sdx50m_setup_hw(struct mdm_ctrl *mdm, ret = mdm_dt_parse_gpios(mdm); if (ret) { + esoc_mdm_log("Failed to parse DT gpios\n"); dev_err(mdm->dev, "Failed to parse DT gpios\n"); goto err_destroy_wrkq; } ret = mdm_pon_dt_init(mdm); if (ret) { + esoc_mdm_log("Failed to parse PON DT gpios\n"); dev_err(mdm->dev, "Failed to parse PON DT gpio\n"); goto err_destroy_wrkq; } ret = mdm_pinctrl_init(mdm); if (ret) { + esoc_mdm_log("Failed to init pinctrl\n"); dev_err(mdm->dev, "Failed to init pinctrl\n"); goto err_destroy_wrkq; } ret = mdm_pon_setup(mdm); if (ret) { + esoc_mdm_log("Failed to setup PON\n"); dev_err(mdm->dev, "Failed to setup PON\n"); goto err_destroy_wrkq; } ret = mdm_configure_ipc(mdm, pdev); if (ret) { + esoc_mdm_log("Failed to configure the ipc\n"); dev_err(mdm->dev, "Failed to configure the ipc\n"); goto err_release_ipc; } @@ -935,10 +1023,12 @@ static int sdx50m_setup_hw(struct mdm_ctrl *mdm, ret = esoc_clink_register(esoc); if (ret) { + esoc_mdm_log("esoc registration failed\n"); dev_err(mdm->dev, "esoc registration failed\n"); goto err_free_irq; } dev_dbg(mdm->dev, "esoc registration done\n"); + esoc_mdm_log("Done configuring the GPIOs and esoc registration\n"); init_completion(&mdm->debug_done); INIT_WORK(&mdm->mdm_status_work, mdm_status_fn); @@ -949,6 +1039,8 @@ static int sdx50m_setup_hw(struct mdm_ctrl *mdm, mdm->esoc = esoc; mdm->init = 0; + mdm_debug_gpio_ipc_log(mdm); + return 0; err_free_irq: @@ -994,6 +1086,7 @@ static int mdm_probe(struct platform_device *pdev) const struct mdm_ops *mdm_ops; struct device_node *node = pdev->dev.of_node; struct mdm_ctrl *mdm; + int ret; match = of_match_node(mdm_dt_match, node); if (IS_ERR_OR_NULL(match)) @@ -1002,7 +1095,16 @@ static int mdm_probe(struct platform_device *pdev) mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL); if (IS_ERR_OR_NULL(mdm)) return PTR_ERR(mdm); - return mdm_ops->config_hw(mdm, mdm_ops, pdev); + + ipc_log = ipc_log_context_create(ESOC_MDM_IPC_PAGES, "esoc-mdm", 0); + if (!ipc_log) + dev_err(&pdev->dev, "Failed to setup IPC logging\n"); + + ret = mdm_ops->config_hw(mdm, mdm_ops, pdev); + if (ret) + ipc_log_context_destroy(ipc_log); + + return ret; } static struct platform_driver mdm_driver = { diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c index 
cba2128f5295af3c57826e893524d0562729ab94..7d477f325c28da3d594c810c881665178fc1b169 100644 --- a/drivers/esoc/esoc-mdm-drv.c +++ b/drivers/esoc/esoc-mdm-drv.c @@ -19,8 +19,25 @@ #include "esoc-mdm.h" #include "mdm-dbg.h" -/* Maximum number of powerup trial requests per session */ -#define ESOC_MAX_PON_REQ 2 +/* Default number of powerup trial requests per session */ +#define ESOC_DEF_PON_REQ 2 +static unsigned int n_pon_tries = ESOC_DEF_PON_REQ; +module_param(n_pon_tries, uint, 0644); +MODULE_PARM_DESC(n_pon_tries, +"Number of power-on retrials allowed upon boot failure"); + +enum esoc_boot_fail_action { + BOOT_FAIL_ACTION_RETRY, + BOOT_FAIL_ACTION_COLD_RESET, + BOOT_FAIL_ACTION_SHUTDOWN, + BOOT_FAIL_ACTION_PANIC, + BOOT_FAIL_ACTION_NOP, +}; + +static unsigned int boot_fail_action = BOOT_FAIL_ACTION_NOP; +module_param(boot_fail_action, uint, 0644); +MODULE_PARM_DESC(boot_fail_action, +"Actions: 0:Retry PON; 1:Cold reset; 2:Power-down; 3:APQ Panic; 4:No action"); enum esoc_pon_state { PON_INIT, @@ -65,6 +82,8 @@ static int esoc_msm_restart_handler(struct notifier_block *nb, if (action == SYS_RESTART) { if (mdm_dbg_stall_notify(ESOC_PRIMARY_REBOOT)) return NOTIFY_OK; + esoc_mdm_log( + "Reboot notifier: Notifying esoc of cold reboot\n"); dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n"); clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink); } @@ -74,23 +93,35 @@ static void mdm_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng) { struct mdm_drv *mdm_drv = to_mdm_drv(eng); + bool unexpected_state = false; switch (evt) { case ESOC_INVALID_STATE: + esoc_mdm_log( + "ESOC_INVALID_STATE: Calling complete with state: PON_FAIL\n"); mdm_drv->pon_state = PON_FAIL; complete(&mdm_drv->pon_done); break; case ESOC_RUN_STATE: + esoc_mdm_log( + "ESOC_RUN_STATE: Calling complete with state: PON_SUCCESS\n"); mdm_drv->pon_state = PON_SUCCESS; mdm_drv->mode = RUN, complete(&mdm_drv->pon_done); break; case ESOC_RETRY_PON_EVT: + esoc_mdm_log( + "ESOC_RETRY_PON_EVT: Calling complete with state: PON_RETRY\n"); mdm_drv->pon_state = PON_RETRY; complete(&mdm_drv->pon_done); break; case ESOC_UNEXPECTED_RESET: + esoc_mdm_log("evt_state: ESOC_UNEXPECTED_RESET\n"); + unexpected_state = true; case ESOC_ERR_FATAL: + if (!unexpected_state) + esoc_mdm_log("evt_state: ESOC_ERR_FATAL\n"); + /* * Modem can crash while we are waiting for pon_done during * a subsystem_get(). Setting mode to CRASH will prevent a @@ -98,12 +129,20 @@ static void mdm_handle_clink_evt(enum esoc_evt evt, * this by seting mode to CRASH only if device was up and * running. */ + if (mdm_drv->mode == CRASH) + esoc_mdm_log( + "Modem in crash state already. Ignoring.\n"); + if (mdm_drv->mode != RUN) + esoc_mdm_log("Modem not up. 
Ignoring.\n"); if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN) return; mdm_drv->mode = CRASH; + esoc_mdm_log("Starting SSR work\n"); queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work); break; case ESOC_REQ_ENG_ON: + esoc_mdm_log( + "evt_state: ESOC_REQ_ENG_ON; Registered a req engine\n"); complete(&mdm_drv->req_eng_wait); break; default: @@ -128,6 +167,8 @@ static void esoc_client_link_power_on(struct esoc_clink *esoc_clink, struct esoc_client_hook *client_hook; dev_dbg(&esoc_clink->dev, "Calling power_on hooks\n"); + esoc_mdm_log( + "Calling power_on hooks with crash state: %d\n", mdm_crashed); for (i = 0; i < ESOC_MAX_HOOKS; i++) { client_hook = esoc_clink->client_hook[i]; @@ -144,6 +185,8 @@ static void esoc_client_link_power_off(struct esoc_clink *esoc_clink, struct esoc_client_hook *client_hook; dev_dbg(&esoc_clink->dev, "Calling power_off hooks\n"); + esoc_mdm_log( + "Calling power_off hooks with crash state: %d\n", mdm_crashed); for (i = 0; i < ESOC_MAX_HOOKS; i++) { client_hook = esoc_clink->client_hook[i]; @@ -162,6 +205,8 @@ static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys) subsys); const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops; + esoc_mdm_log("MDM crashed notification from SSR\n"); + if (mdm_dbg_stall_notify(ESOC_PRIMARY_CRASH)) return; clink_ops->notify(ESOC_PRIMARY_CRASH, esoc_clink); @@ -176,7 +221,10 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys, struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink); const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops; + esoc_mdm_log("Shutdown request from SSR\n"); + if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) { + esoc_mdm_log("Shutdown in crash mode\n"); if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG)) /* We want to mask debug command. 
* In this case return success @@ -187,18 +235,23 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys, esoc_clink_queue_request(ESOC_REQ_CRASH_SHUTDOWN, esoc_clink); esoc_client_link_power_off(esoc_clink, true); + esoc_mdm_log("Executing the ESOC_PREPARE_DEBUG command\n"); ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG, esoc_clink); if (ret) { + esoc_mdm_log("ESOC_PREPARE_DEBUG command failed\n"); dev_err(&esoc_clink->dev, "failed to enter debug\n"); return ret; } mdm_drv->mode = IN_DEBUG; } else if (!force_stop) { - if (esoc_clink->subsys.sysmon_shutdown_ret) + esoc_mdm_log("Graceful shutdown mode\n"); + if (esoc_clink->subsys.sysmon_shutdown_ret) { + esoc_mdm_log( + "Executing the ESOC_FORCE_PWR_OFF command\n"); ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF, esoc_clink); - else { + } else { if (mdm_dbg_stall_cmd(ESOC_PWR_OFF)) /* Since power off command is masked * we return success, and leave the state @@ -206,9 +259,12 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys, */ return 0; dev_dbg(&esoc_clink->dev, "Sending sysmon-shutdown\n"); + esoc_mdm_log("Executing the ESOC_PWR_OFF command\n"); ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink); } if (ret) { + esoc_mdm_log( + "Executing the ESOC_PWR_OFF command failed\n"); dev_err(&esoc_clink->dev, "failed to exe power off\n"); return ret; } @@ -217,6 +273,7 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys, clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF, esoc_clink); mdm_drv->mode = PWR_OFF; } + esoc_mdm_log("Shutdown completed\n"); return 0; } @@ -225,6 +282,8 @@ static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink) struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink); struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink); + esoc_mdm_log("Doing cleanup\n"); + esoc_client_link_power_off(esoc_clink, false); mdm_disable_irqs(mdm); mdm_drv->pon_state = PON_INIT; @@ -232,6 +291,46 @@ static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink) reinit_completion(&mdm_drv->req_eng_wait); } +/* Returns 0 to proceed towards another retry, or an error code to quit */ +static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial) +{ + struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink); + + switch (boot_fail_action) { + case BOOT_FAIL_ACTION_RETRY: + mdm_subsys_retry_powerup_cleanup(esoc_clink); + esoc_mdm_log("Request to retry a warm reset\n"); + (*pon_trial)++; + break; + /* + * Issue a shutdown here and rerun the powerup again. + * This way it becomes a cold reset. Else, we end up + * issuing a cold reset & a warm reset back to back. 
+ */ + case BOOT_FAIL_ACTION_COLD_RESET: + mdm_subsys_retry_powerup_cleanup(esoc_clink); + esoc_mdm_log("Doing cold reset by power-down and warm reset\n"); + (*pon_trial)++; + mdm_power_down(mdm); + break; + case BOOT_FAIL_ACTION_PANIC: + esoc_mdm_log("Calling panic!!\n"); + panic("Panic requested on external modem boot failure\n"); + break; + case BOOT_FAIL_ACTION_NOP: + esoc_mdm_log("Leaving the modem in its current state\n"); + return -EIO; + case BOOT_FAIL_ACTION_SHUTDOWN: + default: + mdm_subsys_retry_powerup_cleanup(esoc_clink); + esoc_mdm_log("Shutdown the modem and quit\n"); + mdm_power_down(mdm); + return -EIO; + } + + return 0; +} + static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys) { int ret; @@ -240,34 +339,46 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys) subsys); struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink); const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops; - struct mdm_ctrl *mdm = get_esoc_clink_data(esoc_clink); int timeout = INT_MAX; u8 pon_trial = 1; + esoc_mdm_log("Powerup request from SSR\n"); + do { + esoc_mdm_log("Boot trial: %d\n", pon_trial); if (!esoc_clink->auto_boot && !esoc_req_eng_enabled(esoc_clink)) { + esoc_mdm_log("Wait for req eng registration\n"); dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n"); wait_for_completion(&mdm_drv->req_eng_wait); } + esoc_mdm_log("Req eng available\n"); if (mdm_drv->mode == PWR_OFF) { + esoc_mdm_log("In normal power-on mode\n"); if (mdm_dbg_stall_cmd(ESOC_PWR_ON)) return -EBUSY; + esoc_mdm_log("Executing the ESOC_PWR_ON command\n"); ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink); if (ret) { + esoc_mdm_log("ESOC_PWR_ON command failed\n"); dev_err(&esoc_clink->dev, "pwr on fail\n"); return ret; } esoc_client_link_power_on(esoc_clink, false); } else if (mdm_drv->mode == IN_DEBUG) { + esoc_mdm_log("In SSR power-on mode\n"); + esoc_mdm_log("Executing the ESOC_EXIT_DEBUG command\n"); ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink); if (ret) { + esoc_mdm_log( + "ESOC_EXIT_DEBUG command failed\n"); dev_err(&esoc_clink->dev, "cannot exit debug mode\n"); return ret; } mdm_drv->mode = PWR_OFF; + esoc_mdm_log("Executing the ESOC_PWR_ON command\n"); ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink); if (ret) { dev_err(&esoc_clink->dev, "pwr on fail\n"); @@ -285,19 +396,24 @@ static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys) */ if (esoc_clink->auto_boot) timeout = 10 * HZ; + esoc_mdm_log( + "Modem turned-on. Waiting for pon_done notification..\n"); ret = wait_for_completion_timeout(&mdm_drv->pon_done, timeout); if (mdm_drv->pon_state == PON_FAIL || ret <= 0) { dev_err(&esoc_clink->dev, "booting failed\n"); - mdm_subsys_retry_powerup_cleanup(esoc_clink); - mdm_power_down(mdm); - return -EIO; + esoc_mdm_log("booting failed\n"); + ret = mdm_handle_boot_fail(esoc_clink, &pon_trial); + if (ret) + return ret; } else if (mdm_drv->pon_state == PON_RETRY) { + esoc_mdm_log( + "Boot failed. 
Doing cleanup and attempting to retry\n"); pon_trial++; mdm_subsys_retry_powerup_cleanup(esoc_clink); } else if (mdm_drv->pon_state == PON_SUCCESS) { break; } - } while (pon_trial <= ESOC_MAX_PON_REQ); + } while (pon_trial <= n_pon_tries); return 0; } @@ -311,9 +427,14 @@ static int mdm_subsys_ramdumps(int want_dumps, subsys); const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops; + esoc_mdm_log("Ramdumps called from SSR\n"); + if (want_dumps) { + esoc_mdm_log("Executing the ESOC_EXE_DEBUG command\n"); ret = clink_ops->cmd_exe(ESOC_EXE_DEBUG, esoc_clink); if (ret) { + esoc_mdm_log( + "Failed executing the ESOC_EXE_DEBUG command\n"); dev_err(&esoc_clink->dev, "debugging failed\n"); return ret; } @@ -375,6 +496,7 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv) dev_dbg(&esoc_clink->dev, "dbg engine initialized\n"); debug_init_done = true; } + return 0; queue_err: esoc_clink_unregister_ssr(esoc_clink); diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c index 3f73364c2fedd739f86778da394fa7ad1e34daad..e1e335226867e9ef1b44519da51235aa5531726f 100644 --- a/drivers/esoc/esoc-mdm-pon.c +++ b/drivers/esoc/esoc-mdm-pon.c @@ -51,6 +51,11 @@ static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic) soft_reset_direction_assert = 1; soft_reset_direction_de_assert = 0; } + + esoc_mdm_log("RESET GPIO value (before doing a reset): %d\n", + gpio_get_value(MDM_GPIO(mdm, AP2MDM_SOFT_RESET))); + esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", + soft_reset_direction_assert); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_assert); /* @@ -64,6 +69,9 @@ static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic) * panic handler, which has to executed atomically. */ mdelay(100); + + esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", + soft_reset_direction_de_assert); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert); return 0; @@ -75,6 +83,7 @@ static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm) int pblrdy; struct device *dev = mdm->dev; + esoc_mdm_log("Powering on modem for the first time\n"); dev_dbg(dev, "Powering on modem for the first time\n"); if (mdm->esoc->auto_boot) return 0; @@ -82,6 +91,7 @@ static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm) mdm_toggle_soft_reset(mdm, false); /* Add a delay to allow PON sequence to complete*/ msleep(150); + esoc_mdm_log("Setting AP2MDM_STATUS = 1\n"); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1); if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) { for (i = 0; i < MDM_PBLRDY_CNT; i++) { @@ -98,8 +108,10 @@ static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm) * Send request for image. Let userspace confirm establishment of * link to external modem. */ - else + else { + esoc_mdm_log("Queueing the request: ESOC_REQ_IMG\n"); esoc_clink_queue_request(ESOC_REQ_IMG, mdm->esoc); + } return 0; } @@ -134,6 +146,7 @@ static int sdx50m_power_down(struct mdm_ctrl *mdm) struct device *dev = mdm->dev; int soft_reset_direction = mdm->soft_reset_inverted ? 
1 : 0; /* Assert the soft reset line whether mdm2ap_status went low or not */ + esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", soft_reset_direction); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction); dev_dbg(dev, "Doing a hard reset\n"); @@ -152,6 +165,7 @@ static int sdx50m_power_down(struct mdm_ctrl *mdm) static void mdm9x55_cold_reset(struct mdm_ctrl *mdm) { dev_dbg(mdm->dev, "Triggering mdm cold reset"); + gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !!mdm->soft_reset_inverted); @@ -168,6 +182,8 @@ static void mdm9x55_cold_reset(struct mdm_ctrl *mdm) static void sdx50m_cold_reset(struct mdm_ctrl *mdm) { dev_dbg(mdm->dev, "Triggering mdm cold reset"); + esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", + !!mdm->soft_reset_inverted); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !!mdm->soft_reset_inverted); @@ -177,6 +193,8 @@ static void sdx50m_cold_reset(struct mdm_ctrl *mdm) */ mdelay(600); + esoc_mdm_log("Setting AP2MDM_SOFT_RESET = %d\n", + !!mdm->soft_reset_inverted); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), !mdm->soft_reset_inverted); } diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h index ca96ce291ae4ad26d6a6204d08c712b316826ec4..81dd0dfe1cadf3c6ccf7397b6b42f660a77b8889 100644 --- a/drivers/esoc/esoc.h +++ b/drivers/esoc/esoc.h @@ -23,6 +23,18 @@ #include #include #include +#include + +#define ESOC_MDM_IPC_PAGES 10 + +extern void *ipc_log; + +#define esoc_mdm_log(__msg, ...) \ +do { \ + if (ipc_log) \ + ipc_log_string(ipc_log, \ + "[%s]: "__msg, __func__, ##__VA_ARGS__); \ +} while (0) #define ESOC_DEV_MAX 4 #define ESOC_NAME_LEN 20 diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c index 214a48883c373228a5e19779764b3ad2fded82a2..6777627daee5fbbdf579b212725dc9e9cc249936 100644 --- a/drivers/esoc/esoc_dev.c +++ b/drivers/esoc/esoc_dev.c @@ -140,13 +140,16 @@ void esoc_udev_handle_clink_req(enum esoc_req req, struct esoc_eng *eng) struct esoc_clink *esoc_clink = eng->esoc_clink; struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id); - if (!esoc_udev) + if (!esoc_udev) { + esoc_mdm_log("esoc_udev not found\n"); return; + } clink_req = (u32)req; err = kfifo_in_spinlocked(&esoc_udev->req_fifo, &clink_req, sizeof(clink_req), &esoc_udev->req_fifo_lock); if (err != sizeof(clink_req)) { + esoc_mdm_log("Unable to queue request %d; err: %d\n", req, err); pr_err("unable to queue request for %s\n", esoc_clink->name); return; } @@ -160,13 +163,16 @@ void esoc_udev_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng) struct esoc_clink *esoc_clink = eng->esoc_clink; struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id); - if (!esoc_udev) + if (!esoc_udev) { + esoc_mdm_log("esoc_udev not found\n"); return; + } clink_evt = (u32)evt; err = kfifo_in_spinlocked(&esoc_udev->evt_fifo, &clink_evt, sizeof(clink_evt), &esoc_udev->evt_fifo_lock); if (err != sizeof(clink_evt)) { + esoc_mdm_log("Unable to queue event %d; err: %d\n", evt, err); pr_err("unable to queue event for %s\n", esoc_clink->name); return; } @@ -230,25 +236,39 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case ESOC_REG_REQ_ENG: + esoc_mdm_log("ESOC_REG_REQ_ENG\n"); err = esoc_clink_register_req_eng(esoc_clink, &uhandle->eng); - if (err) + if (err) { + esoc_mdm_log("ESOC_REG_REQ_ENG failed: %d\n", err); return err; + } uhandle->req_eng_reg = true; break; case ESOC_REG_CMD_ENG: + esoc_mdm_log("ESOC_REG_CMD_ENG\n"); err = esoc_clink_register_cmd_eng(esoc_clink, &uhandle->eng); - if (err) 
+ if (err) { + esoc_mdm_log("ESOC_REG_CMD_ENG failed: %d\n", err); return err; + } uhandle->cmd_eng_reg = true; break; case ESOC_CMD_EXE: - if (esoc_clink->cmd_eng != &uhandle->eng) + if (esoc_clink->cmd_eng != &uhandle->eng) { + esoc_mdm_log("ESOC_CMD_EXE failed to access\n"); return -EACCES; + } get_user(esoc_cmd, (u32 __user *)arg); + esoc_mdm_log("ESOC_CMD_EXE: Executing esoc command: %u\n", + esoc_cmd); return clink_ops->cmd_exe(esoc_cmd, esoc_clink); case ESOC_WAIT_FOR_REQ: - if (esoc_clink->req_eng != &uhandle->eng) + if (esoc_clink->req_eng != &uhandle->eng) { + esoc_mdm_log("ESOC_WAIT_FOR_REQ: Failed to access\n"); return -EACCES; + } + esoc_mdm_log( + "ESOC_WAIT_FOR_REQ: Waiting for req event to arrive.\n"); err = wait_event_interruptible(esoc_udev->req_wait, !kfifo_is_empty(&esoc_udev->req_fifo)); if (!err) { @@ -256,27 +276,40 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd, sizeof(req), &esoc_udev->req_fifo_lock); if (err != sizeof(req)) { + esoc_mdm_log( + "ESOC_WAIT_FOR_REQ: Failed to read the event\n"); pr_err("read from clink %s req q failed\n", esoc_clink->name); return -EIO; } put_user(req, (unsigned int __user *)uarg); + esoc_mdm_log( + "ESOC_WAIT_FOR_REQ: Event arrived: %u\n", req); } return err; case ESOC_NOTIFY: get_user(esoc_cmd, (u32 __user *)arg); + esoc_mdm_log("ESOC_NOTIFY: Notifying esoc about cmd: %u\n", + esoc_cmd); clink_ops->notify(esoc_cmd, esoc_clink); break; case ESOC_GET_STATUS: clink_ops->get_status(&status, esoc_clink); + esoc_mdm_log( + "ESOC_GET_STATUS: Sending the status from esoc: %u\n", status); put_user(status, (unsigned int __user *)uarg); break; case ESOC_GET_ERR_FATAL: clink_ops->get_err_fatal(&status, esoc_clink); + esoc_mdm_log( + "ESOC_GET_ERR_FATAL: Sending err_fatal status from esoc: %u\n", + status); put_user(status, (unsigned int __user *)uarg); break; case ESOC_WAIT_FOR_CRASH: + esoc_mdm_log( + "ESOC_WAIT_FOR_CRASH: Waiting for evt to arrive..\n"); err = wait_event_interruptible(esoc_udev->evt_wait, !kfifo_is_empty(&esoc_udev->evt_fifo)); if (!err) { @@ -284,11 +317,15 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd, sizeof(evt), &esoc_udev->evt_fifo_lock); if (err != sizeof(evt)) { + esoc_mdm_log( + "ESOC_WAIT_FOR_CRASH: Failed to read event\n"); pr_err("read from clink %s evt q failed\n", esoc_clink->name); return -EIO; } put_user(evt, (unsigned int __user *)uarg); + esoc_mdm_log("ESOC_WAIT_FOR_CRASH: Event arrived: %u\n", + req); } return err; case ESOC_GET_LINK_ID: @@ -309,12 +346,14 @@ static int esoc_dev_open(struct inode *inode, struct file *file) esoc_udev = esoc_udev_get_by_minor(minor); if (!esoc_udev) { + esoc_mdm_log("failed to get udev\n"); pr_err("failed to get udev\n"); return -ENOMEM; } esoc_clink = get_esoc_clink(esoc_udev->clink->id); if (!esoc_clink) { + esoc_mdm_log("failed to get clink\n"); pr_err("failed to get clink\n"); return -ENOMEM; } @@ -330,6 +369,8 @@ static int esoc_dev_open(struct inode *inode, struct file *file) eng->handle_clink_req = esoc_udev_handle_clink_req; eng->handle_clink_evt = esoc_udev_handle_clink_evt; file->private_data = uhandle; + esoc_mdm_log( + "%s successfully attached to esoc driver\n", current->comm); return 0; } @@ -339,14 +380,23 @@ static int esoc_dev_release(struct inode *inode, struct file *file) struct esoc_uhandle *uhandle = file->private_data; esoc_clink = uhandle->esoc_clink; - if (uhandle->req_eng_reg) + if (uhandle->req_eng_reg) { + esoc_mdm_log("Unregistering req_eng\n"); esoc_clink_unregister_req_eng(esoc_clink, &uhandle->eng); - if 
(uhandle->cmd_eng_reg) + } else { + esoc_mdm_log("No req_eng to unregister\n"); + } + if (uhandle->cmd_eng_reg) { + esoc_mdm_log("Unregistering cmd_eng\n"); esoc_clink_unregister_cmd_eng(esoc_clink, &uhandle->eng); + } else { + esoc_mdm_log("No cmd_eng to unregister\n"); + } uhandle->req_eng_reg = false; uhandle->cmd_eng_reg = false; put_esoc_clink(esoc_clink); kfree(uhandle); + esoc_mdm_log("%s Unregistered with esoc\n", current->comm); return 0; } static const struct file_operations esoc_dev_fops = { diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 3f67b618b47e73d7b427c14f114225afaeb85a86..e098d7a7138a595e643a70817ff84408e9075d51 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -486,7 +486,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) } EXPORT_SYMBOL_GPL(extcon_sync); -int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, bool val) +int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, u8 val) { int index; diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index b9bd827caa22ca29fe5303bfdf33395831d4128a..1b4d465cc5d9f9f998869f80d3895095ea86c087 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -97,6 +97,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg, u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ? (phys_seed >> 32) & mask : TEXT_OFFSET; + /* + * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not + * be a multiple of EFI_KIMG_ALIGN, and we must ensure that + * we preserve the misalignment of 'offset' relative to + * EFI_KIMG_ALIGN so that statically allocated objects whose + * alignment exceeds PAGE_SIZE appear correctly aligned in + * memory. + */ + offset |= TEXT_OFFSET % EFI_KIMG_ALIGN; + /* * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index dce21c08623af115dd7d42e2f857af09a8959f17..3396bbcd6cdb94cc0ab6135dd1e77a9013aa20ff 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3296,6 +3296,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, struct gpio_desc *desc = NULL; int status; enum gpio_lookup_flags lookupflags = 0; + /* Maybe we have a device name, maybe not */ + const char *devname = dev ? dev_name(dev) : "?"; dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id); @@ -3324,8 +3326,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - /* If a connection label was passed use that, else use the device name as label */ - status = gpiod_request(desc, con_id ? con_id : dev_name(dev)); + /* + * If a connection label was passed use that, else attempt to use + * the device name as label + */ + status = gpiod_request(desc, con_id ? 
con_id : devname); if (status < 0) return ERR_PTR(status); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 660b3fbade4194f796ebe0be8e4fc7f7e9c46109..8a05efa7edf02b65f23d295684764b155032384a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -716,12 +716,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, struct timespec64 time; dev = kfd_device_by_id(args->gpu_id); - if (dev == NULL) - return -EINVAL; - - /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = - dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + if (dev) + /* Reading GPU clock counter from KGD */ + args->gpu_clock_counter = + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); + else + /* Node without GPU resource */ + args->gpu_clock_counter = 0; /* No access to rdtsc. Using raw monotonic time */ getrawmonotonic64(&time); diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index adf9ae0e0b7c9d3dfda8f21e39f6137b6eceab08..bac60e5c3c1c5279bae334ad5499165d241b355b 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -25,6 +25,15 @@ config DRM_ANALOGIX_ANX78XX the HDMI output of an application processor to MyDP or DisplayPort. +config DRM_ANALOGIX_ANX7625 + tristate "Analogix ANX7625 bridge" + select DRM_KMS_HELPER + ---help--- + ANX7625 is a DSI to DisplayPort bridge transmitter + driver. The driver supports direct DisplayPort + only; the USB Type-C interface is not supported by + this driver. + config DRM_DUMB_VGA_DAC tristate "Dumb VGA DAC Bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 60dab87e4783cf35f38bcccbe5152fefbe56d5df..2c431adc3c411fbdaf7470401ff4eb65b1a1cf92 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o obj-y += synopsys/ +obj-$(CONFIG_DRM_ANALOGIX_ANX7625) += analogix-anx7625.o diff --git a/drivers/gpu/drm/bridge/analogix-anx7625.c b/drivers/gpu/drm/bridge/analogix-anx7625.c new file mode 100644 index 0000000000000000000000000000000000000000..b16d6de2cabdac71c8e723b4ccd33225b2853477 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix-anx7625.c @@ -0,0 +1,1530 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * Copyright(c) 2016, Analogix Semiconductor. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "analogix-anx7625.h" + +#define TX_P0 0x70 +#define TX_P1 0x7A +#define TX_P2 0x72 +#define RX_P0 0x7e +#define RX_P1 0x84 +#define RX_P2 0x54 +#define TCPC_INTERFACE 0x58 + +#define ReadReg(addr, offset) ({\ + unsigned int buf;\ + reg_read(anx7625, addr, offset, &buf);\ + buf;\ +}) + +#define Read_Reg(addr, offset, buf) ({\ + reg_read(anx7625, addr, offset, buf);\ +}) + +#define ReadBlockReg(addr, offset, len, dat)\ + reg_read_block(anx7625, addr, offset, dat, len) + +#define WriteReg(addr, offset, val) ({\ + reg_write(anx7625, addr, offset, val);\ +}) + +#define WriteBlockReg(addr, offset, len, dat)\ + reg_write_block(anx7625, addr, offset, dat, len) + +#define sp_write_reg_or(address, offset, mask) \ +{ WriteReg(address, offset, ((unsigned char)ReadReg(address, offset) \ + | (mask))); } +#define sp_write_reg_and(address, offset, mask) \ +{ WriteReg(address, offset, ((unsigned char)ReadReg(address, offset) \ + &(mask))); } + +#define sp_write_reg_and_or(address, offset, and_mask, or_mask) \ +{ WriteReg(address, offset, (((unsigned char)ReadReg(address, offset)) \ + &and_mask) | (or_mask)); } +#define sp_write_reg_or_and(address, offset, or_mask, and_mask) \ +{ WriteReg(address, offset, (((unsigned char)ReadReg(address, offset)) \ + | or_mask) & (and_mask)); } + +struct anx7625_platform_data { + struct gpio_desc *gpiod_cdet; + struct gpio_desc *gpiod_p_on; + struct gpio_desc *gpiod_reset; + + int cdet_irq; + int intp_irq; +}; + +struct MIPI_Video_Format { + unsigned char timing_id; + unsigned char MIPI_video_type[32]; + unsigned char MIPI_lane_count; + unsigned long MIPI_pixel_frequency; /*Hz*/ + + unsigned long M; + unsigned long N; + unsigned char post_divider; + /* bit[7:4]: DIFF_I_RATIO, bit[3:0]: DIFF_K_RATIO; i.e. 0x84:0x1B. + * These settings affect ODFC PLL locking range. 
+ */ + unsigned char diff_ratio; + + unsigned char compress_ratio; + unsigned char video_3D_type; + unsigned char *pps_reg; + const struct RegisterValueConfig *custom_reg0; + const struct RegisterValueConfig *custom_reg1; + + struct TimingInfor { + unsigned int MIPI_HTOTAL; + unsigned int MIPI_HActive; + unsigned int MIPI_VTOTAL; + unsigned int MIPI_VActive; + + unsigned int MIPI_H_Front_Porch; + unsigned int MIPI_H_Sync_Width; + unsigned int MIPI_H_Back_Porch; + + + unsigned int MIPI_V_Front_Porch; + unsigned int MIPI_V_Sync_Width; + unsigned int MIPI_V_Back_Porch; + } MIPI_inputl[2]; +}; + +struct anx7625 { + struct drm_dp_aux aux; + struct drm_bridge bridge; + struct i2c_client *client; + struct edid *edid; + struct drm_dp_link link; + struct anx7625_platform_data pdata; + struct mutex lock; + int mode_idx; + + u16 chipid; + + bool powered; + bool enabled; + int connected; + bool hpd_status; + u8 sys_sta_bak; + + unsigned char last_read_DevAddr; +}; + +static void Reg_Access_Conflict_Workaround(struct anx7625 *anx7625, + unsigned char DevAddr) +{ + unsigned char RegAddr; + int ret = 0, i; + + if (DevAddr != anx7625->last_read_DevAddr) { + switch (DevAddr) { + case 0x54: + case 0x72: + default: + RegAddr = 0x00; + break; + + case 0x58: + RegAddr = 0x00; + break; + + case 0x70: + RegAddr = 0xD1; + break; + + case 0x7A: + RegAddr = 0x60; + break; + + case 0x7E: + RegAddr = 0x39; + break; + + case 0x84: + RegAddr = 0x7F; + break; + } + + anx7625->client->addr = (DevAddr >> 1); + for (i = 0; i < 5; i++) { + ret = i2c_smbus_write_byte_data(anx7625->client, + RegAddr, 0x00); + if (ret >= 0) + break; + pr_err("failed to write i2c addr=%x:%x, retry %d...\n", + DevAddr, RegAddr, i); + usleep_range(1000, 1100); + } + anx7625->last_read_DevAddr = DevAddr; + } +} + +static int reg_read(struct anx7625 *anx7625, + int addr, int offset, unsigned int *buf) +{ + int ret, i; + + Reg_Access_Conflict_Workaround(anx7625, addr); + anx7625->client->addr = (addr >> 1); + for (i = 0; i < 5; i++) { + ret = i2c_smbus_read_byte_data( + anx7625->client, offset); + if (ret >= 0) + break; + pr_err("failed to read anx7625 %x:%x, retry %d...\n", + addr, offset, i); + usleep_range(1000, 1100); + } + *buf = ret; + return 0; +} + +static int reg_write(struct anx7625 *anx7625, + int addr, int offset, unsigned int val) +{ + int ret, i; + + Reg_Access_Conflict_Workaround(anx7625, addr); + anx7625->client->addr = (addr >> 1); + for (i = 0; i < 5; i++) { + ret = i2c_smbus_write_byte_data( + anx7625->client, offset, val); + if (ret >= 0) + break; + pr_err("failed to write anx7625 %x:%x, retry %d...\n", + addr, offset, i); + usleep_range(1000, 1100); + } + return 0; +} + +static int reg_read_block(struct anx7625 *anx7625, + int addr, int offset, u8 *buf, int len) +{ + int ret, i; + + Reg_Access_Conflict_Workaround(anx7625, addr); + anx7625->client->addr = (addr >> 1); + for (i = 0; i < 5; i++) { + ret = i2c_smbus_read_i2c_block_data( + anx7625->client, offset, len, buf); + if (ret >= 0) + break; + pr_err("failed to read anx7625 %x:%x, retry %d...\n", + addr, offset, i); + usleep_range(1000, 1100); + } + return 0; +} + +static int reg_write_block(struct anx7625 *anx7625, + int addr, int offset, u8 *buf, int len) +{ + int ret, i; + + Reg_Access_Conflict_Workaround(anx7625, addr); + anx7625->client->addr = (addr >> 1); + for (i = 0; i < 5; i++) { + ret = i2c_smbus_write_i2c_block_data( + anx7625->client, offset, len, buf); + if (ret >= 0) + break; + pr_err("failed to write anx7625 %x:%x, retry %d...\n", + addr, offset, i); + 
usleep_range(1000, 1100); + } + return 0; +} + +#define mipi_pixel_frequency(id) \ + mipi_video_timing_table[id].MIPI_pixel_frequency +#define mipi_lane_count(id) \ + mipi_video_timing_table[id].MIPI_lane_count +#define mipi_m_value(id) \ + mipi_video_timing_table[id].M +#define mipi_n_value(id) \ + mipi_video_timing_table[id].N +#define mipi_post_divider(id) \ + mipi_video_timing_table[id].post_divider +#define mipi_diff_ratio(id) \ + mipi_video_timing_table[id].diff_ratio +#define mipi_compress_ratio(id) \ + mipi_video_timing_table[id].compress_ratio + +#define mipi_original_htotal(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_HTOTAL +#define mipi_original_hactive(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_HActive +#define mipi_original_vtotal(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_VTOTAL +#define mipi_original_vactive(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_VActive +#define mipi_original_hfp(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_H_Front_Porch +#define mipi_original_hsw(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_H_Sync_Width +#define mipi_original_hbp(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_H_Back_Porch +#define mipi_original_vfp(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_V_Front_Porch +#define mipi_original_vsw(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_V_Sync_Width +#define mipi_original_vbp(id) \ + mipi_video_timing_table[id].MIPI_inputl[0].MIPI_V_Back_Porch + +#define mipi_decompressed_htotal(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_HTOTAL +#define mipi_decompressed_hactive(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_HActive +#define mipi_decompressed_vtotal(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_VTOTAL +#define mipi_decompressed_vactive(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_VActive +#define mipi_decompressed_hfp(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_H_Front_Porch +#define mipi_decompressed_hsw(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_H_Sync_Width +#define mipi_decompressed_hbp(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_H_Back_Porch +#define mipi_decompressed_vfp(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_V_Front_Porch +#define mipi_decompressed_vsw(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_V_Sync_Width +#define mipi_decompressed_vbp(id) \ + mipi_video_timing_table[id].MIPI_inputl[1].MIPI_V_Back_Porch + +#define video_3d(id) mipi_video_timing_table[id].video_3D_type + +static unsigned char PPS_4K[] = { /*VC707 (DPI+DSC)*/ + 0x11, 0x00, 0x00, 0x89, 0x10, 0x80, 0x08, 0x70, + 0x0f, 0x00, 0x00, 0x08, 0x07, 0x80, 0x07, 0x80, + 0x02, 0x00, 0x04, 0xc0, 0x00, 0x20, 0x01, 0x1e, + 0x00, 0x1a, 0x00, 0x0c, 0x0d, 0xb7, 0x03, 0x94, + 0x18, 0x00, 0x10, 0xf0, 0x03, 0x0c, 0x20, 0x00, + 0x06, 0x0b, 0x0b, 0x33, 0x0e, 0x1c, 0x2a, 0x38, + 0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, + 0x7d, 0x7e, 0x01, 0x02, 0x01, 0x00, 0x09, 0x40, + 0x09, 0xbe, 0x19, 0xfc, 0x19, 0xfa, 0x19, 0xf8, + 0x1a, 0x38, 0x1a, 0x78, 0x1a, 0xb6, 0x2a, 0xf6, + 0x2b, 0x34, 0x2b, 0x74, 0x3b, 0x74, 0x6b, 0xf4, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static unsigned char PPS_AR[] = {/*1440x2560@70*/ + 0x11, 0x00, 0x00, 0x89, 0x30, 0x80, 0x0A, 0x00, + 0x05, 0xA0, 
0x00, 0x10, 0x05, 0xa0, 0x05, 0xa0, + 0x02, 0x00, 0x03, 0xd0, 0x00, 0x20, 0x02, 0x33, + 0x00, 0x14, 0x00, 0x0c, 0x06, 0x67, 0x02, 0x63, + 0x18, 0x00, 0x10, 0xf0, 0x03, 0x0c, 0x20, 0x00, + 0x06, 0x0b, 0x0b, 0x33, 0x0e, 0x1c, 0x2a, 0x38, + 0x46, 0x54, 0x62, 0x69, 0x70, 0x77, 0x79, 0x7b, + 0x7d, 0x7e, 0x01, 0x02, 0x01, 0x00, 0x09, 0x40, + 0x09, 0xbe, 0x19, 0xfc, 0x19, 0xfa, 0x19, 0xf8, + 0x1a, 0x38, 0x1a, 0x78, 0x1a, 0xb6, 0x2a, 0xf6, + 0x2b, 0x34, 0x2b, 0x74, 0x3b, 0x74, 0x6b, 0xf4, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static unsigned char PPS_Custom[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +static const struct RegisterValueConfig Bit_Matrix[] = { + {TX_P2, AUDIO_CONTROL_REGISTER, 0x80}, + {TX_P2, VIDEO_BIT_MATRIX_12, 0x18}, + {TX_P2, VIDEO_BIT_MATRIX_13, 0x19}, + {TX_P2, VIDEO_BIT_MATRIX_14, 0x1a}, + {TX_P2, VIDEO_BIT_MATRIX_15, 0x1b}, + {TX_P2, VIDEO_BIT_MATRIX_16, 0x1c}, + {TX_P2, VIDEO_BIT_MATRIX_17, 0x1d}, + {TX_P2, VIDEO_BIT_MATRIX_18, 0x1e}, + {TX_P2, VIDEO_BIT_MATRIX_19, 0x1f}, + {TX_P2, VIDEO_BIT_MATRIX_20, 0x20}, + {TX_P2, VIDEO_BIT_MATRIX_21, 0x21}, + {TX_P2, VIDEO_BIT_MATRIX_22, 0x22}, + {TX_P2, VIDEO_BIT_MATRIX_23, 0x23}, + {0x00, 0x00, 0x00} +}; + +static struct MIPI_Video_Format mipi_video_timing_table[] = { + /* lane_count--pixel_clk-----M---N--div- */ + /* -diff--compr--3d--table--custom0--custom1*/ + /* original timing */ + /* total-H active-Vtotal-V active-HFP-HSW-HBP-VFP-VSW-VBP*/ + /* decompressed timing */ + /* tota-H active-Vtotal-V active-HFP-HSW-HBP-VFP-VSW-VBP*/ + { + 0, "720x480@60", 3, 27000000, 0xC00000, 0x100000, 0x0B, + 0x3B, 0, VIDEO_3D_NONE, NULL, Bit_Matrix, NULL, + { { 858, 720, 525, 480, 16, 60, 62, 10, 6, 29 } } + }, + { + 1, "1280X720P@60", 3, 74250000, 0xB00000, 0x080000, 0x07, + 0x3A, 0, VIDEO_3D_NONE, NULL, Bit_Matrix, NULL, + { { 1650, 1280, 750, 720, 110, 40, 220, 5, 5, 20 } } + }, + { + 2, "1920x1080p@30", 3, 74000000, 0x940000, 0x06C000, 0x07, + 0x3B, 0, VIDEO_3D_NONE, NULL, Bit_Matrix, NULL, + { { 2200, 1920, 1125, 1080, 88, 44, 148, 4, 5, 36 } } + }, + { + 3, "1920x1080p@60", 3, 148500000, 0xB00000, 0x080000, 0x03, + 0x37, 0, VIDEO_3D_NONE, NULL, Bit_Matrix, NULL, + { { 2200, 1920, 1125, 1080, 88, 44, 148, 4, 5, 36 } } + }, + /*MTK 4K24 DPI*/ + { + 4, "3840x2160@24", 3, 297000000, 0xB00000, 0x080000, 0x01, + 0x37, 3, VIDEO_3D_NONE, PPS_4K, Bit_Matrix, NULL, + { { 1650, 1280, 2250, 2160, 242, 30, 98, 8, 10, 72 }, + { 4950, 3840, 2250, 2160, 726, 90, 294, 8, 10, 72 } + } + }, + /*MTK 4K30 DPI*/ + { + 5, "3840x2160@30", 3, 297000000, 0xB00000, 0x080000, 0x01, + 0x37, 3, 
VIDEO_3D_NONE, PPS_4K, Bit_Matrix, NULL, + { { 1474, 1280, 2250, 2160, 66, 30, 98, 8, 10, 72 }, + { 4422, 3840, 2250, 2160, 198, 90, 294, 8, 10, 72 } + } + }, + + {/*DSI*/ + 6, "720x480@60", 3, 27000000, 0xC00000, 0x100000, 0x0B, + 0x3B, 0, VIDEO_3D_NONE, NULL, NULL, NULL, + { { 858, 720, 525, 480, 16, 60, 62, 10, 6, 29 } } + }, + {/*DSI*/ + 7, "1280X720P@60", 3, 74250000, 0xB00000, 0x080000, 0x07, + 0x3A, 0, VIDEO_3D_NONE, NULL, NULL, NULL, + { { 1650, 1280, 750, 720, 110, 40, 220, 5, 5, 20 } } + }, + {/*DSI*/ + 8, "1920x1080p@30", 3, 74250000, 0xB00000, 0x080000, 0x07, + 0x3B, 0, VIDEO_3D_NONE, NULL, NULL, NULL, + { { 2200, 1920, 1125, 1080, 88, 44, 148, 4, 5, 36 } } + }, + {/*DSI*/ + 9, "1920x1080p@60", 3, 148500000, 0xB00000, 0x080000, 0x03, + 0x37, 0, VIDEO_3D_NONE, NULL, NULL, NULL, + { { 2200, 1920, 1125, 1080, 88, 44, 148, 4, 5, 36 } } + }, + + /* 3840x2160p24 - MTK X30 -DSI*/ + {/*DSI*/ + 10, "3840x2160p24", 3, 268176696, 0xAA808D, 0x089544, 0x01, + 0x37, 3, VIDEO_3D_NONE, PPS_4K, NULL, NULL, + { { 1650, 1280, 2250, 2160, 242, 30, 98, 8, 10, 72 }, + { 4950, 3840, 2250, 2160, 726, 90, 294, 8, 10, 72 } + } + }, + /* 3840x2160p30 3:1 DSC - MTK X30 -DSI*/ + {/*DSI*/ + 11, "1280x2160p30", 3, 297000000, 0xA7B3AB, 0x07A120, 0x01, + 0x37, 3, VIDEO_3D_NONE, PPS_4K, NULL, NULL, + { { 1467, 1280, 2250, 2160, 66, 30, 91, 8, 10, 72 }, + { 4400, 3840, 2250, 2160, 198, 90, 272, 8, 10, 72 } + } + }, + + { + 12, "1440X2560P@70", 3, 285000000, 0xB00000, 0x080000, 0x01, + 0x37, 3, VIDEO_3D_NONE, PPS_AR, Bit_Matrix, NULL, + { {524, 480, 2576, 2560, 24, 10, 12, 6, 8, 2 }, + {1580, 1440, 2576, 2560, 80, 20, 40, 6, 8, 2} + } + }, + { + 13, "********@60", 0, 0, 0, 0, 0, + 0, 0, VIDEO_3D_NONE, NULL, NULL, NULL, + { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } + }, + { + 14, "********@60", 0, 0, 0, 0, 0, + 0, 0, VIDEO_3D_NONE, NULL, NULL, NULL, + { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } + }, + { + 15, "custom@DPI/DSI", 3, 297000000, 0xB00000, 0x080000, 0x01, + 0x37, 3, VIDEO_3D_NONE, PPS_Custom, Bit_Matrix, NULL, + { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + } + }, +}; + +static inline struct anx7625 *bridge_to_anx7625(struct drm_bridge *bridge) +{ + return container_of(bridge, struct anx7625, bridge); +} + +#define write_dpcd_addr(addrh, addrm, addrl) \ + do { \ + unsigned int temp; \ + if (ReadReg(RX_P0, AP_AUX_ADDR_7_0) != (unchar)addrl) \ + WriteReg(RX_P0, AP_AUX_ADDR_7_0, (unchar)addrl); \ + if (ReadReg(RX_P0, AP_AUX_ADDR_15_8) != (unchar)addrm) \ + WriteReg(RX_P0, AP_AUX_ADDR_15_8, (unchar)addrm); \ + Read_Reg(RX_P0, AP_AUX_ADDR_19_16, &temp); \ + if ((unchar)(temp & 0x0F) != ((unchar)addrh & 0x0F)) \ + WriteReg(RX_P0, AP_AUX_ADDR_19_16, \ + (temp & 0xF0) | ((unchar)addrh)); \ + } while (0) + +static void wait_aux_op_finish(struct anx7625 *anx7625, unchar *err_flag) +{ + unchar cnt; + uint c; + + *err_flag = 0; + cnt = 150; + while (ReadReg(RX_P0, AP_AUX_CTRL_STATUS) & AP_AUX_CTRL_OP_EN) { + usleep_range(2000, 2100); + if ((cnt--) == 0) { + TRACE("aux operate failed!\n"); + *err_flag = 1; + break; + } + } + + Read_Reg(RX_P0, AP_AUX_CTRL_STATUS, &c); + if (c & 0x0F) { + TRACE1("wait aux operation status %02x\n", (uint)c); + *err_flag = 1; + } +} + +unchar sp_tx_aux_dpcdread_bytes(struct anx7625 *anx7625, + unchar addrh, unchar addrm, unchar addrl, + unchar cCount, unchar *pBuf) +{ + uint c, i; + unchar bOK; + + /*command and length*/ + c = ((cCount - 1) << 4) | 0x09; + WriteReg(RX_P0, AP_AUX_COMMAND, c); + /*address*/ + write_dpcd_addr(addrh, addrm, addrl); + /*aux en*/ + 
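+	/*
+	 * Descriptive note (derived from the surrounding code): setting
+	 * AP_AUX_CTRL_OP_EN starts the AUX transaction and the controller
+	 * clears the bit when it completes.  wait_aux_op_finish() below
+	 * polls for that (150 tries of ~2 ms each) and then checks the low
+	 * status nibble before the data is read back from AP_AUX_BUFF_START.
+	 */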
sp_write_reg_or(RX_P0, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); + usleep_range(2000, 2100); + /* TRACE3("auxch addr = 0x%02x%02x%02x\n", addrh,addrm,addrl);*/ + wait_aux_op_finish(anx7625, &bOK); + if (bOK == AUX_ERR) { + TRACE("aux read failed\n"); + return AUX_ERR; + } + + for (i = 0; i < cCount; i++) { + Read_Reg(RX_P0, AP_AUX_BUFF_START + i, &c); + *(pBuf + i) = c; + /*TRACE2("Buf[%d] = 0x%02x\n", (uint)i, *(pBuf + i));*/ + if (i >= MAX_BUF_CNT) + break; + } + + return AUX_OK; +} + +unchar sp_tx_aux_dpcdwrite_bytes(struct anx7625 *anx7625, + unchar addrh, unchar addrm, unchar addrl, + unchar cCount, unchar *pBuf) +{ + unchar c, i, ret; + + /*command and length*/ + c = ((cCount - 1) << 4) | 0x08; + WriteReg(RX_P0, AP_AUX_COMMAND, c); + /*address*/ + write_dpcd_addr(addrh, addrm, addrl); + /*data*/ + for (i = 0; i < cCount; i++) { + c = *pBuf; + pBuf++; + WriteReg(RX_P0, AP_AUX_BUFF_START + i, c); + + if (i >= 15) + break; + } + /*aux en*/ + sp_write_reg_or(RX_P0, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); + wait_aux_op_finish(anx7625, &ret); + TRACE("aux write done\n"); + return ret; +} + +static unchar sp_tx_aux_wr(struct anx7625 *anx7625, unchar offset) +{ + unchar c; + + WriteReg(RX_P0, AP_AUX_BUFF_START, offset); + WriteReg(RX_P0, AP_AUX_COMMAND, 0x04); + sp_write_reg_or(RX_P0, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); + wait_aux_op_finish(anx7625, &c); + + return c; +} + +static unchar sp_tx_aux_rd(struct anx7625 *anx7625, unchar len_cmd) +{ + unchar c; + + WriteReg(RX_P0, AP_AUX_COMMAND, len_cmd); + sp_write_reg_or(RX_P0, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); + wait_aux_op_finish(anx7625, &c); + + return c; +} + +static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct anx7625 *anx7625 = container_of(aux, struct anx7625, aux); + u8 *buffer = msg->buffer; + int err = 0; + + if (!buffer || !msg->size) + return 0; + + if ((msg->request & DP_AUX_NATIVE_READ) == DP_AUX_NATIVE_READ) { + err = sp_tx_aux_dpcdread_bytes(anx7625, + (msg->address >> 16) & 0xff, + (msg->address >> 8) & 0xff, + (msg->address) & 0xff, + msg->size, + buffer); + } else if ((msg->request & DP_AUX_NATIVE_WRITE) == + DP_AUX_NATIVE_WRITE) { + err = sp_tx_aux_dpcdwrite_bytes(anx7625, + (msg->address >> 16) & 0xff, + (msg->address >> 8) & 0xff, + (msg->address) & 0xff, + msg->size, + buffer); + } else if ((msg->request & DP_AUX_I2C_READ) == DP_AUX_I2C_READ) { + err = sp_tx_aux_rd(anx7625, ((msg->size - 1) << 4) | 0x01); + if (!err) { + ReadBlockReg(RX_P0, AP_AUX_BUFF_START, + msg->size, buffer); + } + } else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) { + WriteReg(RX_P0, AP_AUX_ADDR_7_0, (msg->address) & 0xff); + WriteReg(RX_P0, AP_AUX_ADDR_15_8, 0); + sp_write_reg_and(RX_P0, AP_AUX_ADDR_19_16, 0xf0); + err = sp_tx_aux_wr(anx7625, buffer[0]); + } + + msg->reply = DP_AUX_I2C_REPLY_ACK; + + if (err) + pr_err("anx7625 aux transfer failed %d\n", err); + + return msg->size; +} + +static int anx7625_enable_interrupts(struct anx7625 *anx7625) +{ + /* enable all interrupts */ + WriteReg(RX_P0, INTERFACE_INTR_MASK, 0x7f); + + return 0; +} + +static int anx7625_disable_interrupts(struct anx7625 *anx7625) +{ + /* disable all interrupts */ + WriteReg(RX_P0, INTERFACE_INTR_MASK, 0xff); + + return 0; +} + +static int anx7625_poweroff(struct anx7625 *anx7625) +{ + struct anx7625_platform_data *pdata = &anx7625->pdata; + + if (!anx7625->powered) + return 0; + + anx7625_disable_interrupts(anx7625); + + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + usleep_range(1000, 2000); + + 
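+	/*
+	 * Power-down sequence: RESET_N is driven low first, then the power
+	 * enable is dropped; anx7625_poweron() below applies the reverse
+	 * order with ~10 ms settle time after each GPIO change.
+	 */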
gpiod_set_value_cansleep(pdata->gpiod_p_on, 0); + usleep_range(1000, 2000); + + anx7625->powered = false; + return 0; +} + +static int anx7625_poweron(struct anx7625 *anx7625) +{ + struct anx7625_platform_data *pdata = &anx7625->pdata; + + if (anx7625->powered) + return 0; + + gpiod_set_value_cansleep(pdata->gpiod_p_on, 1); + usleep_range(10000, 11000); + + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); + usleep_range(10000, 11000); + + /* setup clock */ + WriteReg(RX_P0, XTAL_FRQ_SEL, XTAL_FRQ_27M); + + /*First, reset main ocm*/ + WriteReg(RX_P0, 0x88, 0x40); + + /* disable PD */ + WriteReg(RX_P0, AP_AV_STATUS, AP_DISABLE_PD); + + /*after configuration, start main ocm running.*/ + WriteReg(RX_P0, 0x88, 0x00); + + /* enable interrupt */ + anx7625_enable_interrupts(anx7625); + + anx7625->powered = true; + return 0; +} + +static void DSI_Video_Timing_Configuration(struct anx7625 *anx7625) +{ + int table_id = anx7625->mode_idx; + + /*configure clock*/ + WriteReg(RX_P0, PIXEL_CLOCK_L, + (mipi_pixel_frequency(table_id) / 1000000) & 0xFF); + WriteReg(RX_P0, PIXEL_CLOCK_H, + (mipi_pixel_frequency(table_id) / 1000000) >> 8); + /*lane count*/ + sp_write_reg_and(RX_P1, MIPI_LANE_CTRL_0, 0xfc); + sp_write_reg_or(RX_P1, MIPI_LANE_CTRL_0, + mipi_lane_count(table_id)); + /*Htotal*/ + WriteReg(RX_P2, HORIZONTAL_TOTAL_PIXELS_L, + mipi_original_htotal(table_id) & 0xFF); + WriteReg(RX_P2, HORIZONTAL_TOTAL_PIXELS_H, + mipi_original_htotal(table_id) >> 8); + /*Hactive*/ + WriteReg(RX_P2, HORIZONTAL_ACTIVE_PIXELS_L, + mipi_original_hactive(table_id) & 0xFF); + WriteReg(RX_P2, HORIZONTAL_ACTIVE_PIXELS_H, + mipi_original_hactive(table_id) >> 8); + /*HFP*/ + WriteReg(RX_P2, HORIZONTAL_FRONT_PORCH_L, + mipi_original_hfp(table_id) & 0xFF); + WriteReg(RX_P2, HORIZONTAL_FRONT_PORCH_H, + mipi_original_hfp(table_id) >> 8); + /*HWS*/ + WriteReg(RX_P2, HORIZONTAL_SYNC_WIDTH_L, + mipi_original_hsw(table_id) & 0xFF); + WriteReg(RX_P2, HORIZONTAL_SYNC_WIDTH_H, + mipi_original_hsw(table_id) >> 8); + /*HBP*/ + WriteReg(RX_P2, HORIZONTAL_BACK_PORCH_L, + mipi_original_hbp(table_id) & 0xFF); + WriteReg(RX_P2, HORIZONTAL_BACK_PORCH_H, + mipi_original_hbp(table_id) >> 8); + /*Vactive*/ + WriteReg(RX_P2, ACTIVE_LINES_L, + mipi_original_vactive(table_id) & 0xFF); + WriteReg(RX_P2, ACTIVE_LINES_H, + mipi_original_vactive(table_id) >> 8); + /*VFP*/ + WriteReg(RX_P2, VERTICAL_FRONT_PORCH, + mipi_original_vfp(table_id)); + /*VWS*/ + WriteReg(RX_P2, VERTICAL_SYNC_WIDTH, + mipi_original_vsw(table_id)); + /*VBP*/ + WriteReg(RX_P2, VERTICAL_BACK_PORCH, + mipi_original_vbp(table_id)); + /*M value*/ + WriteReg(RX_P1, MIPI_PLL_M_NUM_23_16, + (mipi_m_value(table_id) >> 16) & 0xff); + WriteReg(RX_P1, MIPI_PLL_M_NUM_15_8, + (mipi_m_value(table_id) >> 8) & 0xff); + WriteReg(RX_P1, MIPI_PLL_M_NUM_7_0, + mipi_m_value(table_id) & 0xff); + /*N value*/ + WriteReg(RX_P1, MIPI_PLL_N_NUM_23_16, + (mipi_n_value(table_id) >> 16) & 0xff); + WriteReg(RX_P1, MIPI_PLL_N_NUM_15_8, + (mipi_n_value(table_id) >> 8) & 0xff); + WriteReg(RX_P1, MIPI_PLL_N_NUM_7_0, + mipi_n_value(table_id) & 0xff); + /*diff*/ + WriteReg(RX_P1, MIPI_DIGITAL_ADJ_1, + mipi_diff_ratio(table_id)); +} + +static void API_ODFC_Configuration(struct anx7625 *anx7625) +{ + int table_id = anx7625->mode_idx; + + /*config input reference clock frequency 27MHz/19.2MHz*/ + sp_write_reg_and(RX_P1, MIPI_DIGITAL_PLL_16, + ~(REF_CLK_27000kHz << MIPI_FREF_D_IND)); + sp_write_reg_or(RX_P1, MIPI_DIGITAL_PLL_16, + (((XTAL_FRQ >= 26000000UL) && (XTAL_FRQ <= 27000000UL)) ? 
+ (REF_CLK_27000kHz << MIPI_FREF_D_IND) + : (REF_CLK_19200kHz << MIPI_FREF_D_IND))); + /*post divider*/ + sp_write_reg_and(RX_P1, MIPI_DIGITAL_PLL_8, 0x0f); + sp_write_reg_or(RX_P1, MIPI_DIGITAL_PLL_8, + mipi_post_divider(table_id) << 4); + + /*add patch for MIS2-125 (5pcs ANX7625 fail ATE MBIST test)*/ + sp_write_reg_and(RX_P1, MIPI_DIGITAL_PLL_7, + ~MIPI_PLL_VCO_TUNE_REG_VAL); + + /*reset ODFC PLL*/ + sp_write_reg_and(RX_P1, MIPI_DIGITAL_PLL_7, + ~MIPI_PLL_RESET_N); + sp_write_reg_or(RX_P1, MIPI_DIGITAL_PLL_7, + MIPI_PLL_RESET_N); + /*force PLL lock*/ + //WriteReg(TX_P0, DP_CONFIG_24, 0x0c); +} + +static void DSC_Video_Timing_Configuration(struct anx7625 *anx7625, + unsigned char table_id) +{ + unchar i; + + /*config uncompressed video format*/ + /*Htotal*/ + WriteReg(TX_P2, HORIZONTAL_TOTAL_PIXELS_L, + (mipi_original_htotal(table_id) * mipi_compress_ratio(table_id)) + & 0xFF); + WriteReg(TX_P2, HORIZONTAL_TOTAL_PIXELS_H, + (mipi_original_htotal(table_id) * mipi_compress_ratio(table_id)) + >> 8); + /*Hactive*/ + WriteReg(TX_P2, HORIZONTAL_ACTIVE_PIXELS_L, + (mipi_original_hactive(table_id) + * mipi_compress_ratio(table_id)) & 0xFF); + WriteReg(TX_P2, HORIZONTAL_ACTIVE_PIXELS_H, + (mipi_original_hactive(table_id) + * mipi_compress_ratio(table_id)) >> 8); + /*HFP*/ + WriteReg(TX_P2, HORIZONTAL_FRONT_PORCH_L, + (mipi_original_hfp(table_id) * mipi_compress_ratio(table_id)) + & 0xFF); + WriteReg(TX_P2, HORIZONTAL_FRONT_PORCH_H, + (mipi_original_hfp(table_id) * mipi_compress_ratio(table_id)) + >> 8); + /*HWS*/ + WriteReg(TX_P2, HORIZONTAL_SYNC_WIDTH_L, + (mipi_original_hsw(table_id) * mipi_compress_ratio(table_id)) + & 0xFF); + WriteReg(TX_P2, HORIZONTAL_SYNC_WIDTH_H, + (mipi_original_hsw(table_id) * mipi_compress_ratio(table_id)) + >> 8); + /*HBP*/ + WriteReg(TX_P2, HORIZONTAL_BACK_PORCH_L, + (mipi_original_hbp(table_id) * mipi_compress_ratio(table_id)) + & 0xFF); + WriteReg(TX_P2, HORIZONTAL_BACK_PORCH_H, + (mipi_original_hbp(table_id) * mipi_compress_ratio(table_id)) + >> 8); + /*Vtotal*/ + WriteReg(TX_P2, TOTAL_LINES_L, + mipi_original_vtotal(table_id) & 0xFF); + WriteReg(TX_P2, TOTAL_LINES_H, + mipi_original_vtotal(table_id) >> 8); + /*Vactive*/ + WriteReg(TX_P2, ACTIVE_LINES_L, + mipi_original_vactive(table_id) & 0xFF); + WriteReg(TX_P2, ACTIVE_LINES_H, + mipi_original_vactive(table_id) >> 8); + /*VFP*/ + WriteReg(TX_P2, VERTICAL_FRONT_PORCH, + mipi_original_vfp(table_id)); + /*VWS*/ + WriteReg(TX_P2, VERTICAL_SYNC_WIDTH, + mipi_original_vsw(table_id)); + /*VBP*/ + WriteReg(TX_P2, VERTICAL_BACK_PORCH, + mipi_original_vbp(table_id)); + + /*config uncompressed video format to woraround */ + /* downstream compatibility issues*/ + /*Htotal*/ + WriteReg(RX_P0, TOTAL_PIXEL_L_7E, + mipi_decompressed_htotal(table_id) & 0xFF); + WriteReg(RX_P0, TOTAL_PIXEL_H_7E, + mipi_decompressed_htotal(table_id) >> 8); + /*Hactive*/ + WriteReg(RX_P0, ACTIVE_PIXEL_L_7E, + mipi_decompressed_hactive(table_id) & 0xFF); + WriteReg(RX_P0, ACTIVE_PIXEL_H_7E, + mipi_decompressed_hactive(table_id) >> 8); + /*HFP*/ + WriteReg(RX_P0, HORIZON_FRONT_PORCH_L_7E, + mipi_decompressed_hfp(table_id) & 0xFF); + WriteReg(RX_P0, HORIZON_FRONT_PORCH_H_7E, + mipi_decompressed_hfp(table_id) >> 8); + /*HWS*/ + WriteReg(RX_P0, HORIZON_SYNC_WIDTH_L_7E, + mipi_decompressed_hsw(table_id) & 0xFF); + WriteReg(RX_P0, HORIZON_SYNC_WIDTH_H_7E, + mipi_decompressed_hsw(table_id) >> 8); + /*HBP*/ + WriteReg(RX_P0, HORIZON_BACK_PORCH_L_7E, + mipi_decompressed_hbp(table_id) & 0xFF); + WriteReg(RX_P0, HORIZON_BACK_PORCH_H_7E, + 
mipi_decompressed_hbp(table_id) >> 8); + + /*config DSC decoder internal blank timing for decoder to start*/ + WriteReg(RX_P1, H_BLANK_L, ((mipi_original_htotal(table_id) + - mipi_original_hactive(table_id))) & 0xFF); + WriteReg(RX_P1, H_BLANK_H, ((mipi_original_htotal(table_id) + - mipi_original_hactive(table_id))) >> 8); + + /*compress ratio RATIO [7:6] 3:div2; 0,1,2:div3*/ + sp_write_reg_and(RX_P0, R_I2C_1, 0x3f); + sp_write_reg_or(RX_P0, R_I2C_1, + (5 - mipi_compress_ratio(table_id)) << 6); + + /*PPS table*/ + if (mipi_video_timing_table[table_id].pps_reg != NULL) { + for (i = 0; i < 0x80; i += 0x10) + WriteBlockReg(RX_P2, R_PPS_REG_0 + i, 0x10, + (unsigned char *)mipi_video_timing_table + [table_id].pps_reg + i); + } +} + +static void API_Custom_Register0_Configuration(struct anx7625 *anx7625, + unsigned char table_id) +{ + unchar i = 0; + /*custom specific register*/ + if (mipi_video_timing_table[table_id].custom_reg0 != NULL) { + while (mipi_video_timing_table[table_id].custom_reg0[i] + .slave_addr) { + WriteReg(mipi_video_timing_table[table_id] + .custom_reg0[i].slave_addr, + mipi_video_timing_table[table_id] + .custom_reg0[i].reg, + mipi_video_timing_table[table_id] + .custom_reg0[i].val); + i++; + } + } +} + +static void API_Custom_Register1_Configuration(struct anx7625 *anx7625, + unsigned char table_id) +{ + unchar i = 0; + /*custom specific register*/ + if (mipi_video_timing_table[table_id].custom_reg1 != NULL) { + while (mipi_video_timing_table[table_id].custom_reg1[i] + .slave_addr) { + WriteReg(mipi_video_timing_table[table_id] + .custom_reg1[i].slave_addr, + mipi_video_timing_table[table_id] + .custom_reg1[i].reg, + mipi_video_timing_table[table_id] + .custom_reg1[i].val); + i++; + } + } +} + +static void swap_DSI_lane3(struct anx7625 *anx7625) +{ + unsigned char RegValue; + /* swap MIPI-DSI data lane 3 P and N */ + RegValue = ReadReg(RX_P1, MIPI_SWAP); + RegValue |= (1 << MIPI_SWAP_CH3); + WriteReg(RX_P1, MIPI_SWAP, RegValue); +} + +static void API_DSI_Configuration(struct anx7625 *anx7625, + unsigned char table_id) +{ + unsigned char RegValue; + + /* swap MIPI-DSI data lane 3 P and N */ + swap_DSI_lane3(anx7625); + + /* DSI clock settings */ + RegValue = (0 << MIPI_HS_PWD_CLK) | + (0 << MIPI_HS_RT_CLK) | + (0 << MIPI_PD_CLK) | + (1 << MIPI_CLK_RT_MANUAL_PD_EN) | + (1 << MIPI_CLK_HS_MANUAL_PD_EN) | + (0 << MIPI_CLK_DET_DET_BYPASS) | + (0 << MIPI_CLK_MISS_CTRL) | + (0 << MIPI_PD_LPTX_CH_MANUAL_PD_EN); + WriteReg(RX_P1, MIPI_PHY_CONTROL_3, RegValue); + + /* Decreased HS prepare timing delay from 160ns to 80ns work with + * a) Dragon board 810 series (Qualcomm Technologies, Inc AP) + * b) Moving DSI source (PG3A pattern generator + + * P332 D-PHY Probe) default D-PHY timing + */ + WriteReg(RX_P1, MIPI_TIME_HS_PRPR, 0x10); /* 5ns/step */ + + sp_write_reg_or(RX_P1, MIPI_DIGITAL_PLL_18, + SELECT_DSI<mode_idx < 10) + DSI_Configuration(anx7625, anx7625->mode_idx); + else + DSI_DSC_Configuration(anx7625, anx7625->mode_idx); + + return 0; +} + +static int anx7625_stop(struct anx7625 *anx7625) +{ + /*set mute flag*/ + sp_write_reg_or(RX_P0, AP_AV_STATUS, AP_MIPI_MUTE); + + /*clear mipi RX en*/ + sp_write_reg_and(RX_P0, AP_AV_STATUS, ~AP_MIPI_RX_EN); + + return 0; +} + +#define STS_HPD_CHANGE \ +(((sys_status&HPD_STATUS) != (anx7625->sys_sta_bak&HPD_STATUS)) ?\ + HPD_STATUS_CHANGE:0) + +static void handle_intr_vector(struct anx7625 *anx7625) +{ + unsigned char sys_status; + u8 intr_vector = ReadReg(RX_P0, INTERFACE_CHANGE_INT); + + WriteReg(RX_P0, INTERFACE_CHANGE_INT, + intr_vector 
& (~intr_vector)); + + sys_status = ReadReg(RX_P0, SYSTEM_STSTUS); + + if ((~INTR_MASK_SETTING) & + ((intr_vector & HPD_STATUS_CHANGE) | STS_HPD_CHANGE)) { + if (!(sys_status & HPD_STATUS)) { + anx7625->hpd_status = 0; + TRACE1("HPD low\n"); + if (anx7625->enabled) + anx7625_stop(anx7625); + } else { + anx7625->hpd_status = 1; + TRACE1("HPD high\n"); + if (anx7625->enabled) + anx7625_start(anx7625); + } + } + + anx7625->sys_sta_bak = sys_status; +} + +static int anx7625_init_pdata(struct anx7625 *anx7625) +{ + struct anx7625_platform_data *pdata = &anx7625->pdata; + struct device *dev = &anx7625->client->dev; + + /* GPIO for HPD */ + pdata->gpiod_cdet = devm_gpiod_get(dev, "cbl_det", GPIOD_IN); + if (IS_ERR(pdata->gpiod_cdet)) + return PTR_ERR(pdata->gpiod_cdet); + + /* GPIO for chip power enable */ + pdata->gpiod_p_on = devm_gpiod_get(dev, "power_en", GPIOD_OUT_LOW); + if (IS_ERR(pdata->gpiod_p_on)) + return PTR_ERR(pdata->gpiod_p_on); + + /* GPIO for chip reset */ + pdata->gpiod_reset = devm_gpiod_get(dev, "reset_n", GPIOD_OUT_LOW); + + return PTR_ERR_OR_ZERO(pdata->gpiod_reset); +} + +static int anx7625_get_mode_idx(const struct drm_display_mode *mode) +{ + struct MIPI_Video_Format *fmt; + int mode_idx = -1, categoly = 0, i; + + if (mode->htotal >= 3840) + categoly = 1; + + for (i = 6; i < sizeof(mipi_video_timing_table) / + sizeof(mipi_video_timing_table[0]); i++) { + fmt = &mipi_video_timing_table[i]; + if (fmt->MIPI_pixel_frequency == mode->clock * 1000 && + fmt->MIPI_inputl[categoly].MIPI_HTOTAL == + mode->htotal && + fmt->MIPI_inputl[categoly].MIPI_VTOTAL == + mode->vtotal && + fmt->MIPI_inputl[categoly].MIPI_HActive == + mode->hdisplay && + fmt->MIPI_inputl[categoly].MIPI_VActive == + mode->vdisplay && + fmt->MIPI_inputl[categoly].MIPI_H_Front_Porch == + mode->hsync_start - mode->hdisplay && + fmt->MIPI_inputl[categoly].MIPI_H_Sync_Width == + mode->hsync_end - mode->hsync_start && + fmt->MIPI_inputl[categoly].MIPI_H_Back_Porch == + mode->htotal - mode->hsync_end && + fmt->MIPI_inputl[categoly].MIPI_V_Front_Porch == + mode->vsync_start - mode->vdisplay && + fmt->MIPI_inputl[categoly].MIPI_V_Sync_Width == + mode->vsync_end - mode->vsync_start && + fmt->MIPI_inputl[categoly].MIPI_V_Back_Porch == + mode->vtotal - mode->vsync_end) { + mode_idx = i; + break; + } + } + + return mode_idx; +} + +static int anx7625_bridge_attach(struct drm_bridge *bridge) +{ + struct anx7625 *anx7625 = bridge_to_anx7625(bridge); + int err; + + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + /* Register aux channel */ + anx7625->aux.name = "DP-AUX"; + anx7625->aux.dev = &anx7625->client->dev; + anx7625->aux.transfer = anx7625_aux_transfer; + + err = drm_dp_aux_register(&anx7625->aux); + if (err < 0) { + DRM_ERROR("Failed to register aux channel: %d\n", err); + return err; + } + + return 0; +} + +static enum drm_mode_status +anx7625_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_mode *mode) +{ + if (anx7625_get_mode_idx(mode) < 0) { + pr_err("failed to find valid index\n"); + return MODE_NOMODE; + } + + return MODE_OK; +} + +static void anx7625_bridge_disable(struct drm_bridge *bridge) +{ + struct anx7625 *anx7625 = bridge_to_anx7625(bridge); + + mutex_lock(&anx7625->lock); + + anx7625_stop(anx7625); + + anx7625->enabled = false; + + mutex_unlock(&anx7625->lock); + + TRACE("anx7625 disabled\n"); +} + +static void anx7625_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode 
*adjusted_mode) +{ + struct anx7625 *anx7625 = bridge_to_anx7625(bridge); + int mode_idx; + + mode_idx = anx7625_get_mode_idx(adjusted_mode); + + mutex_lock(&anx7625->lock); + + if (mode_idx >= 0) + anx7625->mode_idx = mode_idx; + else + DRM_ERROR("Failed to find pre-defined mode for %s\n", + mode->name); + + mutex_unlock(&anx7625->lock); +} + +static void anx7625_bridge_enable(struct drm_bridge *bridge) +{ + struct anx7625 *anx7625 = bridge_to_anx7625(bridge); + int err; + + mutex_lock(&anx7625->lock); + + anx7625->enabled = true; + + if (!anx7625->connected) + DRM_ERROR("cable is not connected\n"); + + if (!anx7625->hpd_status) + DRM_ERROR("hpd is not set\n"); + + err = anx7625_start(anx7625); + if (err) + DRM_ERROR("Failed to start: %d\n", err); + + mutex_unlock(&anx7625->lock); + + TRACE("anx7625 enabled\n"); +} + +static const struct drm_bridge_funcs anx7625_bridge_funcs = { + .attach = anx7625_bridge_attach, + .mode_valid = anx7625_bridge_mode_valid, + .disable = anx7625_bridge_disable, + .mode_set = anx7625_bridge_mode_set, + .enable = anx7625_bridge_enable, +}; + +static irqreturn_t anx7625_cdet_threaded_handler(int irq, void *data) +{ + struct anx7625 *anx7625 = data; + int connected; + + mutex_lock(&anx7625->lock); + + connected = gpiod_get_value_cansleep(anx7625->pdata.gpiod_cdet); + + if (anx7625->connected != connected) { + anx7625->connected = connected; + TRACE("cable status %d\n", connected); + } + + mutex_unlock(&anx7625->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t anx7625_intp_threaded_handler(int unused, void *data) +{ + struct anx7625 *anx7625 = data; + unsigned char c; + + mutex_lock(&anx7625->lock); + + c = ReadReg(TCPC_INTERFACE, INTR_ALERT_1); + + if (c & INTR_SOFTWARE_INT) + handle_intr_vector(anx7625); + + while (ReadReg(RX_P0, + INTERFACE_CHANGE_INT) != 0) + handle_intr_vector(anx7625); + + if (c) + WriteReg(TCPC_INTERFACE, INTR_ALERT_1, 0xFF); + + mutex_unlock(&anx7625->lock); + + return IRQ_HANDLED; +} + +static const u16 anx7625_chipid_list[] = { + 0x7625, +}; + +static int anx7625_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct anx7625 *anx7625; + struct anx7625_platform_data *pdata; + unsigned int i, idl, idh, version[2]; + bool found = false; + int err; + + anx7625 = devm_kzalloc(&client->dev, sizeof(*anx7625), GFP_KERNEL); + if (!anx7625) + return -ENOMEM; + + pdata = &anx7625->pdata; + + mutex_init(&anx7625->lock); + + anx7625->client = client; + i2c_set_clientdata(client, anx7625); + + err = anx7625_init_pdata(anx7625); + if (err) { + DRM_ERROR("Failed to initialize pdata: %d\n", err); + return err; + } + + pdata->cdet_irq = gpiod_to_irq(pdata->gpiod_cdet); + if (pdata->cdet_irq < 0) { + DRM_ERROR("Failed to get CDET IRQ: %d\n", pdata->cdet_irq); + return -ENODEV; + } + + pdata->intp_irq = client->irq; + if (!pdata->intp_irq) { + DRM_ERROR("Failed to get INTP IRQ\n"); + return -ENODEV; + } + + /* Power on chip */ + err = anx7625_poweron(anx7625); + if (err) + goto err_poweroff; + + /* Look for supported chip ID */ + err = Read_Reg(TCPC_INTERFACE, PRODUCT_ID_L, &idl); + if (err) + goto err_poweroff; + + err = Read_Reg(TCPC_INTERFACE, PRODUCT_ID_H, &idh); + if (err) + goto err_poweroff; + + err = Read_Reg(RX_P0, OCM_FW_VERSION, &version[0]); + if (err) + goto err_poweroff; + + err = Read_Reg(RX_P0, OCM_FW_REVERSION, &version[1]); + if (err) + goto err_poweroff; + + anx7625->chipid = (u8)idl | ((u8)idh << 8); + + for (i = 0; i < ARRAY_SIZE(anx7625_chipid_list); i++) { + if (anx7625->chipid == 
anx7625_chipid_list[i]) { + DRM_INFO("Found ANX%x (ver. %x%x) Transmitter\n", + anx7625->chipid, version[0], version[1]); + found = true; + break; + } + } + + if (!found) { + DRM_ERROR("ANX%x (ver. %x%x) not supported by this driver\n", + anx7625->chipid, version[0], version[1]); + err = -ENODEV; + goto err_poweroff; + } + + err = devm_request_threaded_irq(&client->dev, pdata->cdet_irq, NULL, + anx7625_cdet_threaded_handler, + IRQF_TRIGGER_RISING + | IRQF_TRIGGER_RISING + | IRQF_ONESHOT, + "anx7625-hpd", anx7625); + if (err) { + DRM_ERROR("Failed to request CABLE_DET threaded IRQ: %d\n", + err); + goto err_poweroff; + } + + err = devm_request_threaded_irq(&client->dev, pdata->intp_irq, NULL, + anx7625_intp_threaded_handler, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "anx7625-intp", anx7625); + if (err) { + DRM_ERROR("Failed to request INTP threaded IRQ: %d\n", err); + goto err_poweroff; + } + +#if IS_ENABLED(CONFIG_OF) + anx7625->bridge.of_node = client->dev.of_node; +#endif + + anx7625->bridge.funcs = &anx7625_bridge_funcs; + + drm_bridge_add(&anx7625->bridge); + + /* init connected status */ + anx7625->connected = + gpiod_get_value_cansleep(anx7625->pdata.gpiod_cdet); + + /* init hpd status */ + anx7625->sys_sta_bak = ReadReg(RX_P0, SYSTEM_STSTUS); + anx7625->hpd_status = (anx7625->sys_sta_bak & HPD_STATUS) ? + true : false; + + return 0; + +err_poweroff: + anx7625_poweroff(anx7625); + DRM_ERROR("Failed to load anx7625 driver: %d\n", err); + return err; +} + +static int anx7625_i2c_remove(struct i2c_client *client) +{ + struct anx7625 *anx7625 = i2c_get_clientdata(client); + + anx7625_poweroff(anx7625); + + drm_bridge_remove(&anx7625->bridge); + + kfree(anx7625->edid); + + return 0; +} + +static const struct i2c_device_id anx7625_id[] = { + { "anx7625", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, anx7625_id); + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id anx7625_id_match_table[] = { + { .compatible = "analogix,anx7625", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, anx7625_id_match_table); +#endif + +static struct i2c_driver anx7625_driver = { + .driver = { + .name = "anx7625", + .owner = THIS_MODULE, +#ifdef CONFIG_OF + .of_match_table = anx7625_id_match_table, +#endif + }, + .probe = anx7625_i2c_probe, + .remove = anx7625_i2c_remove, + .id_table = anx7625_id, +}; + +module_i2c_driver(anx7625_driver); +MODULE_DESCRIPTION("anx7625 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/analogix-anx7625.h b/drivers/gpu/drm/bridge/analogix-anx7625.h new file mode 100644 index 0000000000000000000000000000000000000000..2d78a133ed181ba6015d09c3545b568dc115a42d --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix-anx7625.h @@ -0,0 +1,1220 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * Copyright(c) 2016, Analogix Semiconductor. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __MI2_REG_H__ +#define __MI2_REG_H__ + +#define DEBUG_LOG_OUTPUT + +#define _BIT0 0x01 +#define _BIT1 0x02 +#define _BIT2 0x04 +#define _BIT3 0x08 +#define _BIT4 0x10 +#define _BIT5 0x20 +#define _BIT6 0x40 +#define _BIT7 0x80 + +/***************************************************************/ +/*Register definition of device address 0x58*/ + +#define PRODUCT_ID_L 0x02 +#define PRODUCT_ID_H 0x03 + +#define TCPC_ROLE_CONTROL 0x1a +#define TCPC_COMMAND 0x23 + +#define ANALOG_CTRL_0 0xA0 +#define DFP_OR_UFP _BIT6 + +#define INTR_ALERT_1 0xCC +#define INTR_SOFTWARE_INT _BIT3 +#define INTR_RECEIVED_MSG _BIT5 + +#define TCPC_CONTROL 0x19 +/* bit positions */ +#define DEBUG_ACCESSORY_CONTROL 4 +#define BIST_TEST_MODE 1 +#define PLUG_ORIENTATION 0 + +#define POWER_CONTROL 0x1C +/* bit positions */ +#define DISABLE_VBUS_VOLTAGE_MONITOR 6 +#define DISABLE_VOLTAGE_ALARMS 5 +#define VCONN_POWER_SUPPORTED_CONTROL 1 +#define ENABLE_VCONN 0 + + + +#define ANALOG_CTRL_1 0xA1 + + +#define ANALOG_CTRL_9 0xA9 +/* bit positions */ +#define SAFE_MODE 7 +#define TEST_USB_PD_EN 6 +#define TEST_EN_MI 5 +#define BMC_MODE1_SEL_VL 3 +#define BMC_MODE1_CAP 0 + +#define TCPC_SWITCH_0 0xB4 +/* bit positions */ +#define SWAP_AUX_R 7 +#define SWAP_AUX_T 6 +#define SW_SEL1_5 5 +#define SW_SEL1_4 4 +#define SW_SEL1_3 3 +#define SW_SEL1_2 2 +#define SW_SEL1_1 1 +#define SW_SEL1_0 0 + +#define TCPC_SWITCH_1 0xB5 +/* bit positions */ +#define SW_SEL2_5 5 +#define SW_SEL2_4 4 +#define SW_SEL2_3 3 +#define SW_SEL2_2 2 +#define SW_SEL2_1 1 +#define SW_SEL2_0 0 + +#define CHIP_POWER_CTRL 0xB6 +/* bit positions */ +#define PD_V10_ODFC 5 +#define PD_V10_DPTX 4 +#define PU_DPTX 3 +#define PU_HDMIRX 2 +#define ISO_EN_N_T 1 +#define PU_PART_DIG 0 + +#define HPD_CTRL 0xBD +/* bit positions */ +#define HPD_OUT 5 +#define R_HPD_UNPLUG 4 +#define R_HPD_PLUGIN 3 +#define R_HPD_IRQ 2 +#define R_HPD_MODE 1 +#define R_HPD_OUT 0 + + + + + +/*================ END of I2C Address 0x58 ========================*/ + + + +/***************************************************************/ +/*Register definition of device address 0x70*/ +#define I2C_ADDR_70_DPTX 0x70 + +#define SYSTEM 0x80 +/* bit positions */ +#define CHIP_AUTH_RESET 7 +#define BYPASS_CHIP_AUTH 6 +#define HDCP_VERSION 5 +#define HDCP2_FW_EN 4 +#define HDCP2_HPD 3 +#define DET_STA 2 +#define FORCE_DET 1 +#define DET_CTRL 0 + +#define DP_CONFIG_3 0x84 +/* bit positions */ +/* bit[7:5] are bpc - bits per channel */ +#define BPC 5 +/* 011: 12 bit */ +/* 010: 10 bit */ +/* 001: 8 bit */ +/* 000: 6 bit */ +/* other: reserved */ +#define YC_COEFF 4 /* ITU-R BT.601 or BT.709 */ +#define D_RANGE 3 +/* color space and chroma format: */ +/* 00: RGB, 01: YCbCr422, */ +/* 10: YCbCr444, */ +/* 11: reserved */ +/* dynamic range: video or graphics */ +#define COLOR_F 1 +#define SYNC_MODE 0 + +#define SP_TX_LINK_BW_SET_REG 0xA0 +#define SP_TX_LANE_COUNT_SET_REG 0xA1 + +#define BUF_DATA_COUNT 0xE4 + +#define AUX_CTRL 0xE5 +#define AUX_ADDR_7_0 0xE6 +#define AUX_ADDR_15_8 0xE7 +#define AUX_ADDR_19_16 0xE8 + +#define SP_TX_INT_STATUS1 0xF7/*DISPLAYPORT_INTERRUPT*/ +#define POLLING_ERR 0x10 + +#define DP_CONFIG_24 0xB0 +#define POLLING_EN 0x02 + +#define DP_CONFIG_20 0xB8 +/* bit positions */ +/* bit[7:6] are reserved */ +#define M_VID_DEBUG 5 +#define NEW_PRBS7 4 +#define DIS_FIFO_RST 3 +#define DISABLE_AUTO_RESET_ENCODER 2 +#define INSERT_ER 1 +#define PRBS31_EN 0 +#define M_VID_0 0xC0 + +#define M_VID_1 0xC1 + +#define M_VID_2 0xC2 + +#define N_VID_0 0xC3 + +#define N_VID_1 0xC4 + 
+#define N_VID_2 0xC5 + + +#define BUF_DATA_0 0xF0 +#define SP_TX_AUX_STATUS 0xE0 + +#define AUX_CTRL2 0xE9 +#define ADDR_ONLY_BIT 0x02 +#define AUX_OP_EN 0x01 +/***************************************************************/ +/*Register definition of device address 0x72*/ +#define AUX_RST 0x04 +#define RST_CTRL2 0x07 + +#define SP_TX_TOTAL_LINE_STA_L 0x24 +#define SP_TX_TOTAL_LINE_STA_H 0x25 +#define SP_TX_ACT_LINE_STA_L 0x26 +#define SP_TX_ACT_LINE_STA_H 0x27 +#define SP_TX_V_F_PORCH_STA 0x28 +#define SP_TX_V_SYNC_STA 0x29 +#define SP_TX_V_B_PORCH_STA 0x2A +#define SP_TX_TOTAL_PIXEL_STA_L 0x2B +#define SP_TX_TOTAL_PIXEL_STA_H 0x2C +#define SP_TX_ACT_PIXEL_STA_L 0x2D +#define SP_TX_ACT_PIXEL_STA_H 0x2E +#define SP_TX_H_F_PORCH_STA_L 0x2F +#define SP_TX_H_F_PORCH_STA_H 0x30 +#define SP_TX_H_SYNC_STA_L 0x31 +#define SP_TX_H_SYNC_STA_H 0x32 +#define SP_TX_H_B_PORCH_STA_L 0x33 +#define SP_TX_H_B_PORCH_STA_H 0x34 +#define SP_TX_VID_CTRL 0x84 + +#define VIDEO_BIT_MATRIX_12 0x4c +#define VIDEO_BIT_MATRIX_13 0x4d +#define VIDEO_BIT_MATRIX_14 0x4e +#define VIDEO_BIT_MATRIX_15 0x4f +#define VIDEO_BIT_MATRIX_16 0x50 +#define VIDEO_BIT_MATRIX_17 0x51 +#define VIDEO_BIT_MATRIX_18 0x52 +#define VIDEO_BIT_MATRIX_19 0x53 +#define VIDEO_BIT_MATRIX_20 0x54 +#define VIDEO_BIT_MATRIX_21 0x55 +#define VIDEO_BIT_MATRIX_22 0x56 +#define VIDEO_BIT_MATRIX_23 0x57 + +#define AUDIO_CHANNEL_STATUS_1 0xd0 +#define AUDIO_CHANNEL_STATUS_2 0xd1 +#define AUDIO_CHANNEL_STATUS_3 0xd2 +#define AUDIO_CHANNEL_STATUS_4 0xd3 +#define AUDIO_CHANNEL_STATUS_5 0xd4 +#define AUDIO_CHANNEL_STATUS_6 0xd5 +#define TDM_SLAVE_MODE 0x10 +#define I2S_SLAVE_MODE 0x08 + +#define AUDIO_CONTROL_REGISTER 0xe6 +#define TDM_TIMING_MODE 0x08 + +#define I2C_ADDR_72_DPTX 0x72 + + +#define POWER_CONTROL_0 0x05 +/* bit positions */ +#define PD_REG 7 +#define PD_HDCP2 6 +#define PD_HDCP 5 +#define PD_AUDIO 4 +#define PD_VIDEO 3 +#define PD_LINK 2 +#define PD_TOTAL 1 +/* bit[0] is reserved */ + +#define RESET_CONTROL_0 0x06 +/* bit positions */ +#define MISC_RST 7 +#define VID_CAP_RST 6 +#define VID_FIFO_RST 5 +#define AUD_FIFO_RST 4 +#define AUD_RST 3 +#define HDCP_RST 2 +#define SW_RST 1 +#define HW_RST 0 + +#define VIDEO_CONTROL_0 0x08 +/* bit positions */ +#define VIDEO_EN 7 +/* bit[6] is reserved */ +/* DE re-generation mode: 1=enabled, 0=disabled */ +#define DE_GEN 5 +/* De YC-MUX: 1=enabled, 0=disabled */ +#define DEMUX 4 +/* bit[3] is reserved in MI-2 */ +#define HALF_FREQUENCY_MODE 3 +/* alwaysa set bit[2] to 1 in MI-2 */ +#define DDR_MODE 2 +#define DDR_CTRL 1 +#define NEGEDGE_LATCH 0 + + +#define VIDEO_CONTROL_2 0x0A +/* bit positions */ + /* YCbCr cofficients of input video: 1=ITU709, 0=ITU601 */ +#define IN_YC_COEFFI 7 + /* reserved in MI-2 */ +#define HDMI_HPD 6 +/* dalay one of DE: 1=enabled, 0=disabled */ +#define DE_DELAY 5 +/* update enable control of video format parameter in capture block: */ +/* 1=enabled, 0=disabled */ +#define VID_CHK_UPDATE_EN 4 +#define VIDEO_COLOR_SWAP 1 /* bit[3:1]: reserved in MI-2 */ +#define VIDEO_BIT_SWAP 0 /* reserved in MI-2 */ + +#define VIDEO_CONTROL_4 0x0C +/* bit positions */ +#define CSC_STD_SEL 7 +#define XVYCC_RNG_LMT 6 +#define RANGE_Y2R 5 /* reserved in MI-2 */ +#define CSPACE_Y2R 4 /* reserved in MI-2 */ +#define RGB_RNG_LMT 3 +#define YC_RNG_LMT 2 +#define RANGE_R2Y 1 +#define CSPACE_R2Y 0 + +#define VIDEO_CONTROL_5 0x0D +/* bit positions */ +#define TEST_PATTERN_EN 7 /* video BIST: 1=enabled, 0=disabled */ +#define VIDEO_PROCESS_EN 6 +#define IN_PIXEL_REPEAT 4 /* bit[5:4]: reserved in MI-2 
*/ +/* up sampling mode: 1=FIR filter, 0=copy sample */ +#define VID_US_MODE 3 +/* down sampling mode: 1=FIR filter, 0=skip sample */ +#define VID_DS_MODE 2 +/* reserved in MI-2 */ +#define UP_SAMPLE 1 +/* 4:4:4 to 4:2:2 down sampling: 1=enabled, 0=disabled */ +#define DOWN_SAMPLE 0 + + + +#define VIDEO_CONTROL_7 0x0F +/* bit positions */ +#define VID_HRES_TH 4 +#define VID_VRES_TH 0 + + +#define TOTAL_LINE_CFG_L 0x12 +#define TOTAL_LINE_CFG_H 0x13 /* note: bit[7:6] are reserved */ +#define ACTIVE_LINES_L 0x14 +#define ACTIVE_LINES_H 0x15 /* note: bit[7:6] are reserved */ +#define VERTICAL_FRONT_PORCH 0x16 +#define VERTICAL_SYNC_WIDTH 0x17 +#define VERTICAL_BACK_PORCH 0x18 + +#define HORIZONTAL_TOTAL_PIXELS_L 0x19 +#define HORIZONTAL_TOTAL_PIXELS_H 0x1A /* note: bit[7:6] are reserved */ +#define HORIZONTAL_ACTIVE_PIXELS_L 0x1B +#define HORIZONTAL_ACTIVE_PIXELS_H 0x1C /* note: bit[7:6] are reserved */ +#define HORIZONTAL_FRONT_PORCH_L 0x1D +#define HORIZONTAL_FRONT_PORCH_H 0x1E /* note: bit[7:4] are reserved */ +#define HORIZONTAL_SYNC_WIDTH_L 0x1F +#define HORIZONTAL_SYNC_WIDTH_H 0x20 /* note: bit[7:4] are reserved */ +#define HORIZONTAL_BACK_PORCH_L 0x21 +#define HORIZONTAL_BACK_PORCH_H 0x22 /* note: bit[7:4] are reserved */ + + + + +#define NFCU_02 0xC8 +#define NFCU_03 0xC9 +#define NFCU_04 0xCA +#define NFCU_05 0xCB + +/*======================= END of I2C Address 0x72 ===============*/ +/***************************************************************/ +/*Register definition of device address 0x7A*/ +#define DPPLL_REG4 0xF9 +/* bit positions */ +/* DPPLL_REG4[7:4] is not used */ +#define atest_enable 3 /* DPPLL_REG4[3] is analog signal test enable */ +/* 0: disable test analog signal + * 1: enable test analog signal + */ +#define dtest_enable 2 /* DPPLL_REG4[2] is digital signal test enable */ +/* 0: disable test digital signal + * 1: enable test digital signal + */ +#define test_sig_sel 0 /* DPPLL_REG4[1:0] is test signal selection */ +/* when atest_enable = 1, + * 00: vreg_1p45 + 01: duty_meas (decided by duty_outputen and duty_sel<1:0>) + 10: avdd10 + 11: vcox + */ +/* when dtest_enable = 1, + * 00: refbx (\wr 7e 42 f; \wr 7e 81 e1; \wr 7a f9 4 + ==> Now on WS pin, you'd see a 27 MHz clock.) + 01: ckfbx (\wr 7e 42 f; \wr 7e 81 e1; \wr 7a f9 5 + ==> Now on WS pin, you'd see a 27 MHz signal. + However, this signal is neither square wave nor sine wave, + it should be narrow pulses.) + 10: ck_vco + 11: avss10 + */ + + + + +/*================= END of I2C Address 0x7A =====================*/ + +/***************************************************************/ +/*Register definition of device address 0x7e*/ + +#define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E + +#define R_BOOT_RETRY 0x00 +/* bit positions */ +#define SRAM_CS 4 +#define FUSE_WRITE 3 +#define FUSE_STATUS 2 +#define BOOT_RETRY_NUM 0 + +#define R_RAM_CTRL 0x05 +/* bit positions */ +#define FLASH_DONE 7 +#define BOOT_LOAD_DONE 6 +#define CRC_OK 5 +#define LOAD_DONE 4 +#define O_RW_DONE 3 +#define FUSE_BUSY 2 +#define DECRYPT_EN 1 +#define LOAD_START 0 + + +#define FUSE_DATA_IN7_0 0x0A +#define FUSE_DATA_IN15_8 0x0B +#define FUSE_DATA_IN23_16 0x0C + + +/* note: The actual implementation doesn't match register spec v0.5 */ +/* - High byte and low byte are reversed. */ +#define FLASH_ADDR_H 0x0F +#define FLASH_ADDR_L 0x10 + +/* note: The actual implementation doesn't match register spec v0.5 */ +/* - High byte and low byte are reversed. 
*/ +#define EEPROM_ADDR_H 0x0F +#define EEPROM_ADDR_L 0x10 + + +#define FLASH_WRITE_BUF_BASE_ADDR 0x11 +#define FLASH_WRITE_MAX_LENGTH 0x20 + +#define EEPROM_WRITE_BUF_BASE_ADDR 0x11 +#define EEPROM_WRITE_MAX_LENGTH 0x10 + +#define AUTO_PD_MODE 0x2f/*0x6e*/ +#define AUTO_PD_ENABLE 0x02 +#define MAX_VOLTAGE_SETTING 0x29/*0xd0*/ +#define MAX_POWER_SETTING 0x2A/*0xd1*/ +#define MIN_POWER_SETTING 0x2B/*0xd2*/ +#define RDO_MAX_VOLTAGE 0x2C /* 0x7E:0x2C // 0xD3*/ +#define RDO_MAX_POWER 0x2D /* 0x7E:0x2D // 0xD4*/ +#define RDO_MAX_CURRENT 0x2E /* 0x7E:0x2E // 0xD5*/ + +/* note: 0 means 1 byte, x means (x+1) bytes; max x=31 */ +/* note: The actual implementation doesn't match register spec v0.5 */ +/* - High byte and low byte are reversed. */ +#define FLASH_LEN_H 0x31 +#define FLASH_LEN_L 0x32 + +/* note: The actual implementation doesn't match register spec v0.5 */ +/* - High byte and low byte are reversed. */ +#define EEPROM_LEN_H 0x31 +#define EEPROM_LEN_L 0x32 + +#define OCM_FW_VERSION 0x31 +#define OCM_FW_REVERSION 0x32 + +#define R_FLASH_RW_CTRL 0x33 +/* bit positions */ +#define READ_DELAY_SELECT 7 +#define GENERAL_INSTRUCTION_EN 6 +#define FLASH_ERASE_EN 5 +#define RDID_READ_EN 4 +#define REMS_READ_EN 3 +#define WRITE_STATUS_EN 2 +#define FLASH_READ 1 +#define FLASH_WRITE 0 + +/* the value to be written into Flash status register */ +#define STATUS_REGISTER_IN 0x34 +/* Flash REMS READ DATA (depend on Flash vendor definition) */ +#define REMS_READ_ADDR 0x35 + + + +/* This register is for single-byte commands only, i.e. */ +/* in Table 2 in GD25D10B datasheet, all the cells */ +/* following "Byte 1" in the row are blank. */ +/* For all other multi-byte commands, hardware has a wrapper, */ +/* and software shouldn't write the commands in this register directly. */ +#define GENERAL_INSTRUCTION_TYPE 0x36 +/* Flash operation commands - refer to Table 2 in GD25D10B/05B datasheet */ +#define WRITE_ENABLE 0x06 +#define WRITE_DISABLE 0x04 +#define DEEP_POWER_DOWN 0xB9 +#define DEEP_PD_REL 0xAB /* Release from Deep Power-Down */ +#define CHIP_ERASE_A 0xC7 +#define CHIP_ERASE_B 0x60 + +#define FLASH_ERASE_TYPE 0x37 +#define SECTOR_ERASE 0x20 +#define BLOCK_ERASE_32K 0x52 +#define BLOCK_ERASE_64K 0xD8 + +#define STATUS_REGISTER 0x38 /* Flash status register readback value */ +/* bit positions */ +/* Status Register Protect bit, operates in conjunction with */ +/* the Write Protect (WP#) signal */ +/* The SRP bit and WP signal set the device to the Hardware Protected mode.*/ +/* When the SRP = 1, and WP# signal is Low, the non-volatile bits */ +/* of the Status Register (SRP, BP2, BP1, BP0) */ +/* become read-only and the Write Status Register (WRSR) */ +/* instruction is not executed. */ +/* The default value of SRP bit is 0. */ +#define SRP0 7 + +/* Block Protect bits */ +/* These bits are non-volatile. They define the size of the area */ +/* to be software protected against Program and Erase commands. */ +/* These bits are written with the Write Status Register (WRSR) command. */ +/* When the (BP4, BP3, BP2, BP1, BP0) bits are set to 1, */ +/* the relevant memory area becomes protected against Page Program (PP), */ +/* Sector Erase (SE), and Block Erase (BE) commands. */ +/* Refer to Table 1.0 in GD25D10B/05B datasheet for details. */ +/* The (BP4, BP3, BP2, BP1, BP0) bits can be written provided that */ +/* the Hardware Protected mode has not been set. 
*/ +#define BP4 6 +#define BP3 5 +#define BP2 4 +#define BP1 3 +#define BP0 2 + +/* Write Enable Latch bit, indicates the status of */ +/* the internal Write Enable Latch. */ +/* When WEL bit is 1, the internal Write Enable Latch is set. */ +/* When WEL bit is 0, the internal Write Enable Latch is reset, */ +/* and Write Status Register, Program or */ +/* Erase commands are NOT accepted. */ +/* The default value of WEL bit is 0. */ +#define WEL 1 + +/* Write In Progress bit, indicates whether the memory is busy */ +/* in program/erase/write status register progress. */ +/* When WIP bit is 1, it means the device is busy in */ +/* program/erase/write status register progress. */ +/* When WIP bit is 0, it means the device is not in */ +/* program/erase/write status register progress. */ +/* The default value of WIP bit is 0. */ +#define WIP 0 + +#define MANUFACTURE_ID 0x39 +#define DEVICE_ID 0x3A +#define MEM_TYPE 0x3B +#define CAPACITY 0x3C + + + +#define XTAL_FRQ_SEL 0x3F +/* bit field positions */ +#define XTAL_FRQ_SEL_POS 5 +/* bit field values */ +#define XTAL_FRQ_19M2 (0 << XTAL_FRQ_SEL_POS) +#define XTAL_FRQ_27M (4 << XTAL_FRQ_SEL_POS) + +#define R_DSC_CTRL_0 0x40 +/* bit positions */ +#define READ_STATUS_EN 7 +#define CLK_1MEG_RB 6 /* 1MHz clock reset; 0=reset, 0=reset release */ +#define DSC_BIST_DONE 1 /* bit[5:1]: 1=DSC MBIST pass */ +#define DSC_EN 0x01 /* 1=DSC enabled, 0=DSC disabled */ + +#define INTERFACE_INTR_MASK 0x43 +#define RECEIVED_MSG_MASK 1 +#define RECEIVED_ACK_MASK 2 +#define VCONN_CHANGE_MASK 4 +#define VBUS_CHANGE_MASK 8 +#define CC_STATUS_CHANGE_MASK 16 +#define DATA_ROLE_CHANGE_MASK 32 + +#define INTERFACE_CHANGE_INT 0x44 +#define RECEIVED_MSG 0x01 +#define RECEIVED_ACK 0x02 +#define VCONN_CHANGE 0x04 +#define VBUS_CHANGE 0x08 +#define CC_STATUS_CHANGE 0x10 +#define DATA_ROLE_CHANGE 0x20 +#define PR_CONSUMER_GOT_POWER 0x40 +#define HPD_STATUS_CHANGE 0x80 + +#define SYSTEM_STSTUS 0x45 +/*0: VCONN off; 1: VCONN on*/ +#define VCONN_STATUS 0x04 +/*0: vbus off; 1: vbus on*/ +#define VBUS_STATUS 0x08 +/*0: host; 1:device*/ +#define DATA_ROLE 0x20 + +#define HPD_STATUS 0x80 +#define NEW_CC_STATUS 0x46 + +#define GPIO_CTRL_0 0x47 +/* bit positions */ +#define GPIO_3_DATA 7 +#define GPIO_3_OEN 6 +#define GPIO_2_DATA 5 +#define GPIO_2_OEN 4 +#define GPIO_1_DATA 3 +#define GPIO_1_OEN 2 +#define GPIO_0_DATA 1 +#define GPIO_0_OEN 0 + +#define GPIO_CTRL_1 0x48 +/* bit positions */ +/* bit[7:4] are reserved */ +/* When bonding with Flash, this register will control the flash WP pin */ +#define FLASH_WP 3 +/* 0 = write protect, 1 = no write protect*/ +#define WRITE_UNPROTECTED 1 +#define WRITE_PROTECTED 0 +/* bit[2:0] are reserved */ + + +#define GPIO_CTRL_2 0x49 +/* bit positions */ +#define HPD_SOURCE 6 +#define GPIO_10_DATA 5 +#define GPIO_10_OEN 4 +#define GPIO_9_DATA 3 +#define GPIO_9_OEN 2 +#define GPIO_8_DATA 1 +#define GPIO_8_OEN 0 +#define GPIO_STATUS_1 0x4B +/* bit positions */ +#define OCM_RESET 2 +#define INTERRUPT_POLARITY 1 +#define INTERRUPT_OPEN_DRAIN 0 +#define TOTAL_PIXEL_L_7E 0x50 +#define TOTAL_PIXEL_H_7E 0x51 /* note: bit[7:6] are reserved */ + +#define ACTIVE_PIXEL_L_7E 0x52 +#define ACTIVE_PIXEL_H_7E 0x53 /* note: bit[7:6] are reserved */ + +#define HORIZON_FRONT_PORCH_L_7E 0x54 +/* note: bit[7:4] are EEPROM Key 0, which is not used in MI-2 */ +#define HORIZON_FRONT_PORCH_H_7E 0x55 + +#define HORIZON_SYNC_WIDTH_L_7E 0x56 +#define HORIZON_SYNC_WIDTH_H_7E 0x57 + +#define HORIZON_BACK_PORCH_L_7E 0x58 +#define HORIZON_BACK_PORCH_H_7E 0x59 + +#define 
FLASH_READ_BUF_BASE_ADDR 0x60 +#define FLASH_READ_MAX_LENGTH 0x20 + +#define EEPROM_READ_BUF_BASE_ADDR 0x60 +#define EEPROM_READ_MAX_LENGTH 0x10 + +#define DSC_REG_ADDR_H_F_PORCH_H 0x55 +/* bit positions */ +#define KEY_0 4 +#define HORIZON_FRONT_PORCH_H 0 + +#define DSC_REG_ADDR_H_SYNC_CFG_H 0x57 +/* bit positions */ +#define KEY_1 4 +#define HORIZON_SYNC_WIDTH_H 0 + +#define DSC_REG_ADDR_H_PORCH_CFG_H 0x59 +/* bit positions */ +#define KEY_2 4 +#define HORIZON_BACK_PORCH_H 0 +#define R_I2C_0 0x80 +/* bit positions */ +#define COL_CORE_RESET 7 +#define I2C_ASSERT_DELAY 0 + +#define R_I2C_1 0x81 +/* bit positions */ +#define RATIO 6 +#define DEBUG_OE 5 +#define ADDR_80H_SEL 4 +#define WRITE_DELAY_COUNTER 0 + +#define OCM_DEBUG_REG_8 0x88 +/* bit positions */ +#define STOP_MAIN_OCM 6 +#define AP_AUX_ADDR_7_0 0x11 +#define AP_AUX_ADDR_15_8 0x12 +#define AP_AUX_ADDR_19_16 0x13 + +/* note: bit[0:3] AUX status, bit 4 op_en, bit 5 address only */ +#define AP_AUX_CTRL_STATUS 0x14 +#define AP_AUX_CTRL_OP_EN 0x10 +#define AP_AUX_CTRL_ADDRONLY 0x20 + +#define AP_AUX_BUFF_START 0x15 +#define PIXEL_CLOCK_L 0x25 +#define PIXEL_CLOCK_H 0x26 + +#define AP_AUX_COMMAND 0x27 /*com+len*/ +/*bit 0&1: 3D video structure */ +/* 0x01: frame packing, 0x02:Line alternative, 0x03:Side-by-side(full)*/ +#define AP_AV_STATUS 0x28 +#define AP_VIDEO_CHG _BIT2 +#define AP_AUDIO_CHG _BIT3 +#define AP_MIPI_MUTE _BIT4 /* 1:MIPI input mute, 0: ummute*/ +#define AP_MIPI_RX_EN _BIT5 /* 1: MIPI RX input in 0: no RX in*/ +#define AP_DISABLE_PD _BIT6 +#define AP_DISABLE_DISPLAY _BIT7 +/***************************************************************/ +/*Register definition of device address 0x54*/ +#define TOTAL_LINES_L 0x12 +#define TOTAL_LINES_H 0x13 +#define ACTIVE_LINES_L 0x14 +#define ACTIVE_LINES_H 0x15 /* note: bit[7:6] are reserved */ +#define VERTICAL_FRONT_PORCH 0x16 +#define VERTICAL_SYNC_WIDTH 0x17 +#define VERTICAL_BACK_PORCH 0x18 + +#define HORIZONTAL_TOTAL_PIXELS_L 0x19 +#define HORIZONTAL_TOTAL_PIXELS_H 0x1A /* note: bit[7:6] are reserved */ +#define HORIZONTAL_ACTIVE_PIXELS_L 0x1B +#define HORIZONTAL_ACTIVE_PIXELS_H 0x1C /* note: bit[7:6] are reserved */ +#define HORIZONTAL_FRONT_PORCH_L 0x1D +#define HORIZONTAL_FRONT_PORCH_H 0x1E /* note: bit[7:4] are reserved */ +#define HORIZONTAL_SYNC_WIDTH_L 0x1F +#define HORIZONTAL_SYNC_WIDTH_H 0x20 /* note: bit[7:4] are reserved */ +#define HORIZONTAL_BACK_PORCH_L 0x21 +#define HORIZONTAL_BACK_PORCH_H 0x22 /* note: bit[7:5] are reserved */ + +#define R_PPS_REG_0 0x80 +/***************************************************************/ +/*Register definition of device address 0x84*/ +#define MIPI_PHY_CONTROL_1 0x01 +/* bit positions */ +#define MIPI_PD_LPCD_3 7 +#define MIPI_PD_LPCD_2 6 +#define MIPI_PD_LPCD_1 5 +#define MIPI_PD_LPCD_0 4 +#define MIPI_PD_3 3 +#define MIPI_PD_2 2 +#define MIPI_PD_1 1 +#define MIPI_PD_0 0 + + +#define MIPI_PHY_CONTROL_3 0x03 +/* bit positions */ +#define MIPI_HS_PWD_CLK 7 +#define MIPI_HS_RT_CLK 6 +#define MIPI_PD_CLK 5 +#define MIPI_CLK_RT_MANUAL_PD_EN 4 +#define MIPI_CLK_HS_MANUAL_PD_EN 3 +#define MIPI_CLK_DET_DET_BYPASS 2 +#define MIPI_CLK_MISS_CTRL 1 +#define MIPI_PD_LPTX_CH_MANUAL_PD_EN 0 + + +#define MIPI_LANE_CTRL_0 0x05 +/* bit positions */ +#define MIPI_DATA_REVERSE 7 +#define MIPI_SYNC_LEAD_REVERSE 6 +#define MIPI_FORCE_TIME_LPX 5 +#define MIPI_BYPASS_WAKE_UP 4 +#define MIPI_DESKEW_EN 3 +#define MIPI_EOTP_EN 2 +#define MIPI_ACTIVE_LANE 0 +/* bit[1:0] - 00: 1 lane, 01: 2 lanes, 10: 3 lanes, 11: 4 lanes */ +#define MIPI_TIME_HS_PRPR 0x08 
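+/* HS prepare time, 5 ns per step; API_DSI_Configuration() in the driver
+ * programs 0x10 (80 ns) here to halve the default 160 ns delay.
+ */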
+ +/* After MIPI RX protocol layer received this many video frames, */ +/* protocol layer starts to reconstruct video stream from PHY */ +#define MIPI_VIDEO_STABLE_CNT 0x0A + + +#define MIPI_LANE_CTRL_10 0x0F +/* bit positions */ +#define MIPI_SW_RESET_N 7 +#define MIPI_ERROR_REPORT_EN 6 +#define MIPI_LANE_SWAP 4 +/* bit[5:4] - Debug mode to swap MIPI data lanes */ +#define MIPI_POWER_DOWN 3 +#define MIPI_ECC_ERR_CLR 2 +#define MIPI_PD_LPRX_CLK 1 +#define MIPI_9B8B_EN 0 + +#define MIPI_RX_REG0 0x10 +#define MIPI_RX_REG1 0x11 +#define MIPI_RX_REG2 0x12 +#define MIPI_RX_REG3 0x13 +#define MIPI_RX_REG4 0x14 +/* bit positions */ +#define hsrx_rterm 4 +/* MIPI_RX_REGx[7:4] is hsrx_rterm[3:0], */ +/* x=0~4; x=0~3 are for data lanes 0~3, x=4 is for clock */ + +/* defines the HS RX termination impedance: */ +/* 0000: 125 Ohm */ +/* 0110: 100 Ohm (default) */ +/* 1111: 75 Ohm */ +#define sel_hs_dly 1 +/* MIPI_RX_REGx[3:1] is sel_hs_dly[2:0],*/ +/* x=0~4; x=0~3 are for data lanes 0~3, x=4 is for clock */ +/* defines the hsrx data delay; adjust the data/clock edge timing*/ +/* 000: no timing adjust*/ +/* 001: add 80ps delay */ +/* ... */ +/* 100: add 4*80ps delay (default)*/ +/* ... */ +/* 111: add 7*80ps delay */ +/* For every step, the adjust delay is about 80ps @TT, 130ps@SS, 54ps@FF */ +/* note: The descriptions above come from an internal documentation */ +/* mi2-analog_pinlist_v1.4.xlsx. */ +/* Test shows the actual delay is around 110ps per step for TT chips.*/ +/* MIPI_RX_REGx[0] is reserved. */ + +#define MIPI_RX_REG5 0x15 +/* bit positions */ +#define ref_sel_lpcd 6 /* MIPI_RX_REG5[7:6] is ref_sel_lpcd[1:0] */ +/* define the lpcd reference voltage */ +/* 00: 250 mV */ +/* 01: 300 mV (default) */ +/* 10: 350 mV */ +/* 11: 400 mV */ +#define ref_sel_lprx 4 /* MIPI_RX_REG5[5:4] is ref_sel_lprx[1:0] */ +/* define the lprx reference voltage */ +/* 00: 650 mV */ +/* 01: 700 mV (default) */ +/* 10: 750 mV */ +/* 11: 800 mV */ +/* MIPI_RX_REG5[3] is reserved. 
*/ +#define sel_lptx_term 0 /* MIPI_RX_REG5[2:0] is sel_lptx_term[2:0] */ +/* define the lptx termination (thermal code) */ +/* 111: 120 Ohm */ +/* 011: 150 Ohm */ +/* 001: 200 Ohm (default) */ +/* 000: 310 Ohm */ + +#define MIPI_RX_REG7 0x17 +/* bit positions */ +#define dpi_ck_delay 5 /* MIPI_RX_REG7[7:5] is dpi ck delay config */ +/* 0xx no delay */ +/* 100 120 ps */ +/* 101 120*2 ps (default) */ +/* 110 120*3 ps */ +/* 111 120*4 ps */ +/* MIPI_RX_REG7[4] is test enable: 1: enable, 0: disable */ +#define test_enable 4 +/* MIPI_RX_REG7[3:0] is test signal selection */ +#define test_signal_sel 0 +/* dtesto / atesto */ +/* 0000 pd_rt_ch2 / vref_lprx */ +/* 0001 pd_lpcd_ch2 / vref_lpcd */ +/* 0010 pd_lprx_ch2 / vgate_lp */ +/* 0011 pd_lptx_ch2 / avss */ +/* 0100 swap_ch2 */ +/* 0101 pd_rx */ +/* 0110 pd_ch2 */ +/* 0111 pd_hsrx_ch2 */ +/* 1000 lp_outn_ch2 */ +/* 1001 lp_outp_ch2 */ +/* 1010 lpcd_outn_ch2 */ +/* 1011 lpcd_outp_ch2 */ +/* 1100 data_ch2<2> */ +/* 1101 data_ch2<1> */ +/* 1110 data_ch2<0> */ +/* 1111 ck_byte_r */ + +#define MIPI_DIGITAL_ADJ_1 0x1B +/* bit positions */ +/* bit[7:4]: Integral part ratio of the adjust loop */ +#define DIFF_I_RATIO 4 +/* 0~7: 1/(2^(n+3)) */ +/* >= 8: reserved */ +/* bit[3:0]: The total adjust loop ratio to the feedback block */ +#define DIFF_K_RATIO 0 +/* 0~0xF: 1/(2^n) */ + +#define MIPI_PLL_M_NUM_23_16 0x1E +#define MIPI_PLL_M_NUM_15_8 0x1F +#define MIPI_PLL_M_NUM_7_0 0x20 +#define MIPI_PLL_N_NUM_23_16 0x21 +#define MIPI_PLL_N_NUM_15_8 0x22 +#define MIPI_PLL_N_NUM_7_0 0x23 + +#define MIPI_DIGITAL_PLL_6 0x2A +/* bit positions */ +/* bit[7:6]: VCO band control, only effective */ +/* when MIPI_PLL_FORCE_BAND_EN (0x84:0x2B[6]) is 1 */ +#define MIPI_PLL_BAND_REG 6 +/* f_vco frequency: */ +/* band 0: 1 ~ 1.15 GHz */ +/* band 1: 1.15 ~ 1.3 GHz */ +/* band 2: 1.3 ~ 1.5 GHz */ +/* band 3: 1.5 ~ 2.0 GHz */ + +/* band 3 is usable but not recommended, as using band 3: */ +/* a) The power consumption is higher. */ +/* b) For SS corner chips, VCO may not work at 2GHz. 
*/ +/* bit 5 is reserved */ +#define MIPI_M_NUM_READY 0x10 +#define MIPI_N_NUM_READY 0x08 +#define STABLE_INTEGER_CNT_EN 0x04 +#define MIPI_PLL_TEST_BIT 0 +/* bit[1:0]: test point output select - */ +/* 00: VCO power, 01: dvdd_pdt, 10: dvdd, 11: vcox */ + +#define MIPI_DIGITAL_PLL_7 0x2B +/* bit positions */ +#define MIPI_PLL_FORCE_N_EN 7 +#define MIPI_PLL_FORCE_BAND_EN 6 + +#define MIPI_PLL_VCO_TUNE_REG 4 +/* bit[5:4]: VCO metal capacitance - */ +/* 00: +20% fast, 01: +10% fast (default), 10: typical, 11: -10% slow */ +#define MIPI_PLL_VCO_TUNE_REG_VAL 0x30 +/* bit[5:4]: VCO metal capacitance */ + +#define MIPI_PLL_PLL_LDO_BIT 2 +/* bit[3:2]: vco_v2i power - */ +/* 00: 1.40V, 01: 1.45V (default), 10: 1.50V, 11: 1.55V */ +#define MIPI_PLL_RESET_N 0x02 +#define MIPI_FRQ_FORCE_NDET 0 + + + +#define MIPI_ALERT_CLR_0 0x2D +/* bit positions */ +#define HS_link_error_clear 7 +/* This bit itself is S/C, and it clears 0x84:0x31[7] */ + + +#define MIPI_ALERT_OUT_0 0x31 +/* bit positions */ +#define check_sum_err_hs_sync 7 +/* This bit is cleared by 0x84:0x2D[7] */ + +#define MIPI_DIGITAL_PLL_8 0x33 +/* bit positions */ +#define MIPI_POST_DIV_VAL 4 +/* n means divided by (n+1), n = 0~15 */ +#define MIPI_EN_LOCK_FRZ 3 +#define MIPI_FRQ_COUNTER_RST 2 +#define MIPI_FRQ_SET_REG_8 1 +/* bit 0 is reserved */ + +#define MIPI_DIGITAL_PLL_9 0x34 + +#define MIPI_DIGITAL_PLL_16 0x3B +/* bit positions */ +#define MIPI_FRQ_FREEZE_NDET 7 +#define MIPI_FRQ_REG_SET_ENABLE 6 +#define MIPI_REG_FORCE_SEL_EN 5 +#define MIPI_REG_SEL_DIV_REG 4 +#define MIPI_REG_FORCE_PRE_DIV_EN 3 +/* bit 2 is reserved */ +#define MIPI_FREF_D_IND 1 +#define REF_CLK_27000kHz 1 +#define REF_CLK_19200kHz 0 +#define MIPI_REG_PLL_PLL_TEST_ENABLE 0 + +#define MIPI_DIGITAL_PLL_18 0x3D +/* bit positions */ +#define FRQ_COUNT_RB_SEL 7 +#define REG_FORCE_POST_DIV_EN 6 +#define MIPI_DPI_SELECT 5 +#define SELECT_DSI 1 +#define SELECT_DPI 0 +#define REG_BAUD_DIV_RATIO 0 + +#define H_BLANK_L 0x3E +/* for DSC only */ + +#define H_BLANK_H 0x3F +/* for DSC only; note: bit[7:6] are reserved */ + +#define MIPI_SWAP 0x4A +/* bit positions */ +#define MIPI_SWAP_CH0 7 +#define MIPI_SWAP_CH1 6 +#define MIPI_SWAP_CH2 5 +#define MIPI_SWAP_CH3 4 +#define MIPI_SWAP_CLK 3 +/* bit[2:0] are reserved */ + +#define MIPI_HS_FIRST_PACKET_HEADER 0x67 +#define MIPI_HS_FIRST_PACKET_WIDTH_L 0x68 +#define MIPI_HS_FIRST_PACKET_WIDTH_H 0x69 +#define MIPI_HS_FIRST_PACKET 0x6A + + + +/*========================= END of I2C Address 0x84 ================*/ + + +/* DEV_ADDR = 0x7A or 0x7B , MIPI Rx Registers*/ +/* DEV_ADDR = 0x7A or 0x7B , MIPI Rx Registers*/ +#define MIPI_ANALOG_PWD_CTRL0 0x00 +#define MIPI_ANALOG_PWD_CTRL1 0x01 +#define MIPI_ANALOG_PWD_CTRL2 0x02 + + + +/*DPCD regs*/ +#define DPCD_DPCD_REV 0x00 +#define DPCD_MAX_LINK_RATE 0x01 +#define DPCD_MAX_LANE_COUNT 0x02 + + +/***************************************************************/ + +/*Comands status*/ +enum interface_status { + CMD_SUCCESS, + CMD_REJECT, + CMD_FAIL, + CMD_BUSY, + CMD_STATUS +}; + +enum PD_MSG_TYPE { + TYPE_PWR_SRC_CAP = 0x00, + TYPE_PWR_SNK_CAP = 0x01, + TYPE_DP_SNK_IDENTITY = 0x02, + TYPE_SVID = 0x03, + TYPE_GET_DP_SNK_CAP = 0x04, + TYPE_ACCEPT = 0x05, + TYPE_REJECT = 0x06, + TYPE_PSWAP_REQ = 0x10, + TYPE_DSWAP_REQ = 0x11, + TYPE_GOTO_MIN_REQ = 0x12, + TYPE_VCONN_SWAP_REQ = 0x13, + TYPE_VDM = 0x14, + TYPE_DP_SNK_CFG = 0x15, + TYPE_PWR_OBJ_REQ = 0x16, + TYPE_PD_STATUS_REQ = 0x17, + TYPE_DP_ALT_ENTER = 0x19, + TYPE_DP_ALT_EXIT = 0x1A, + TYPE_GET_SNK_CAP = 0x1B, + TYPE_SOP_PRIME = 0x1C, + 
TYPE_SOP_DOUBLE_PRIME = 0x1D, + TYPE_RESPONSE_TO_REQ = 0xF0, + TYPE_SOFT_RST = 0xF1, + TYPE_HARD_RST = 0xF2, + TYPE_RESTART = 0xF3, + TYPE_EXT_SRC_CAP = 0xA1, /* Source_Capabilities_Extended*/ + TYPE_EXT_SRC_STS = 0xA2, /* Source_Status*/ + TYPE_EXT_GET_BATT_CAP = 0xA3, /* Get_Battery_Cap*/ + TYPE_EXT_GET_BATT_STS = 0xA4, /* Get_Battery_ Status*/ + TYPE_EXT_BATT_CAP = 0xA5, /* Battery_Capabilities*/ + TYPE_EXT_GET_MFR_INFO = 0xA6, /* Get_Manufacturer_Info*/ + TYPE_EXT_MFR_INFO = 0xA7, /* Manufacturer_Info*/ + TYPE_EXT_PDFU_REQUEST = 0xA8, /* FW update Request*/ + TYPE_EXT_PDFU_RESPONSE = 0xA9, /* FW update Response*/ + TYPE_EXT_BATT_STS = 0xAA, /* PD_DATA_BATTERY_STATUS*/ + TYPE_EXT_ALERT = 0xAB, /* PD_DATA_ALERT*/ + TYPE_EXT_NOT_SUPPORTED = 0xAC, /* PD_CTRL_NOT_SUPPORTED*/ + TYPE_EXT_GET_SRC_CAP = 0xAD, /* PD_CTRL_GET_SOURCE_CAP_EXTENDED*/ + TYPE_EXT_GET_SRC_STS = 0xAE, /* PD_CTRL_GET_STATUS*/ + TYPE_EXT_FR_SWAP = 0xAF, /* PD_CTRL_FR_SWAP*/ + TYPE_FR_SWAP_SIGNAL = 0xB0, /* Fast Role Swap signal*/ +}; + +/* PDO : Power Data Object + * 1. The vSafe5V Fixed Supply Object shall always be the first object. + * 2. The remaining Fixed Supply Objects, + * if present, shall be sent in voltage order; lowest to highest. + * 3. The Battery Supply Objects, + * if present shall be sent in Minimum Voltage order; lowest to highest. + * 4. The Variable Supply (non battery) Objects, + * if present, shall be sent in Minimum Voltage order; lowest to highest. + */ +#define PDO_TYPE_FIXED ((u32)0 << 30) +#define PDO_TYPE_BATTERY ((u32)1 << 30) +#define PDO_TYPE_VARIABLE ((u32)2 << 30) +#define PDO_TYPE_MASK ((u32)3 << 30) +#define PDO_FIXED_DUAL_ROLE ((u32)1 << 29) /* Dual role device */ +#define PDO_FIXED_SUSPEND ((u32)1 << 28) /* USB Suspend supported */ +#define PDO_FIXED_EXTERNAL ((u32)1 << 27) /* Externally powered */ +#define PDO_FIXED_COMM_CAP ((u32)1 << 26) /* USB Communications Capable */ +#define PDO_FIXED_DATA_SWAP ((u32)1 << 25) /* Data role swap command */ +#define PDO_FIXED_PEAK_CURR ((u32)1 << 20) /* [21..20] Peak current */ +/* Voltage in 50mV units */ +#define PDO_FIXED_VOLT(mv) (u32)((((u32)mv)/50) << 10) +/* Max current in 10mA units */ +#define PDO_FIXED_CURR(ma) (u32)((((u32)ma)/10)) + +/*build a fixed PDO packet*/ +#define PDO_FIXED(mv, ma, flags) \ + (PDO_FIXED_VOLT(mv)\ + | PDO_FIXED_CURR(ma)\ + | (flags)) + +/*Pos in Data Object, the first index number begin from 0 */ +#define PDO_INDEX(n, dat) (dat << (n * PD_ONE_DATA_OBJECT_SIZE*sizeof(u8))) +#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & 0x3FF) << 20) +#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & 0x3FF) << 10) +#define PDO_VAR_OP_CURR(ma) ((((ma) / 10) & 0x3FF) << 0) + +#define PDO_VAR(min_mv, max_mv, op_ma) \ + (PDO_VAR_MIN_VOLT(min_mv) | PDO_VAR_MAX_VOLT(max_mv) | \ + PDO_VAR_OP_CURR(op_ma) | PDO_TYPE_VARIABLE) +#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & 0x3FF) << 20) +#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & 0x3FF) << 10) +#define PDO_BATT_OP_POWER(mw) ((((mw) / 250) & 0x3FF) << 0) +#define PDO_BATT(min_mv, max_mv, op_mw) \ + (PDO_BATT_MIN_VOLT(min_mv)\ + | PDO_BATT_MAX_VOLT(max_mv)\ + | PDO_BATT_OP_POWER(op_mw)\ + | PDO_TYPE_BATTERY) + +#define GET_PDO_TYPE(PDO) ((PDO & PDO_TYPE_MASK) >> 30) +#define GET_PDO_FIXED_DUAL_ROLE(PDO) ((PDO & PDO_FIXED_DUAL_ROLE) >> 29) +#define GET_PDO_FIXED_SUSPEND(PDO) ((PDO & PDO_FIXED_SUSPEND) >> 28) +#define GET_PDO_FIXED_EXTERNAL(PDO) ((PDO & PDO_FIXED_EXTERNAL) >> 27) +#define GET_PDO_FIXED_COMM_CAP(PDO) ((PDO & PDO_FIXED_COMM_CAP) >> 26) +#define GET_PDO_FIXED_DATA_SWAP(PDO) ((PDO & 
PDO_FIXED_DATA_SWAP) >> 25)
+#define GET_PDO_FIXED_PEAK_CURR(PDO) ((PDO >> 20) & 0x03)
+
+#define GET_PDO_FIXED_VOLT(PDO) (((PDO >> 10) & 0x3FF) * 50)
+#define GET_PDO_FIXED_CURR(PDO) ((PDO & 0x3FF) * 10)
+#define GET_VAR_MAX_VOLT(PDO) (((PDO >> 20) & 0x3FF) * 50)
+#define GET_VAR_MIN_VOLT(PDO) (((PDO >> 10) & 0x3FF) * 50)
+#define GET_VAR_MAX_CURR(PDO) ((PDO & 0x3FF) * 10)
+#define GET_BATT_MAX_VOLT(PDO) (((PDO >> 20) & 0x3FF) * 50)
+#define GET_BATT_MIN_VOLT(PDO) (((PDO >> 10) & 0x3FF) * 50)
+#define GET_BATT_OP_POWER(PDO) (((PDO) & 0x3FF) * 250)
+
+#define INTERFACE_TIMEOUT 30
+#define InterfaceSendBuf_Addr 0xc0
+#define InterfaceRecvBuf_Addr 0xe0
+#define YES 1
+#define NO 0
+#define ERR_CABLE_UNPLUG -1
+#define PD_ONE_DATA_OBJECT_SIZE 4
+#define PD_MAX_DATA_OBJECT_NUM 7
+#define VDO_SIZE (PD_ONE_DATA_OBJECT_SIZE * PD_MAX_DATA_OBJECT_NUM)
+#define PDO_FIXED_FLAGS (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP)
+
+/* 5000mV voltage */
+#define PD_VOLTAGE_5V 5000
+
+#define PD_MAX_VOLTAGE_20V 20000
+#define PD_MAX_VOLTAGE_21V 21000
+
+/* 0.9A current */
+#define PD_CURRENT_900MA 900
+#define PD_CURRENT_1500MA 1500
+
+#define PD_CURRENT_3A 3000
+
+#define PD_POWER_15W 15000
+
+#define PD_POWER_60W 60000
+
+/* RDO : Request Data Object */
+#define RDO_OBJ_POS(n) (((u32)(n) & 0x7) << 28)
+#define RDO_POS(rdo) ((((u32)(rdo)) >> 28) & 0x7)
+#define RDO_GIVE_BACK ((u32)1 << 27)
+#define RDO_CAP_MISMATCH ((u32)1 << 26)
+#define RDO_COMM_CAP ((u32)1 << 25)
+#define RDO_NO_SUSPEND ((u32)1 << 24)
+#define RDO_FIXED_VAR_OP_CURR(ma) (((((u32)ma) / 10) & 0x3FF) << 10)
+#define RDO_FIXED_VAR_MAX_CURR(ma) (((((u32)ma) / 10) & 0x3FF) << 0)
+
+#define RDO_BATT_OP_POWER(mw) (((((u32)mw) / 250) & 0x3FF) << 10)
+/* maximum operating power occupies bits [9:0] of a battery RDO */
+#define RDO_BATT_MAX_POWER(mw) (((((u32)mw) / 250) & 0x3FF) << 0)
+
+#define RDO_FIXED(n, op_ma, max_ma, flags) \
+	(RDO_OBJ_POS(n) | (flags) | \
+	RDO_FIXED_VAR_OP_CURR(op_ma) | \
+	RDO_FIXED_VAR_MAX_CURR(max_ma))
+
+#ifdef DEBUG_LOG_OUTPUT
+#define TRACE pr_info
+#define TRACE1 pr_info
+#define TRACE2 pr_info
+#define TRACE3 pr_info
+#else
+#define TRACE(fmt, arg...)
+#define TRACE1(fmt, arg...)
+#define TRACE2(fmt, arg...)
+#define TRACE3(fmt, arg...)
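/*
 * Illustrative example (editor's addition, not part of the patch): how the
 * PDO/RDO helpers defined above encode and decode a fixed-supply object,
 * using only macros from this header.
 *
 *   u32 pdo = PDO_FIXED(PD_VOLTAGE_5V, PD_CURRENT_900MA, PDO_FIXED_FLAGS);
 *   // voltage: 5000 / 50 = 100 in bits [19:10]; current: 900 / 10 = 90 in bits [9:0]
 *   // GET_PDO_FIXED_VOLT(pdo) == 5000, GET_PDO_FIXED_CURR(pdo) == 900
 *
 *   u32 rdo = RDO_FIXED(1, PD_CURRENT_900MA, PD_CURRENT_1500MA, RDO_COMM_CAP);
 *   // object position 1 lands in bits [30:28], so RDO_POS(rdo) == 1
 */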
+#endif + +#define BYTE unsigned char +#define unchar unsigned char +#define uint unsigned int +#define ulong unsigned long + +enum { + VIDEO_3D_NONE = 0x00, + VIDEO_3D_FRAME_PACKING = 0x01, + VIDEO_3D_TOP_AND_BOTTOM = 0x02, + VIDEO_3D_SIDE_BY_SIDE = 0x03, +}; + +struct RegisterValueConfig { + unsigned char slave_addr; + unsigned char reg; + unsigned char val; +}; + +#define AUX_ERR 1 +#define AUX_OK 0 + +#define MAX_BUF_CNT 6 +#define INTR_MASK_SETTING 0x0 + +enum HDCP_CAP_TYPE { + NO_HDCP_SUPPORT = 0x00, + HDCP14_SUPPORT = 0x01, + HDCP22_SUPPORT = 0x02, + HDCP_ALL_SUPPORT = 0x03 +}; + +#define XTAL_FRQ 27000000UL /* MI-2 clock frequency in Hz: 27 MHz*/ + +#define FLASH_LOAD_STA 0x05 +#define FLASH_LOAD_STA_CHK (1<<7) + +#endif /* __MI2_REG_H__ */ diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 39ac15ce47023055f5a2badb7e5a99c59e19b3dd..9e2ae02f31e08fbad87669a126761c015d25db44 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev, return -EINVAL; /* overflow checks for 32bit size calculations */ - /* NOTE: DIV_ROUND_UP() can overflow */ + if (args->bpp > U32_MAX - 8) + return -EINVAL; cpp = DIV_ROUND_UP(args->bpp, 8); - if (!cpp || cpp > 0xffffffffU / args->width) + if (cpp > U32_MAX / args->width) return -EINVAL; stride = cpp * args->width; - if (args->height > 0xffffffffU / stride) + if (args->height > U32_MAX / stride) return -EINVAL; /* test for wrap-around */ diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index b3c6e997ccdb0920c47fb19077ee9bd75ae91ab8..03244b3c985d7abe6d2c2743bf52b701fa01a7db 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) return -ENOMEM; filp->private_data = priv; + filp->f_mode |= FMODE_UNSIGNED_OFFSET; priv->filp = filp; priv->pid = get_pid(task_pid(current)); priv->minor = minor; diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 48ff29351c17c0b0b4b8dbf4409c30adc10734ab..0e34e2afbde76c185ea6aaec8eeceb802c86fda4 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -669,6 +669,9 @@ static int dp_audio_notify(struct dp_audio_private *audio, u32 state) int rc = 0; struct msm_ext_disp_init_data *ext = &audio->ext_audio_data; + if (!ext->intf_ops.audio_notify) + goto end; + rc = ext->intf_ops.audio_notify(audio->ext_pdev, &ext->codec, state); if (rc) { @@ -710,12 +713,14 @@ static int dp_audio_on(struct dp_audio *dp_audio) audio->session_on = true; - rc = ext->intf_ops.audio_config(audio->ext_pdev, - &ext->codec, - EXT_DISPLAY_CABLE_CONNECT); - if (rc) { - pr_err("failed to config audio, err=%d\n", rc); - goto end; + if (ext->intf_ops.audio_config) { + rc = ext->intf_ops.audio_config(audio->ext_pdev, + &ext->codec, + EXT_DISPLAY_CABLE_CONNECT); + if (rc) { + pr_err("failed to config audio, err=%d\n", rc); + goto end; + } } rc = dp_audio_notify(audio, EXT_DISPLAY_CABLE_CONNECT); @@ -752,11 +757,13 @@ static int dp_audio_off(struct dp_audio *dp_audio) pr_debug("success\n"); end: - rc = ext->intf_ops.audio_config(audio->ext_pdev, - &ext->codec, - EXT_DISPLAY_CABLE_DISCONNECT); - if (rc) - pr_err("failed to config audio, err=%d\n", rc); + if (ext->intf_ops.audio_config) { + rc = ext->intf_ops.audio_config(audio->ext_pdev, + &ext->codec, + EXT_DISPLAY_CABLE_DISCONNECT); + if (rc) + pr_err("failed to config audio, 
err=%d\n", rc); + } audio->session_on = false; audio->engine_on = false; diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 7731b96aac30ca454410be29ff16a3b5b2855ba8..8fe0ec856e5260faa1c838cd3ce8fc3a9304d089 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -371,11 +371,8 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux, bool i2c_read = input_msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); - if (!i2c_mot || !i2c_read || (input_msg->size == 0)) { - /* reset the offset for all other transaction types */ - aux->offset = 0; + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) return; - } /* * Sending the segment value and EDID offset will be performed @@ -498,12 +495,6 @@ static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux, goto address_error; } - if ((msg->size + aux->offset) > SZ_256) { - pr_err("invalid edid access: offset=0x%x, size=0x%x\n", - aux->offset, msg->size); - goto address_error; - } - if (aux->native) { if (aux->read) { aux->dp_aux.reg = msg->address; diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 6519a035d831794fca819f417131db1a51ab66e8..24d597b8932f5be7802ae3654efa768486e41686 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -1573,6 +1573,57 @@ static void dp_catalog_ctrl_channel_alloc(struct dp_catalog_ctrl *ctrl, dp_write(catalog, io_data, DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); } +static void dp_catalog_ctrl_channel_dealloc(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_slot, u32 tot_slot_cnt) +{ + struct dp_catalog_private *catalog; + struct dp_io_data *io_data = NULL; + u32 i, slot_reg_1, slot_reg_2, slot; + u32 reg_off = 0; + + if (!ctrl || ch >= DP_STREAM_MAX) { + pr_err("invalid input. 
ch %d\n", ch); + return; + } + + if (ch_start_slot > DP_MAX_TIME_SLOTS || + (ch_start_slot + tot_slot_cnt > DP_MAX_TIME_SLOTS)) { + pr_err("invalid slots start %d, tot %d\n", + ch_start_slot, tot_slot_cnt); + return; + } + + catalog = dp_catalog_get_priv(ctrl); + + io_data = catalog->io.dp_link; + + pr_debug("dealloc ch %d, start_slot %d, tot_slot %d\n", + ch, ch_start_slot, tot_slot_cnt); + + if (ch == DP_STREAM_1) + reg_off = DP_DP1_TIMESLOT_1_32 - DP_DP0_TIMESLOT_1_32; + + slot_reg_1 = dp_read(catalog, io_data, DP_DP0_TIMESLOT_1_32 + reg_off); + slot_reg_2 = dp_read(catalog, io_data, DP_DP0_TIMESLOT_33_63 + reg_off); + + ch_start_slot = ch_start_slot - 1; + for (i = 0; i < tot_slot_cnt; i++) { + if (ch_start_slot < 33) { + slot_reg_1 &= ~BIT(ch_start_slot); + } else { + slot = ch_start_slot - 33; + slot_reg_2 &= ~BIT(slot); + } + ch_start_slot++; + } + + pr_debug("dealloc ch:%d slot_reg_1:%d, slot_reg_2:%d\n", ch, + slot_reg_1, slot_reg_2); + + dp_write(catalog, io_data, DP_DP0_TIMESLOT_1_32 + reg_off, slot_reg_1); + dp_write(catalog, io_data, DP_DP0_TIMESLOT_33_63 + reg_off, slot_reg_2); +} + static void dp_catalog_ctrl_update_rg(struct dp_catalog_ctrl *ctrl, u32 ch, u32 x_int, u32 y_frac_enum) { @@ -2111,6 +2162,7 @@ struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_parser *parser) .read_act_complete_sts = dp_catalog_ctrl_read_act_complete_sts, .channel_alloc = dp_catalog_ctrl_channel_alloc, .update_rg = dp_catalog_ctrl_update_rg, + .channel_dealloc = dp_catalog_ctrl_channel_dealloc, }; struct dp_catalog_audio audio = { .init = dp_catalog_audio_init, diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h index 22dd72c16a0f1b2ece85ebd4b64bbd33f31f999a..c9f1995930ad012caa5a285bfd14029d19fbade0 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.h +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -125,6 +125,8 @@ struct dp_catalog_ctrl { u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt); void (*update_rg)(struct dp_catalog_ctrl *ctrl, u32 ch, u32 x_int, u32 y_frac_enum); + void (*channel_dealloc)(struct dp_catalog_ctrl *ctrl, + u32 ch, u32 ch_start_timeslot, u32 tot_ch_cnt); }; #define HEADER_BYTE_2_BIT 0 diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index ba9566e9170c528a4d9749ba39e847eadb9c3e37..779451c2a99de559b5320b3906d6c32fcbb9d88d 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "dp_ctrl.h" @@ -147,7 +148,7 @@ static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl, enum dp_stream_id strm) if (!wait_for_completion_timeout(&ctrl->idle_comp, idle_pattern_completion_timeout_ms)) - pr_warn("PUSH_IDLE pattern timedout\n"); + pr_warn("PUSH_IDLE time out\n"); pr_debug("mainlink off done\n"); } @@ -179,11 +180,11 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl) ret = wait_for_completion_timeout(&ctrl->video_comp, HZ / 2); if (ret <= 0) { - pr_err("Link Train timedout\n"); - ret = -EINVAL; + pr_err("SEND_VIDEO time out (%d)\n", ret); + return -EINVAL; } - return ret; + return 0; } static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl, @@ -838,21 +839,97 @@ static void dp_ctrl_reset(struct dp_ctrl *dp_ctrl) ctrl->catalog->reset(ctrl->catalog); } +static void dp_ctrl_send_video(struct dp_ctrl_private *ctrl) +{ + ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO); +} + +static void dp_ctrl_mst_calculate_rg(struct dp_ctrl_private *ctrl, + struct dp_panel *panel, u32 *p_x_int, u32 
*p_y_frac_enum) +{ + u64 min_slot_cnt, max_slot_cnt; + u64 raw_target_sc, target_sc_fixp; + u64 ts_denom, ts_enum, ts_int; + u64 pclk = panel->pinfo.pixel_clk_khz; + u64 lclk = panel->link_info.rate; + u64 lanes = panel->link_info.num_lanes; + u64 bpp = panel->pinfo.bpp; + u64 pbn = panel->pbn; + u64 numerator, denominator, temp, temp1, temp2; + u32 x_int = 0, y_frac_enum = 0; + u64 target_strm_sym, ts_int_fixp, ts_frac_fixp, y_frac_enum_fixp; + + /* min_slot_cnt */ + numerator = pclk * bpp * 64 * 1000; + denominator = lclk * lanes * 8 * 1000; + min_slot_cnt = drm_fixp_from_fraction(numerator, denominator); + + /* max_slot_cnt */ + numerator = pbn * 54 * 1000; + denominator = lclk * lanes; + max_slot_cnt = drm_fixp_from_fraction(numerator, denominator); + + /* raw_target_sc */ + numerator = max_slot_cnt + min_slot_cnt; + denominator = drm_fixp_from_fraction(2, 1); + raw_target_sc = drm_fixp_div(numerator, denominator); + + /* target_sc */ + temp = drm_fixp_from_fraction(256 * lanes, 1); + numerator = drm_fixp_mul(raw_target_sc, temp); + denominator = drm_fixp_from_fraction(256 * lanes, 1); + target_sc_fixp = drm_fixp_div(numerator, denominator); + + ts_enum = 256 * lanes; + ts_denom = drm_fixp_from_fraction(256 * lanes, 1); + ts_int = drm_fixp2int(target_sc_fixp); + + temp = drm_fixp2int_ceil(raw_target_sc); + if (temp != ts_int) { + temp = drm_fixp_from_fraction(ts_int, 1); + temp1 = raw_target_sc - temp; + temp2 = drm_fixp_mul(temp1, ts_denom); + ts_enum = drm_fixp2int(temp2); + } + + /* target_strm_sym */ + ts_int_fixp = drm_fixp_from_fraction(ts_int, 1); + ts_frac_fixp = drm_fixp_from_fraction(ts_enum, drm_fixp2int(ts_denom)); + temp = ts_int_fixp + ts_frac_fixp; + temp1 = drm_fixp_from_fraction(lanes, 1); + target_strm_sym = drm_fixp_mul(temp, temp1); + + /* x_int */ + x_int = drm_fixp2int(target_strm_sym); + + /* y_enum_frac */ + temp = drm_fixp_from_fraction(x_int, 1); + temp1 = target_strm_sym - temp; + temp2 = drm_fixp_from_fraction(256, 1); + y_frac_enum_fixp = drm_fixp_mul(temp1, temp2); + + temp1 = drm_fixp2int(y_frac_enum_fixp); + temp2 = drm_fixp2int_ceil(y_frac_enum_fixp); + + y_frac_enum = (u32)((temp1 == temp2) ? 
temp1 : temp1 + 1); + + *p_x_int = x_int; + *p_y_frac_enum = y_frac_enum; + + pr_debug("x_int: %d, y_frac_enum: %d\n", x_int, y_frac_enum); +} + static int dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl, struct dp_panel *panel) { u32 x_int, y_frac_enum, lanes, bw_code; bool act_complete; - if (!ctrl->mst_mode) { - ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO); + if (!ctrl->mst_mode) return 0; - } DP_MST_DEBUG("mst stream channel allocation\n"); - panel->hw_cfg(panel); - ctrl->catalog->channel_alloc(ctrl->catalog, panel->stream_id, panel->channel_start_slot, @@ -861,9 +938,7 @@ static int dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl, lanes = ctrl->link->link_params.lane_count; bw_code = ctrl->link->link_params.bw_code; - x_int = (u32)(lanes * panel->channel_total_slots); - y_frac_enum = (u32)(256 * ((lanes * lanes * - panel->channel_total_slots) - x_int)); + dp_ctrl_mst_calculate_rg(ctrl, panel, &x_int, &y_frac_enum); ctrl->catalog->update_rg(ctrl->catalog, panel->stream_id, x_int, y_frac_enum); @@ -875,8 +950,6 @@ static int dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl, DP_MST_DEBUG("mst lane_cnt:%d, bw:%d, x_int:%d, y_frac:%d\n", lanes, bw_code, x_int, y_frac_enum); - ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO); - ctrl->catalog->trigger_act(ctrl->catalog); msleep(20); /* needs 1 frame time */ @@ -897,8 +970,7 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) struct dp_ctrl_private *ctrl; if (!dp_ctrl || !panel) { - rc = -EINVAL; - goto end; + return -EINVAL; } ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); @@ -906,7 +978,7 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) rc = dp_ctrl_enable_stream_clocks(ctrl, panel); if (rc) { pr_err("failure on stream clock enable\n"); - goto end; + return rc; } if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { @@ -914,18 +986,65 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) return 0; } + rc = panel->hw_cfg(panel); + if (rc) + return rc; + + dp_ctrl_send_video(ctrl); + rc = dp_ctrl_mst_stream_setup(ctrl, panel); if (rc) - goto end; + return rc; + + rc = dp_ctrl_wait4video_ready(ctrl); + if (rc) + return rc; - dp_ctrl_wait4video_ready(ctrl); link_ready = ctrl->catalog->mainlink_ready(ctrl->catalog); pr_debug("mainlink %s\n", link_ready ? 
"READY" : "NOT READY"); -end: return rc; } +static void dp_ctrl_mst_stream_pre_off(struct dp_ctrl *dp_ctrl, + struct dp_panel *panel) +{ + struct dp_ctrl_private *ctrl; + bool act_complete; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + if (!ctrl->mst_mode) + return; + + ctrl->catalog->channel_dealloc(ctrl->catalog, + panel->stream_id, + panel->channel_start_slot, + panel->channel_total_slots); + + ctrl->catalog->trigger_act(ctrl->catalog); + msleep(20); /* needs 1 frame time */ + ctrl->catalog->read_act_complete_sts(ctrl->catalog, &act_complete); + + if (!act_complete) + pr_err("mst stream_off act trigger complete failed\n"); + else + DP_MST_DEBUG("mst stream_off ACT trigger complete SUCCESS\n"); +} + +static void dp_ctrl_stream_pre_off(struct dp_ctrl *dp_ctrl, + struct dp_panel *panel) +{ + if (!dp_ctrl || !panel) { + pr_err("invalid input\n"); + return; + } + + dp_ctrl_push_idle(dp_ctrl, panel->stream_id); + + dp_ctrl_mst_stream_pre_off(dp_ctrl, panel); +} + static void dp_ctrl_stream_off(struct dp_ctrl *dp_ctrl, struct dp_panel *panel) { struct dp_ctrl_private *ctrl; @@ -1101,6 +1220,7 @@ struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in) dp_ctrl->process_phy_test_request = dp_ctrl_process_phy_test_request; dp_ctrl->stream_on = dp_ctrl_stream_on; dp_ctrl->stream_off = dp_ctrl_stream_off; + dp_ctrl->stream_pre_off = dp_ctrl_stream_pre_off; return dp_ctrl; error: diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index e36543ae4a7b34a4392dbd72b27ba5b9e35039df..716fad77e7348416fb4000a7f83be7b151831371 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -36,6 +36,7 @@ struct dp_ctrl { int (*link_maintenance)(struct dp_ctrl *dp_ctrl); int (*stream_on)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); void (*stream_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); + void (*stream_pre_off)(struct dp_ctrl *dp_ctrl, struct dp_panel *panel); }; struct dp_ctrl_in { diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index c39051b8bd4a3dfdf930b58e1c6e05d127d188df..875be87084f5b59a0608dd174cf7511083728115 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -399,6 +399,72 @@ static ssize_t dp_debug_mst_mode_write(struct file *file, return len; } +static ssize_t dp_debug_max_pclk_khz_write(struct file *file, + const char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char buf[SZ_8]; + size_t len = 0; + u32 max_pclk = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + len = min_t(size_t, count, SZ_8 - 1); + if (copy_from_user(buf, user_buff, len)) + return 0; + + buf[len] = '\0'; + + if (kstrtoint(buf, 10, &max_pclk) != 0) + return 0; + + if (max_pclk > debug->parser->max_pclk_khz) + pr_err("requested: %d, max_pclk_khz:%d\n", max_pclk, + debug->parser->max_pclk_khz); + else + debug->dp_debug.max_pclk_khz = max_pclk; + + pr_debug("max_pclk_khz: %d\n", max_pclk); + + return len; +} + +static ssize_t dp_debug_max_pclk_khz_read(struct file *file, + char __user *user_buff, size_t count, loff_t *ppos) +{ + struct dp_debug_private *debug = file->private_data; + char *buf; + u32 len = 0; + + if (!debug) + return -ENODEV; + + if (*ppos) + return 0; + + buf = kzalloc(SZ_4K, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += snprintf(buf + len, (SZ_4K - len), + "max_pclk_khz = %d, org: %d\n", + debug->dp_debug.max_pclk_khz, + debug->parser->max_pclk_khz); + + if 
(copy_to_user(user_buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + kfree(buf); + return len; +} + static ssize_t dp_debug_mst_sideband_mode_write(struct file *file, const char __user *user_buff, size_t count, loff_t *ppos) { @@ -1154,6 +1220,12 @@ static const struct file_operations mst_sideband_mode_fops = { .write = dp_debug_mst_sideband_mode_write, }; +static const struct file_operations max_pclk_khz_fops = { + .open = simple_open, + .write = dp_debug_max_pclk_khz_write, + .read = dp_debug_max_pclk_khz_read, +}; + static int dp_debug_init(struct dp_debug *dp_debug) { int rc = 0; @@ -1309,6 +1381,14 @@ static int dp_debug_init(struct dp_debug *dp_debug) DEBUG_NAME, rc); } + file = debugfs_create_file("max_pclk_khz", 0644, dir, + debug, &max_pclk_khz_fops); + if (IS_ERR_OR_NULL(file)) { + rc = PTR_ERR(file); + pr_err("[%s] debugfs max_pclk_khz failed, rc=%d\n", + DEBUG_NAME, rc); + } + return 0; error_remove_dir: diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index ef989628e7b956322c627a180a8ba77d4b2ef395..a38f1c1e1643a3acdfdf56df66c55061561e6157 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -27,6 +27,7 @@ * @hdisplay: used to filter out hdisplay value * @vrefresh: used to filter out vrefresh value * @tpg_state: specifies whether tpg feature is enabled + * @max_pclk_khz: max pclk supported */ struct dp_debug { bool debug_en; @@ -37,6 +38,7 @@ struct dp_debug { int hdisplay; int vrefresh; bool tpg_state; + u32 max_pclk_khz; u8 *(*get_edid)(struct dp_debug *dp_debug); }; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 89cd4cc6091ae25487e8bdd744f5806da2e6d9d8..4f55f1bfb589bba7b033879f9e45ae665ffb071b 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -564,7 +564,12 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) dp->link->process_request(dp->link); dp->panel->handle_sink_request(dp->panel); - dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz; + if (dp->debug->max_pclk_khz) + dp->dp_display.max_pclk_khz = dp->debug->max_pclk_khz; + else + dp->dp_display.max_pclk_khz = dp->parser->max_pclk_khz; + + pr_debug("dp max_pclk_khz = %d\n", dp->dp_display.max_pclk_khz); dp_display_process_mst_hpd_high(dp); notify: @@ -632,8 +637,11 @@ static int dp_display_process_hpd_low(struct dp_display_private *dp) int rc = 0, idx; struct dp_panel *dp_panel; + mutex_lock(&dp->session_lock); + if (!dp->dp_display.is_connected) { pr_debug("HPD already off\n"); + mutex_unlock(&dp->session_lock); return 0; } @@ -646,10 +654,14 @@ static int dp_display_process_hpd_low(struct dp_display_private *dp) dp_panel = dp->active_panels[idx]; - if (dp_panel->audio_supported) + if (dp_panel->audio_supported) { dp_panel->audio->off(dp_panel->audio); + dp_panel->audio_supported = false; + } } + mutex_unlock(&dp->session_lock); + dp_display_process_mst_hpd_low(dp); rc = dp_display_send_hpd_notification(dp, false); @@ -786,12 +798,17 @@ static void dp_display_handle_maintenance_req(struct dp_display_private *dp) int idx; struct dp_panel *dp_panel; + mutex_lock(&dp->session_lock); + for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) { if (!dp->active_panels[idx]) continue; dp_panel = dp->active_panels[idx]; + dp->ctrl->stream_pre_off(dp->ctrl, dp_panel); + dp->ctrl->stream_off(dp->ctrl, dp_panel); + mutex_lock(&dp_panel->audio->ops_lock); if (dp_panel->audio_supported) @@ -806,11 +823,15 @@ static void 
dp_display_handle_maintenance_req(struct dp_display_private *dp) dp_panel = dp->active_panels[idx]; + dp->ctrl->stream_on(dp->ctrl, dp_panel); + if (dp_panel->audio_supported) dp_panel->audio->on(dp_panel->audio); mutex_unlock(&dp_panel->audio->ops_lock); } + + mutex_unlock(&dp->session_lock); } static void dp_display_mst_attention(struct dp_display_private *dp) @@ -829,11 +850,6 @@ static void dp_display_attention_work(struct work_struct *work) if (!dp->power_on) goto mst_attention; - if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) { - if (!dp->hdcp.ops->cp_irq(dp->hdcp.data)) - goto mst_attention; - } - if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) { dp_display_handle_disconnect(dp); @@ -861,16 +877,17 @@ static void dp_display_attention_work(struct work_struct *work) goto mst_attention; } - if (dp->link->sink_request & DP_LINK_STATUS_UPDATED) { + if (dp->link->sink_request & DP_TEST_LINK_TRAINING) { + dp->link->send_test_response(dp->link); dp_display_handle_maintenance_req(dp); goto mst_attention; } - if (dp->link->sink_request & DP_TEST_LINK_TRAINING) { - dp->link->send_test_response(dp->link); + if (dp->link->sink_request & DP_LINK_STATUS_UPDATED) dp_display_handle_maintenance_req(dp); - } + if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq) + dp->hdcp.ops->cp_irq(dp->hdcp.data); mst_attention: dp_display_mst_attention(dp); } @@ -1040,6 +1057,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp) panel_in.catalog = &dp->catalog->panel; panel_in.link = dp->link; panel_in.connector = dp->dp_display.base_connector; + panel_in.base_panel = NULL; dp->panel = dp_panel_get(&panel_in); if (IS_ERR(dp->panel)) { @@ -1190,14 +1208,45 @@ static int dp_display_set_mode(struct dp_display *dp_display, void *panel, return 0; } -static int dp_display_prepare(struct dp_display *dp, void *panel) +static int dp_display_prepare(struct dp_display *dp_display, void *panel) { + struct dp_display_private *dp; + struct dp_panel *dp_panel; + + if (!dp_display || !panel) { + pr_err("invalid input\n"); + return -EINVAL; + } + + dp_panel = panel; + if (!dp_panel->connector) { + pr_err("invalid connector input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + if (atomic_read(&dp->aborted)) + goto end; + + dp->aux->init(dp->aux, dp->parser->aux_cfg); + + if (dp->debug->psm_enabled) { + dp->link->psm_config(dp->link, &dp->panel->link_info, false); + dp->debug->psm_enabled = false; + } + +end: + mutex_unlock(&dp->session_lock); + return 0; } static int dp_display_set_stream_info(struct dp_display *dp_display, void *panel, u32 ch_id, u32 ch_start_slot, - u32 ch_tot_slots) + u32 ch_tot_slots, u32 pbn) { int rc = 0; struct dp_panel *dp_panel; @@ -1209,7 +1258,7 @@ static int dp_display_set_stream_info(struct dp_display *dp_display, dp_panel = panel; dp_panel->set_stream_info(dp_panel, ch_id, - ch_start_slot, ch_tot_slots); + ch_start_slot, ch_tot_slots, pbn); return rc; } @@ -1258,16 +1307,11 @@ static int dp_display_enable(struct dp_display *dp_display, void *panel) goto end; } - dp->aux->init(dp->aux, dp->parser->aux_cfg); - - if (dp->debug->psm_enabled) { - dp->link->psm_config(dp->link, &dp->panel->link_info, false); - dp->debug->psm_enabled = false; - } - rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active); - if (!rc) - dp->power_on = true; + if (rc) + goto end; + + dp->power_on = true; stream_setup: rc = dp_display_stream_enable(dp, panel); @@ -1282,7 +1326,6 @@ static void 
dp_display_stream_post_enable(struct dp_display_private *dp, { dp_panel->spd_config(dp_panel); dp_panel->setup_hdr(dp_panel, NULL); - dp_panel->hw_cfg(dp_panel); dp_panel->audio->register_ext_disp(dp_panel->audio); } @@ -1346,9 +1389,8 @@ static int dp_display_post_enable(struct dp_display *dp_display, void *panel) static int dp_display_stream_pre_disable(struct dp_display_private *dp, struct dp_panel *dp_panel) { - dp->ctrl->push_idle(dp->ctrl, dp_panel->stream_id); - dp_panel->audio->deregister_ext_disp(dp_panel->audio); + dp->ctrl->stream_pre_off(dp->ctrl, dp_panel); return 0; } @@ -1381,17 +1423,17 @@ static int dp_display_pre_disable(struct dp_display *dp_display, void *panel) dp->hdcp.ops->off(dp->hdcp.data); } - if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) && - dp->usbpd->alt_mode_cfg_done) { - if (dp_panel->audio_supported) - dp_panel->audio->off(dp_panel->audio); + if (dp_panel->audio_supported) + dp_panel->audio->off(dp_panel->audio); + + rc = dp_display_stream_pre_disable(dp, dp_panel); + if (dp->usbpd->hpd_high && !dp_display_is_sink_count_zero(dp) && + dp->usbpd->alt_mode_cfg_done && !dp->mst.mst_active) { dp->link->psm_config(dp->link, &dp->panel->link_info, true); dp->debug->psm_enabled = true; } - rc = dp_display_stream_pre_disable(dp, dp_panel); - end: mutex_unlock(&dp->session_lock); return 0; @@ -1446,10 +1488,7 @@ static int dp_display_disable(struct dp_display *dp_display, void *panel) dp->dp_display.is_sst_connected = false; } - dp->aux->deinit(dp->aux); dp->power_on = false; - dp->aux->state = DP_STATE_CTRL_POWERED_OFF; - complete_all(&dp->notification_comp); end: mutex_unlock(&dp->session_lock); return 0; @@ -1500,8 +1539,35 @@ static struct dp_debug *dp_get_debug(struct dp_display *dp_display) return dp->debug; } -static int dp_display_unprepare(struct dp_display *dp, void *panel) +static int dp_display_unprepare(struct dp_display *dp_display, void *panel) { + struct dp_display_private *dp; + + if (!dp_display || !panel) { + pr_err("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->session_lock); + + if (dp->active_stream_cnt) + goto end; + + if (atomic_read(&dp->aborted)) + goto end; + + if (!dp->mst.mst_active) { + dp->aux->deinit(dp->aux); + dp->aux->state = DP_STATE_CTRL_POWERED_OFF; + } + + complete_all(&dp->notification_comp); + +end: + mutex_unlock(&dp->session_lock); + return 0; } @@ -1730,6 +1796,7 @@ static int dp_display_mst_connector_install(struct dp_display *dp_display, panel_in.catalog = &dp->catalog->panel; panel_in.link = dp->link; panel_in.connector = connector; + panel_in.base_panel = dp->panel; dp_panel = dp_panel_get(&panel_in); if (IS_ERR(dp_panel)) { @@ -1975,6 +2042,7 @@ static struct platform_driver dp_display_driver = { .driver = { .name = "msm-dp-display", .of_match_table = dp_dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 6c878d1c356a4ed3fe768341eb8df2f89cd8124a..3f34d35566b44675816ad76360d0e41a967f9241 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -87,7 +87,7 @@ struct dp_display { struct dp_mst_caps *mst_caps); int (*set_stream_info)(struct dp_display *dp_display, void *panel, u32 ch_id, u32 ch_start_slot, - u32 ch_tot_slots); + u32 ch_tot_slots, u32 pbn); }; int dp_display_get_num_of_displays(void); diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 
a985969dc3af91e41ebda009b2759d07674df8df..d2fe1105fcfe8fc51d3d27e95ead6cedcb8f88d9 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -155,7 +155,7 @@ static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge) } /* for SST force stream id, start slot and total slots to 0 */ - dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0); + dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0); rc = dp->enable(dp, bridge->dp_panel); if (rc) { @@ -219,6 +219,11 @@ static void dp_bridge_disable(struct drm_bridge *drm_bridge) dp = bridge->display; + if (!dp) { + pr_err("dp is null\n"); + return; + } + if (dp) sde_connector_helper_bridge_disable(bridge->connector); diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c index c9726552e216887e07c4bb7f384908e097bd5228..4633c633b6b967f4fb5162f53e82488efed3b7ac 100644 --- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c @@ -58,6 +58,10 @@ struct dp_drm_mst_fw_helper_ops { int slots); int (*get_ch_start_slot)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int channel_id); + void (*reset_vcpi_slots)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); + void (*deallocate_vcpi)(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); }; struct dp_mst_sim_port { @@ -192,6 +196,16 @@ static int drm_dp_sim_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) return 0; } +static void drm_dp_sim_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ +} + +static void drm_dp_sim_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ +} + static enum drm_connector_status drm_dp_sim_mst_detect_port( struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, @@ -268,6 +282,8 @@ static const struct dp_drm_mst_fw_helper_ops drm_dp_mst_fw_helper_ops = { .topology_mgr_set_mst = drm_dp_mst_topology_mgr_set_mst, .get_ch_start_slot = _drm_dp_mst_get_ch_start_slot, .atomic_release_vcpi_slots = drm_dp_atomic_release_vcpi_slots, + .reset_vcpi_slots = drm_dp_mst_reset_vcpi_slots, + .deallocate_vcpi = drm_dp_mst_deallocate_vcpi, }; static const struct dp_drm_mst_fw_helper_ops drm_dp_sim_mst_fw_helper_ops = { @@ -282,6 +298,8 @@ static const struct dp_drm_mst_fw_helper_ops drm_dp_sim_mst_fw_helper_ops = { .topology_mgr_set_mst = drm_dp_sim_mst_topology_mgr_set_mst, .get_ch_start_slot = _drm_dp_sim_mst_get_ch_start_slot, .atomic_release_vcpi_slots = drm_dp_sim_atomic_release_vcpi_slots, + .reset_vcpi_slots = drm_dp_sim_reset_vcpi_slots, + .deallocate_vcpi = drm_dp_sim_deallocate_vcpi, }; /* DP MST Bridge OPs */ @@ -352,7 +370,7 @@ static bool _dp_mst_compute_config(struct dp_mst_bridge *dp_bridge) DP_MST_DEBUG("enter\n"); - bpp = 24; + bpp = dp_bridge->dp_mode.timing.bpp; mst_pbn = mst->mst_fw_cbs->calc_pbn_mode( dp_bridge->drm_mode.crtc_clock, bpp); @@ -425,6 +443,44 @@ static void _dp_mst_bridge_pre_enable_part2(struct dp_mst_bridge *dp_bridge) dp_bridge->id); } +static void _dp_mst_bridge_pre_disable_part1(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct drm_dp_mst_port *port = c_conn->mst_port; + + DP_MST_DEBUG("enter\n"); + + mst->mst_fw_cbs->reset_vcpi_slots(&mst->mst_mgr, port); + + mst->mst_fw_cbs->update_payload_part1(&mst->mst_mgr); + + DP_MST_DEBUG("mst bridge 
[%d] _pre disable part-1 complete\n", + dp_bridge->id); +} + +static void _dp_mst_bridge_pre_disable_part2(struct dp_mst_bridge *dp_bridge) +{ + struct dp_display *dp_display = dp_bridge->display; + struct dp_mst_private *mst = dp_display->dp_mst_prv_info; + struct sde_connector *c_conn = + to_sde_connector(dp_bridge->connector); + struct drm_dp_mst_port *port = c_conn->mst_port; + + DP_MST_DEBUG("enter\n"); + + mst->mst_fw_cbs->check_act_status(&mst->mst_mgr); + + mst->mst_fw_cbs->update_payload_part2(&mst->mst_mgr); + + mst->mst_fw_cbs->deallocate_vcpi(&mst->mst_mgr, port); + + DP_MST_DEBUG("mst bridge [%d] _pre disable part-2 complete\n", + dp_bridge->id); +} + static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge) { int rc = 0; @@ -454,9 +510,6 @@ static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge) return; } - _dp_mst_compute_config(bridge); - _dp_mst_bridge_pre_enable_part1(bridge); - rc = dp->prepare(dp, bridge->dp_panel); if (rc) { pr_err("[%d] DP display prepare failed, rc=%d\n", @@ -464,8 +517,11 @@ static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge) return; } + _dp_mst_compute_config(bridge); + _dp_mst_bridge_pre_enable_part1(bridge); + dp->set_stream_info(dp, bridge->dp_panel, bridge->id, - bridge->start_slot, bridge->slots); + bridge->start_slot, bridge->slots, bridge->pbn); rc = dp->enable(dp, bridge->dp_panel); if (rc) { @@ -533,6 +589,8 @@ static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge) sde_connector_helper_bridge_disable(bridge->connector); + _dp_mst_bridge_pre_disable_part1(bridge); + rc = dp->pre_disable(dp, bridge->dp_panel); if (rc) { pr_err("[%d] DP display pre disable failed, rc=%d\n", @@ -540,6 +598,8 @@ static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge) return; } + _dp_mst_bridge_pre_disable_part2(bridge); + DP_MST_DEBUG("mst bridge [%d] disable complete\n", bridge->id); } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 28ef79db94b5bf770e7a9a74d58733b9bf272cbe..86744209216f454f5c06c4525e9d4edca1007158 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -1188,7 +1188,7 @@ static void dp_panel_edid_deregister(struct dp_panel_private *panel) static int dp_panel_set_stream_info(struct dp_panel *dp_panel, enum dp_stream_id stream_id, u32 ch_start_slot, - u32 ch_tot_slots) + u32 ch_tot_slots, u32 pbn) { if (!dp_panel || stream_id > DP_STREAM_MAX) { pr_err("invalid input. 
stream_id: %d\n", stream_id); @@ -1198,6 +1198,7 @@ static int dp_panel_set_stream_info(struct dp_panel *dp_panel, dp_panel->stream_id = stream_id; dp_panel->channel_start_slot = ch_start_slot; dp_panel->channel_total_slots = ch_tot_slots; + dp_panel->pbn = pbn; return 0; } @@ -1257,7 +1258,7 @@ static int dp_panel_deinit_panel_info(struct dp_panel *dp_panel) if (!panel->custom_edid) sde_free_edid((void **)&dp_panel->edid_ctrl); - dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0); + dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0); memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo)); memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta)); panel->panel_on = false; @@ -1643,6 +1644,13 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in) dp_panel->stream_id = DP_STREAM_MAX; dp_panel->connector = in->connector; + if (in->base_panel) { + memcpy(dp_panel->dpcd, in->base_panel->dpcd, + DP_RECEIVER_CAP_SIZE + 1); + memcpy(&dp_panel->link_info, &in->base_panel->link_info, + sizeof(dp_panel->link_info)); + } + dp_panel->init = dp_panel_init_panel_info; dp_panel->deinit = dp_panel_deinit_panel_info; dp_panel->hw_cfg = dp_panel_hw_cfg; diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 600af83c09ff10b7016041812620ecea941602df..f9c95441f6e02e197151262f9c6bc43bdcf1ecc9 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -53,12 +53,15 @@ struct dp_display_mode { u32 capabilities; }; +struct dp_panel; + struct dp_panel_in { struct device *dev; struct dp_aux *aux; struct dp_link *link; struct dp_catalog_panel *catalog; struct drm_connector *connector; + struct dp_panel *base_panel; }; struct dp_audio; @@ -87,6 +90,7 @@ struct dp_panel { u32 channel_start_slot; u32 channel_total_slots; + u32 pbn; /* DRM connector assosiated with this panel */ struct drm_connector *connector; @@ -115,7 +119,7 @@ struct dp_panel { int (*set_stream_info)(struct dp_panel *dp_panel, enum dp_stream_id stream_id, u32 ch_start_slot, - u32 ch_tot_slots); + u32 ch_tot_slots, u32 pbn); int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size); int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 3263b55424b1b073f3b34b1c836b87de4af16b8e..2fbf7b9f24e4faea025ca57beedc2507f999be43 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -280,12 +280,12 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, switch (op) { case DSI_CTRL_OP_POWER_STATE_CHANGE: if (state->power_state == op_state) { - pr_debug("[%d] No change in state, pwr_state=%d\n", + pr_err("[%d] No change in state, pwr_state=%d\n", dsi_ctrl->cell_index, op_state); rc = -EINVAL; } else if (state->power_state == DSI_CTRL_POWER_VREG_ON) { if (state->vid_engine_state == DSI_CTRL_ENGINE_ON) { - pr_debug("[%d]State error: op=%d: %d\n", + pr_err("[%d]State error: op=%d: %d\n", dsi_ctrl->cell_index, op_state, state->vid_engine_state); @@ -295,12 +295,12 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, break; case DSI_CTRL_OP_CMD_ENGINE: if (state->cmd_engine_state == op_state) { - pr_debug("[%d] No change in state, cmd_state=%d\n", + pr_err("[%d] No change in state, cmd_state=%d\n", dsi_ctrl->cell_index, op_state); rc = -EINVAL; } else if ((state->power_state != DSI_CTRL_POWER_VREG_ON) || (state->controller_state != DSI_CTRL_ENGINE_ON)) { - pr_debug("[%d]State error: op=%d: %d, %d\n", + 
pr_err("[%d]State error: op=%d: %d, %d\n", dsi_ctrl->cell_index, op, state->power_state, @@ -310,12 +310,12 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, break; case DSI_CTRL_OP_VID_ENGINE: if (state->vid_engine_state == op_state) { - pr_debug("[%d] No change in state, cmd_state=%d\n", + pr_err("[%d] No change in state, cmd_state=%d\n", dsi_ctrl->cell_index, op_state); rc = -EINVAL; } else if ((state->power_state != DSI_CTRL_POWER_VREG_ON) || (state->controller_state != DSI_CTRL_ENGINE_ON)) { - pr_debug("[%d]State error: op=%d: %d, %d\n", + pr_err("[%d]State error: op=%d: %d, %d\n", dsi_ctrl->cell_index, op, state->power_state, @@ -325,11 +325,11 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, break; case DSI_CTRL_OP_HOST_ENGINE: if (state->controller_state == op_state) { - pr_debug("[%d] No change in state, ctrl_state=%d\n", + pr_err("[%d] No change in state, ctrl_state=%d\n", dsi_ctrl->cell_index, op_state); rc = -EINVAL; } else if (state->power_state != DSI_CTRL_POWER_VREG_ON) { - pr_debug("[%d]State error (link is off): op=%d:, %d\n", + pr_err("[%d]State error (link is off): op=%d:, %d\n", dsi_ctrl->cell_index, op_state, state->power_state); @@ -337,7 +337,7 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, } else if ((op_state == DSI_CTRL_ENGINE_OFF) && ((state->cmd_engine_state != DSI_CTRL_ENGINE_OFF) || (state->vid_engine_state != DSI_CTRL_ENGINE_OFF))) { - pr_debug("[%d]State error (eng on): op=%d: %d, %d\n", + pr_err("[%d]State error (eng on): op=%d: %d, %d\n", dsi_ctrl->cell_index, op_state, state->cmd_engine_state, @@ -349,7 +349,7 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, if ((state->power_state != DSI_CTRL_POWER_VREG_ON) || (state->host_initialized != true) || (state->cmd_engine_state != DSI_CTRL_ENGINE_ON)) { - pr_debug("[%d]State error: op=%d: %d, %d, %d\n", + pr_err("[%d]State error: op=%d: %d, %d, %d\n", dsi_ctrl->cell_index, op, state->power_state, @@ -360,23 +360,23 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, break; case DSI_CTRL_OP_HOST_INIT: if (state->host_initialized == op_state) { - pr_debug("[%d] No change in state, host_init=%d\n", + pr_err("[%d] No change in state, host_init=%d\n", dsi_ctrl->cell_index, op_state); rc = -EINVAL; } else if (state->power_state != DSI_CTRL_POWER_VREG_ON) { - pr_debug("[%d]State error: op=%d: %d\n", + pr_err("[%d]State error: op=%d: %d\n", dsi_ctrl->cell_index, op, state->power_state); rc = -EINVAL; } break; case DSI_CTRL_OP_TPG: if (state->tpg_enabled == op_state) { - pr_debug("[%d] No change in state, tpg_enabled=%d\n", + pr_err("[%d] No change in state, tpg_enabled=%d\n", dsi_ctrl->cell_index, op_state); rc = -EINVAL; } else if ((state->power_state != DSI_CTRL_POWER_VREG_ON) || (state->controller_state != DSI_CTRL_ENGINE_ON)) { - pr_debug("[%d]State error: op=%d: %d, %d\n", + pr_err("[%d]State error: op=%d: %d, %d\n", dsi_ctrl->cell_index, op, state->power_state, @@ -386,7 +386,7 @@ static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, break; case DSI_CTRL_OP_PHY_SW_RESET: if (state->power_state != DSI_CTRL_POWER_VREG_ON) { - pr_debug("[%d]State error: op=%d: %d\n", + pr_err("[%d]State error: op=%d: %d\n", dsi_ctrl->cell_index, op, state->power_state); rc = -EINVAL; } @@ -1232,7 +1232,7 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, } } - if (dsi_hw_ops.mask_error_intr) + if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway) dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw, BIT(DSI_FIFO_OVERFLOW), false); 
dsi_hw_ops.reset_cmd_fifo(&dsi_ctrl->hw); @@ -1371,6 +1371,13 @@ static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl, pr_err("Message transmission failed, rc=%d\n", rc); goto error; } + /* + * wait before reading rdbk_data register, if any delay is + * required after sending the read command. + */ + if (msg->wait_ms) + usleep_range(msg->wait_ms * 1000, + ((msg->wait_ms * 1000) + 10)); dlen = dsi_ctrl->hw.ops.get_cmd_read_data(&dsi_ctrl->hw, buff, total_bytes_read, @@ -1442,8 +1449,7 @@ static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl) u32 lanes = 0; u32 ulps_lanes; - if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) - lanes = dsi_ctrl->host_config.common_config.data_lanes; + lanes = dsi_ctrl->host_config.common_config.data_lanes; rc = dsi_ctrl->hw.ops.wait_for_lane_idle(&dsi_ctrl->hw, lanes); if (rc) { @@ -1484,9 +1490,7 @@ static int dsi_disable_ulps(struct dsi_ctrl *dsi_ctrl) return 0; } - if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) - lanes = dsi_ctrl->host_config.common_config.data_lanes; - + lanes = dsi_ctrl->host_config.common_config.data_lanes; lanes |= DSI_CLOCK_LANE; ulps_lanes = dsi_ctrl->hw.ops.ulps_ops.get_lanes_in_ulps(&dsi_ctrl->hw); @@ -1776,6 +1780,7 @@ static struct platform_driver dsi_ctrl_driver = { .driver = { .name = "drm_dsi_ctrl", .of_match_table = msm_dsi_of_match, + .suppress_bind_attrs = true, }, }; @@ -2128,11 +2133,14 @@ int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi, } mutex_lock(&dsi_ctrl->ctrl_lock); - if (!dsi_rect_is_equal(&dsi_ctrl->roi, roi)) { + if ((!dsi_rect_is_equal(&dsi_ctrl->roi, roi)) || + dsi_ctrl->modeupdated) { *changed = true; memcpy(&dsi_ctrl->roi, roi, sizeof(dsi_ctrl->roi)); + dsi_ctrl->modeupdated = false; } else *changed = false; + mutex_unlock(&dsi_ctrl->ctrl_lock); return rc; } @@ -2158,6 +2166,35 @@ int dsi_ctrl_phy_reset_config(struct dsi_ctrl *dsi_ctrl, bool enable) return 0; } +static bool dsi_ctrl_check_for_spurious_error_interrupts( + struct dsi_ctrl *dsi_ctrl) +{ + const unsigned long intr_check_interval = msecs_to_jiffies(1000); + const unsigned int interrupt_threshold = 15; + unsigned long jiffies_now = jiffies; + + if (!dsi_ctrl) { + pr_err("Invalid DSI controller structure\n"); + return false; + } + + if (dsi_ctrl->jiffies_start == 0) + dsi_ctrl->jiffies_start = jiffies; + + dsi_ctrl->error_interrupt_count++; + + if ((jiffies_now - dsi_ctrl->jiffies_start) < intr_check_interval) { + if (dsi_ctrl->error_interrupt_count > interrupt_threshold) { + pr_warn("Detected spurious interrupts on dsi ctrl\n"); + return true; + } + } else { + dsi_ctrl->jiffies_start = jiffies; + dsi_ctrl->error_interrupt_count = 1; + } + return false; +} + static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl, unsigned long int error) { @@ -2229,6 +2266,19 @@ static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl, if (error & 0xF) pr_err("ack error: 0x%lx\n", error); + /* + * DSI Phy can go into bad state during ESD influence. This can + * manifest as various types of spurious error interrupts on + * DSI controller. This check will allow us to handle afore mentioned + * case and prevent us from re enabling interrupts until a full ESD + * recovery is completed. 
+ */ + if (dsi_ctrl_check_for_spurious_error_interrupts(dsi_ctrl) && + dsi_ctrl->esd_check_underway) { + dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw); + return; + } + /* enable back DSI interrupts */ if (dsi_ctrl->hw.ops.error_intr_ctrl) dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, true); @@ -2461,6 +2511,31 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl) return 0; } +/** + * dsi_ctrl_update_host_init_state() - Update the host initialization state. + * @dsi_ctrl: DSI controller handle. + * @enable: boolean signifying host state. + * + * Update the host initialization status only while exiting from ulps during + * suspend state. + * + * Return: error code. + */ +int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool enable) +{ + int rc = 0; + u32 state = enable ? 0x1 : 0x0; + + rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, state); + if (rc) { + pr_err("[DSI_%d] Controller state check failed, rc=%d\n", + dsi_ctrl->cell_index, rc); + return rc; + } + dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, state); + return rc; +} + /** * dsi_ctrl_host_init() - Initialize DSI host hardware. * @dsi_ctrl: DSI controller handle. @@ -2688,6 +2763,7 @@ int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl, ctrl->mode_bounds.w = ctrl->host_config.video_timing.h_active; ctrl->mode_bounds.h = ctrl->host_config.video_timing.v_active; memcpy(&ctrl->roi, &ctrl->mode_bounds, sizeof(ctrl->mode_bounds)); + ctrl->modeupdated = true; ctrl->roi.x = 0; error: mutex_unlock(&ctrl->ctrl_lock); @@ -2714,9 +2790,6 @@ int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl, return -EINVAL; } - mutex_lock(&dsi_ctrl->ctrl_lock); - mutex_unlock(&dsi_ctrl->ctrl_lock); - return rc; } @@ -2836,7 +2909,8 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags) dsi_ctrl->cell_index); } } - if (dsi_ctrl->hw.ops.mask_error_intr) + if (dsi_ctrl->hw.ops.mask_error_intr && + !dsi_ctrl->esd_check_underway) dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw, BIT(DSI_FIFO_OVERFLOW), false); @@ -3331,7 +3405,8 @@ u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl) return misr; } -void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl) +void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx, + bool mask_enable) { if (!dsi_ctrl || !dsi_ctrl->hw.ops.error_intr_ctrl || !dsi_ctrl->hw.ops.clear_error_status) { @@ -3344,9 +3419,23 @@ void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl) * register */ mutex_lock(&dsi_ctrl->ctrl_lock); - dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, false); - dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + if (idx & BIT(DSI_ERR_INTR_ALL)) { + /* + * The behavior of mask_enable is different in ctrl register + * and mask register and hence mask_enable is manipulated for + * selective error interrupt masking vs total error interrupt + * masking. 
+ */ + + dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, !mask_enable); + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, DSI_ERROR_INTERRUPT_COUNT); + } else { + dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw, idx, + mask_enable); + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + DSI_ERROR_INTERRUPT_COUNT); + } mutex_unlock(&dsi_ctrl->ctrl_lock); } diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index edb1b31f323833b1eca169682d2085d69a71f8e5..751c3e11882447d1a06a60e719260e53dab8b7d5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -204,6 +204,7 @@ struct dsi_ctrl_interrupts { * @cmd_buffer_size: Size of command buffer. * @vaddr: CPU virtual address of cmd buffer. * @secure_mode: Indicates if secure-session is in progress + * @esd_check_underway: Indicates if esd status check is in progress * @debugfs_root: Root for debugfs entries. * @misr_enable: Frame MISR enable/disable * @misr_cache: Cached Frame MISR value @@ -211,6 +212,7 @@ struct dsi_ctrl_interrupts { * dsi controller and run only dsi controller. * @null_insertion_enabled: A boolean property to allow dsi controller to * insert null packet. + * @modeupdated: Boolean to send new roi if mode is updated. */ struct dsi_ctrl { struct platform_device *pdev; @@ -248,6 +250,7 @@ struct dsi_ctrl { u32 cmd_len; void *vaddr; bool secure_mode; + bool esd_check_underway; /* Debug Information */ struct dentry *debugfs_root; @@ -256,8 +259,13 @@ struct dsi_ctrl { bool misr_enable; u32 misr_cache; + /* Check for spurious interrupts */ + unsigned long jiffies_start; + unsigned int error_interrupt_count; + bool phy_isolation_enabled; bool null_insertion_enabled; + bool modeupdated; }; /** @@ -730,8 +738,11 @@ void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable); * dsi_ctrl_mask_error_status_interrupts() - API to mask dsi ctrl error status * interrupts * @dsi_ctrl: DSI controller handle. + * @idx: id indicating which interrupts to enable/disable. + * @mask_enable: boolean to enable/disable masking. */ -void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl); +void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx, + bool mask_enable); /** * dsi_ctrl_irq_update() - Put a irq vote to process DSI error @@ -753,5 +764,9 @@ int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl, * @dsi_ctrl: DSI controller handle. 
*/ int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl); +/** + * dsi_ctrl_update_host_init_state() - Set the host initialization state + */ +int dsi_ctrl_update_host_init_state(struct dsi_ctrl *dsi_ctrl, bool en); #endif /* _DSI_CTRL_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c index 53717a50ccc8acd7c9c2365d51655eb8dca9955d..fd799e6a1001dd86dcf30535788013d70c804ae1 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c @@ -455,6 +455,11 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl, /* Disable Timing double buffering */ DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0); + if (cfg->force_clk_lane_hs) { + reg = DSI_R32(ctrl, DSI_LANE_CTRL); + reg |= BIT(28); + DSI_W32(ctrl, DSI_LANE_CTRL, reg); + } pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index); } @@ -1418,17 +1423,20 @@ void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en) reg = DSI_R32(ctrl, 0x10c); if (idx & BIT(DSI_FIFO_OVERFLOW)) { - if (en) - reg |= (0xf << 16); - else - reg &= ~(0xf << 16); + if (en) { + reg |= (0x1f << 16); + reg |= BIT(9); + } else { + reg &= ~(0x1f << 16); + reg &= ~BIT(9); + } } if (idx & BIT(DSI_FIFO_UNDERFLOW)) { if (en) - reg |= (0xf << 26); + reg |= (0x1b << 26); else - reg &= ~(0xf << 26); + reg &= ~(0x1b << 26); } if (idx & BIT(DSI_LP_Rx_TIMEOUT)) { diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h index 42d7cc0f27c524da328e1c7e549d242ac5c6a0cf..d287184e8016ea4368e61067d26179405e2f1c7e 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h @@ -408,6 +408,7 @@ struct dsi_mode_info { * @ignore_rx_eot: Ignore Rx EOT packets if set to true. * @append_tx_eot: Append EOT packets for forward transmissions if set to * true. + * @ext_bridge_mode: External bridge is connected. 
*/ struct dsi_host_common_cfg { enum dsi_pixel_format dst_format; @@ -426,6 +427,7 @@ struct dsi_host_common_cfg { u32 t_clk_pre; bool ignore_rx_eot; bool append_tx_eot; + bool ext_bridge_mode; }; /** @@ -451,6 +453,7 @@ struct dsi_video_engine_cfg { bool hsa_lp11_en; bool eof_bllp_lp11_en; bool bllp_lp11_en; + bool force_clk_lane_hs; enum dsi_video_traffic_mode traffic_mode; u32 vc_id; }; @@ -599,6 +602,7 @@ enum dsi_error_status { DSI_FIFO_OVERFLOW = 1, DSI_FIFO_UNDERFLOW, DSI_LP_Rx_TIMEOUT, + DSI_ERR_INTR_ALL, }; #endif /* _DSI_DEFS_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index d0ca32c99468ca76ec1810bbd3476cca1137d904..069bc9882f68c7afdfb7a9f2cdcacb3a90791b78 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -53,7 +53,8 @@ static const struct of_device_id dsi_display_dt_match[] = { {} }; -static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display) +static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display, + u32 mask, bool enable) { int i; struct dsi_display_ctrl *ctrl; @@ -66,7 +67,25 @@ static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display) ctrl = &display->ctrl[i]; if (!ctrl) continue; - dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl); + dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl, mask, enable); + } +} + +static void dsi_display_set_ctrl_esd_check_flag(struct dsi_display *display, + bool enable) +{ + int i; + struct dsi_display_ctrl *ctrl; + + if (!display) + return; + + for (i = 0; (i < display->ctrl_count) && + (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) { + ctrl = &display->ctrl[i]; + if (!ctrl) + continue; + ctrl->ctrl->esd_check_underway = enable; } } @@ -484,7 +503,6 @@ static bool dsi_display_validate_reg_read(struct dsi_panel *panel) int i, j = 0; int len = 0, *lenp; int group = 0, count = 0; - struct dsi_display_mode *mode; struct drm_panel_esd_config *config; if (!panel) @@ -493,8 +511,7 @@ static bool dsi_display_validate_reg_read(struct dsi_panel *panel) config = &(panel->esd_config); lenp = config->status_valid_params ?: config->status_cmds_rlen; - mode = panel->cur_mode; - count = mode->priv_info->cmd_sets[DSI_CMD_SET_PANEL_STATUS].count; + count = config->status_cmd.count; for (i = 0; i < count; i++) len += lenp[i]; @@ -557,27 +574,26 @@ static int dsi_display_read_status(struct dsi_display_ctrl *ctrl, if (dsi_ctrl_validate_host_state(ctrl->ctrl)) return 1; - /* acquire panel_lock to make sure no commands are in progress */ - dsi_panel_acquire_panel_lock(panel); - config = &(panel->esd_config); lenp = config->status_valid_params ?: config->status_cmds_rlen; count = config->status_cmd.count; cmds = config->status_cmd.cmds; - if (cmds->last_command) { - cmds->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND; - flags |= DSI_CTRL_CMD_LAST_COMMAND; - } flags |= (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ); for (i = 0; i < count; ++i) { memset(config->status_buf, 0x0, SZ_4K); + if (cmds[i].last_command) { + cmds[i].msg.flags |= MIPI_DSI_MSG_LASTCOMMAND; + flags |= DSI_CTRL_CMD_LAST_COMMAND; + } + if (config->status_cmd.state == DSI_CMD_SET_STATE_LP) + cmds[i].msg.flags |= MIPI_DSI_MSG_USE_LPM; cmds[i].msg.rx_buf = config->status_buf; cmds[i].msg.rx_len = config->status_cmds_rlen[i]; rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i].msg, flags); if (rc <= 0) { pr_err("rx cmd transfer failed rc=%d\n", rc); - goto error; + return rc; } memcpy(config->return_buf + start, @@ -585,9 +601,6 
@@ static int dsi_display_read_status(struct dsi_display_ctrl *ctrl, start += lenp[i]; } -error: - /* release panel_lock */ - dsi_panel_release_panel_lock(panel); return rc; } @@ -655,16 +668,12 @@ static int dsi_display_status_reg_read(struct dsi_display *display) rc = dsi_display_validate_status(ctrl, display->panel); if (rc <= 0) { - pr_err("[%s] read status failed on master,rc=%d\n", + pr_err("[%s] read status failed on slave,rc=%d\n", display->name, rc); goto exit; } } exit: - /* mask only error interrupts */ - if (rc <= 0) - dsi_display_mask_ctrl_error_interrupts(display); - dsi_display_cmd_engine_disable(display); done: return rc; @@ -690,7 +699,7 @@ static int dsi_display_status_check_te(struct dsi_display *display) reinit_completion(&display->esd_te_gate); if (!wait_for_completion_timeout(&display->esd_te_gate, esd_te_timeout)) { - pr_err("ESD check failed\n"); + pr_err("TE check failed\n"); rc = -EINVAL; } @@ -699,31 +708,47 @@ static int dsi_display_status_check_te(struct dsi_display *display) return rc; } -int dsi_display_check_status(struct drm_connector *connector, void *display) +int dsi_display_check_status(struct drm_connector *connector, void *display, + bool te_check_override) { struct dsi_display *dsi_display = display; struct dsi_panel *panel; u32 status_mode; int rc = 0x1; + u32 mask; - if (dsi_display == NULL) + if (!dsi_display || !dsi_display->panel) return -EINVAL; panel = dsi_display->panel; - status_mode = panel->esd_config.status_mode; - - mutex_lock(&dsi_display->display_lock); + dsi_panel_acquire_panel_lock(panel); if (!panel->panel_initialized) { pr_debug("Panel not initialized\n"); - mutex_unlock(&dsi_display->display_lock); + dsi_panel_release_panel_lock(panel); return rc; } + /* Prevent another ESD check,when ESD recovery is underway */ + if (panel->esd_recovery_pending) { + dsi_panel_release_panel_lock(panel); + return rc; + } + + if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio)) + status_mode = ESD_MODE_PANEL_TE; + else + status_mode = panel->esd_config.status_mode; + dsi_display_clk_ctrl(dsi_display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON); + /* Mask error interrupts before attempting ESD read */ + mask = BIT(DSI_FIFO_OVERFLOW) | BIT(DSI_FIFO_UNDERFLOW); + dsi_display_set_ctrl_esd_check_flag(dsi_display, true); + dsi_display_mask_ctrl_error_interrupts(dsi_display, mask, true); + if (status_mode == ESD_MODE_REG_READ) { rc = dsi_display_status_reg_read(dsi_display); } else if (status_mode == ESD_MODE_SW_BTA) { @@ -735,9 +760,19 @@ int dsi_display_check_status(struct drm_connector *connector, void *display) panel->esd_config.esd_enabled = false; } + /* Unmask error interrupts */ + if (rc > 0) { + dsi_display_set_ctrl_esd_check_flag(dsi_display, false); + dsi_display_mask_ctrl_error_interrupts(dsi_display, mask, + false); + } else { + /* Handle Panel failures during display disable sequence */ + panel->esd_recovery_pending = true; + } + dsi_display_clk_ctrl(dsi_display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_OFF); - mutex_unlock(&dsi_display->display_lock); + dsi_panel_release_panel_lock(panel); return rc; } @@ -753,7 +788,7 @@ static int dsi_display_cmd_prepare(const char *cmd_buf, u32 cmd_buf_len, cmd->msg.channel = cmd_buf[2]; cmd->msg.flags = cmd_buf[3]; cmd->msg.ctrl = 0; - cmd->post_wait_ms = cmd_buf[4]; + cmd->post_wait_ms = cmd->msg.wait_ms = cmd_buf[4]; cmd->msg.tx_len = ((cmd_buf[5] << 8) | (cmd_buf[6])); if (cmd->msg.tx_len > payload_len) { @@ -2225,18 +2260,34 @@ static int dsi_display_ctrl_init(struct dsi_display *display) 
int i; struct dsi_display_ctrl *ctrl; - for (i = 0 ; i < display->ctrl_count; i++) { - ctrl = &display->ctrl[i]; - rc = dsi_ctrl_host_init(ctrl->ctrl, - display->is_cont_splash_enabled); - if (rc) { - pr_err("[%s] failed to init host_%d, rc=%d\n", - display->name, i, rc); - goto error_host_deinit; + /* when ULPS suspend feature is enabled, we will keep the lanes in + * ULPS during suspend state and clamp DSI phy. Hence while resuming + * we will program DSI controller as part of core clock enable. + * After that we should not re-configure DSI controller again here for + * usecases where we are resuming from ulps suspend as it might put + * the HW in bad state. + */ + if (!display->panel->ulps_suspend_enabled || !display->ulps_enabled) { + for (i = 0 ; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + rc = dsi_ctrl_host_init(ctrl->ctrl, + display->is_cont_splash_enabled); + if (rc) { + pr_err("[%s] failed to init host_%d, rc=%d\n", + display->name, i, rc); + goto error_host_deinit; + } + } + } else { + for (i = 0 ; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + rc = dsi_ctrl_update_host_init_state(ctrl->ctrl, true); + if (rc) + pr_debug("host init update failed rc=%d\n", rc); } } - return 0; + return rc; error_host_deinit: for (i = i - 1; i >= 0; i--) { ctrl = &display->ctrl[i]; @@ -2595,7 +2646,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct dsi_display *display = to_dsi_display(host); - int rc = 0; + int rc = 0, ret = 0; if (!host || !msg) { pr_err("Invalid params\n"); @@ -2653,13 +2704,17 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, } error_disable_cmd_engine: - (void)dsi_display_cmd_engine_disable(display); + ret = dsi_display_cmd_engine_disable(display); + if (ret) { + pr_err("[%s]failed to disable DSI cmd engine, rc=%d\n", + display->name, ret); + } error_disable_clks: - rc = dsi_display_clk_ctrl(display->dsi_clk_handle, + ret = dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_OFF); - if (rc) { + if (ret) { pr_err("[%s] failed to disable all DSI clocks, rc=%d\n", - display->name, rc); + display->name, ret); } error: return rc; @@ -3321,6 +3376,9 @@ static int dsi_display_parse_dt(struct dsi_display *display) /* Parse TE gpio */ dsi_display_parse_te_gpio(display); + /* Parse external bridge from port 0, reg 0 */ + display->ext_bridge_of = of_graph_get_remote_node(of_node, 0, 0); + pr_debug("success\n"); error: return rc; @@ -4515,6 +4573,7 @@ static struct platform_driver dsi_display_driver = { .driver = { .name = "msm-dsi-display", .of_match_table = dsi_display_dt_match, + .suppress_bind_attrs = true, }, }; @@ -4798,6 +4857,285 @@ int dsi_display_drm_bridge_deinit(struct dsi_display *display) return rc; } +/* Hook functions to call external connector, pointer validation is + * done in dsi_display_drm_ext_bridge_init.
+ */ +static enum drm_connector_status dsi_display_drm_ext_detect( + struct drm_connector *connector, + bool force, + void *disp) +{ + struct dsi_display *display = disp; + + return display->ext_conn->funcs->detect(display->ext_conn, force); +} + +static int dsi_display_drm_ext_get_modes( + struct drm_connector *connector, void *disp) +{ + struct dsi_display *display = disp; + struct drm_display_mode *pmode, *pt; + int count; + + count = display->ext_conn->helper_private->get_modes( + display->ext_conn); + + list_for_each_entry_safe(pmode, pt, + &display->ext_conn->probed_modes, head) { + list_move_tail(&pmode->head, &connector->probed_modes); + } + + connector->display_info = display->ext_conn->display_info; + + return count; +} + +static enum drm_mode_status dsi_display_drm_ext_mode_valid( + struct drm_connector *connector, + struct drm_display_mode *mode, + void *disp) +{ + struct dsi_display *display = disp; + + return display->ext_conn->helper_private->mode_valid( + display->ext_conn, mode); +} + +static int dsi_display_drm_ext_atomic_check(struct drm_connector *connector, + void *disp, + struct drm_connector_state *c_state) +{ + struct dsi_display *display = disp; + + return display->ext_conn->helper_private->atomic_check( + display->ext_conn, c_state); +} + +static int dsi_display_ext_get_info(struct drm_connector *connector, + struct msm_display_info *info, void *disp) +{ + struct dsi_display *display; + int i; + + if (!info || !disp) { + pr_err("invalid params\n"); + return -EINVAL; + } + + display = disp; + if (!display->panel) { + pr_err("invalid display panel\n"); + return -EINVAL; + } + + mutex_lock(&display->display_lock); + + memset(info, 0, sizeof(struct msm_display_info)); + + info->intf_type = DRM_MODE_CONNECTOR_DSI; + info->num_of_h_tiles = display->ctrl_count; + for (i = 0; i < info->num_of_h_tiles; i++) + info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index; + + info->is_connected = connector->status != connector_status_disconnected; + info->is_primary = true; + info->capabilities |= (MSM_DISPLAY_CAP_VID_MODE | + MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_HOT_PLUG); + + mutex_unlock(&display->display_lock); + return 0; +} + +static int dsi_display_ext_get_mode_info(struct drm_connector *connector, + const struct drm_display_mode *drm_mode, + struct msm_mode_info *mode_info, + u32 max_mixer_width, void *display) +{ + struct msm_display_topology *topology; + + if (!drm_mode || !mode_info) + return -EINVAL; + + memset(mode_info, 0, sizeof(*mode_info)); + mode_info->frame_rate = drm_mode->vrefresh; + mode_info->vtotal = drm_mode->vtotal; + + topology = &mode_info->topology; + topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ? 
2 : 1; + topology->num_enc = 0; + topology->num_intf = topology->num_lm; + + mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE; + + return 0; +} + +static int dsi_host_ext_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + struct dsi_display *display = to_dsi_display(host); + struct dsi_panel *panel; + + if (!host || !dsi || !display->panel) { + pr_err("Invalid param\n"); + return -EINVAL; + } + + pr_debug("DSI[%s]: channel=%d, lanes=%d, format=%d, mode_flags=%lx\n", + dsi->name, dsi->channel, dsi->lanes, + dsi->format, dsi->mode_flags); + + panel = display->panel; + panel->host_config.data_lanes = 0; + if (dsi->lanes > 0) + panel->host_config.data_lanes |= DSI_DATA_LANE_0; + if (dsi->lanes > 1) + panel->host_config.data_lanes |= DSI_DATA_LANE_1; + if (dsi->lanes > 2) + panel->host_config.data_lanes |= DSI_DATA_LANE_2; + if (dsi->lanes > 3) + panel->host_config.data_lanes |= DSI_DATA_LANE_3; + + switch (dsi->format) { + case MIPI_DSI_FMT_RGB888: + panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB888; + break; + case MIPI_DSI_FMT_RGB666: + panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB666_LOOSE; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB666; + break; + case MIPI_DSI_FMT_RGB565: + default: + panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB565; + break; + } + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { + panel->panel_mode = DSI_OP_VIDEO_MODE; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + panel->video_config.traffic_mode = + DSI_VIDEO_TRAFFIC_BURST_MODE; + else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + panel->video_config.traffic_mode = + DSI_VIDEO_TRAFFIC_SYNC_PULSES; + else + panel->video_config.traffic_mode = + DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS; + + panel->video_config.hsa_lp11_en = + dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA; + panel->video_config.hbp_lp11_en = + dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP; + panel->video_config.hfp_lp11_en = + dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP; + panel->video_config.pulse_mode_hsa_he = + dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE; + panel->video_config.bllp_lp11_en = + dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BLLP; + panel->video_config.eof_bllp_lp11_en = + dsi->mode_flags & MIPI_DSI_MODE_VIDEO_EOF_BLLP; + } else { + panel->panel_mode = DSI_OP_CMD_MODE; + pr_err("command mode not supported by ext bridge\n"); + return -ENOTSUPP; + } + + panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN; + + return 0; +} + +static struct mipi_dsi_host_ops dsi_host_ext_ops = { + .attach = dsi_host_ext_attach, + .detach = dsi_host_detach, + .transfer = dsi_host_transfer, +}; + +int dsi_display_drm_ext_bridge_init(struct dsi_display *display, + struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct drm_device *drm = encoder->dev; + struct drm_bridge *bridge = encoder->bridge; + struct drm_bridge *ext_bridge; + struct drm_connector *ext_conn; + struct sde_connector *sde_conn = to_sde_connector(connector); + int rc; + + /* check if ext_bridge is already attached */ + if (display->ext_bridge) + return 0; + + /* check if there is no external bridge defined */ + if (!display->ext_bridge_of) + return 0; + + ext_bridge = of_drm_find_bridge(display->ext_bridge_of); + if (IS_ERR_OR_NULL(ext_bridge)) { + rc = PTR_ERR(ext_bridge); + pr_err("failed to find ext bridge\n"); + goto error; + } + + rc = drm_bridge_attach(bridge->encoder, ext_bridge, bridge); + if (rc) { + pr_err("[%s] ext bridge attach failed, %d\n", + display->name, rc); + goto
error; + } + + display->ext_bridge = ext_bridge; + + /* ext bridge will init its own connector during attach, + * we need to extract it out of the connector list + */ + spin_lock_irq(&drm->mode_config.connector_list_lock); + ext_conn = list_last_entry(&drm->mode_config.connector_list, + struct drm_connector, head); + if (ext_conn && ext_conn != connector && + ext_conn->encoder_ids[0] == bridge->encoder->base.id) { + list_del_init(&ext_conn->head); + display->ext_conn = ext_conn; + } + spin_unlock_irq(&drm->mode_config.connector_list_lock); + + /* if there is no valid external connector created, we'll use default + * setting from panel defined in DT file. + */ + if (!display->ext_conn || + !display->ext_conn->funcs || + !display->ext_conn->helper_private) { + display->ext_conn = NULL; + return 0; + } + + /* otherwise, hook up the functions to use external connector */ + sde_conn->ops.detect = + display->ext_conn->funcs->detect ? + dsi_display_drm_ext_detect : NULL; + sde_conn->ops.get_modes = + display->ext_conn->helper_private->get_modes ? + dsi_display_drm_ext_get_modes : NULL; + sde_conn->ops.mode_valid = + display->ext_conn->helper_private->mode_valid ? + dsi_display_drm_ext_mode_valid : NULL; + sde_conn->ops.atomic_check = + display->ext_conn->helper_private->atomic_check ? + dsi_display_drm_ext_atomic_check : NULL; + sde_conn->ops.get_info = + dsi_display_ext_get_info; + sde_conn->ops.get_mode_info = + dsi_display_ext_get_mode_info; + + /* add support to attach/detach */ + display->host.ops = &dsi_host_ext_ops; + return 0; +error: + return rc; +} + int dsi_display_get_info(struct drm_connector *connector, struct msm_display_info *info, void *disp) { @@ -5031,6 +5369,53 @@ int dsi_display_get_modes(struct dsi_display *display, return rc; } +int dsi_display_get_panel_vfp(void *dsi_display, + int h_active, int v_active) +{ + int i, rc = 0; + u32 count, refresh_rate = 0; + struct dsi_dfps_capabilities dfps_caps; + struct dsi_display *display = (struct dsi_display *)dsi_display; + + if (!display) + return -EINVAL; + + rc = dsi_display_get_mode_count(display, &count); + if (rc) + return rc; + + mutex_lock(&display->display_lock); + + if (display->panel && display->panel->cur_mode) + refresh_rate = display->panel->cur_mode->timing.refresh_rate; + + dsi_panel_get_dfps_caps(display->panel, &dfps_caps); + if (dfps_caps.dfps_support) + refresh_rate = dfps_caps.max_refresh_rate; + + if (!refresh_rate) { + mutex_unlock(&display->display_lock); + pr_err("Null Refresh Rate\n"); + return -EINVAL; + } + + h_active *= display->ctrl_count; + + for (i = 0; i < count; i++) { + struct dsi_display_mode *m = &display->modes[i]; + + if (m && v_active == m->timing.v_active && + h_active == m->timing.h_active && + refresh_rate == m->timing.refresh_rate) { + rc = m->timing.v_front_porch; + break; + } + } + mutex_unlock(&display->display_lock); + + return rc; +} + int dsi_display_find_mode(struct dsi_display *display, const struct dsi_display_mode *cmp, struct dsi_display_mode **out_mode) @@ -5047,6 +5432,14 @@ int dsi_display_find_mode(struct dsi_display *display, if (rc) return rc; + if (!display->modes) { + struct dsi_display_mode *m; + + rc = dsi_display_get_modes(display, &m); + if (rc) + return rc; + } + mutex_lock(&display->display_lock); for (i = 0; i < count; i++) { struct dsi_display_mode *m = &display->modes[i]; @@ -5590,6 +5983,8 @@ int dsi_display_prepare(struct dsi_display *display) mode = display->panel->cur_mode; + dsi_display_set_ctrl_esd_check_flag(display, false); + if (mode->dsi_mode_flags & 
DSI_MODE_FLAG_DMS) { if (display->is_cont_splash_enabled) { pr_err("DMS is not supposed to be set on first frame\n"); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index c58b41770f10e12eaf1412ed6cf8c08cf2a82503..74652df844a51b9f35b824120e7d80f5a2a69e36 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -128,6 +128,7 @@ struct dsi_display_clk_info { * @pdev: Pointer to platform device. * @drm_dev: DRM device associated with the display. * @drm_conn: Pointer to DRM connector associated with the display + * @ext_conn: Pointer to external connector attached to DSI connector * @name: Name of the display. * @display_type: Display type as defined in device tree. * @list: List pointer. @@ -142,6 +143,7 @@ struct dsi_display_clk_info { * @ctrl: Controller information for DSI display. * @panel: Handle to DSI panel. * @panel_of: pHandle to DSI panel. + * @ext_bridge_of: pHandle to external DSI bridge. * @modes: Array of probed DSI modes * @type: DSI display type. * @clk_master_idx: The master controller for controlling clocks. This is an @@ -161,6 +163,7 @@ struct dsi_display_clk_info { * @phy_idle_power_off: PHY power state. * @host: DRM MIPI DSI Host. * @bridge: Pointer to DRM bridge object. + * @ext_bridge: Pointer to external bridge object attached to DSI bridge. * @cmd_engine_refcount: Reference count enforcing single instance of cmd eng * @clk_mngr: DSI clock manager. * @dsi_clk_handle: DSI clock handle. @@ -174,6 +177,7 @@ struct dsi_display { struct platform_device *pdev; struct drm_device *drm_dev; struct drm_connector *drm_conn; + struct drm_connector *ext_conn; const char *name; const char *display_type; @@ -193,6 +197,7 @@ struct dsi_display { struct device_node *disp_node; struct device_node *panel_of; struct device_node *parser_node; + struct device_node *ext_bridge_of; struct dsi_display_mode *modes; @@ -222,6 +227,7 @@ struct dsi_display { struct mipi_dsi_host host; struct dsi_bridge *bridge; + struct drm_bridge *ext_bridge; u32 cmd_engine_refcount; struct sde_power_handle *phandle; @@ -302,6 +308,19 @@ int dsi_display_drm_bridge_init(struct dsi_display *display, */ int dsi_display_drm_bridge_deinit(struct dsi_display *display); +/** + * dsi_display_drm_ext_bridge_init() - initializes DRM bridge for ext bridge + * @display: Handle to the display. + * @enc: Pointer to the encoder object which is connected to the + * display. + * @connector: Pointer to the connector object which is connected to + * the display. + * + * Return: error code. + */ +int dsi_display_drm_ext_bridge_init(struct dsi_display *display, + struct drm_encoder *enc, struct drm_connector *connector); + /** * dsi_display_get_info() - returns the display properties * @connector: Pointer to drm connector structure @@ -562,8 +581,10 @@ int dsi_display_set_backlight(struct drm_connector *connector, * dsi_display_check_status() - check if panel is dead or alive * @connector: Pointer to drm connector structure * @display: Handle to display. 
+ * @te_check_override: Whether check for TE from panel or default check */ -int dsi_display_check_status(struct drm_connector *connector, void *display); +int dsi_display_check_status(struct drm_connector *connector, void *display, + bool te_check_override); /** * dsi_display_cmd_transfer() - transfer command to the panel @@ -635,4 +656,14 @@ enum dsi_pixel_format dsi_display_get_dst_format( * Return: Zero on Success */ int dsi_display_cont_splash_config(void *display); +/* + * dsi_display_get_panel_vfp - get panel vsync + * @display: Pointer to private display structure + * @h_active: width + * @v_active: height + * Returns: v_front_porch on success error code on failure + */ +int dsi_display_get_panel_vfp(void *display, + int h_active, int v_active); + #endif /* _DSI_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index 18933ce402e793b843c4c16fc766c6781c8ce2ca..e79f200e6bc88c1a37b160ee5bb95448579ca030 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -25,6 +25,18 @@ #define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base) #define to_dsi_state(x) container_of((x), struct dsi_connector_state, base) +#define DEFAULT_PANEL_JITTER_NUMERATOR 2 +#define DEFAULT_PANEL_JITTER_DENOMINATOR 1 +#define DEFAULT_PANEL_JITTER_ARRAY_SIZE 2 +#define DEFAULT_PANEL_PREFILL_LINES 25 + +static struct dsi_display_mode_priv_info default_priv_info = { + .panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR, + .panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR, + .panel_prefill_lines = DEFAULT_PANEL_PREFILL_LINES, + .dsc_enabled = false, +}; + static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode, struct dsi_display_mode *dsi_mode) { @@ -63,6 +75,11 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode, dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS; if (msm_is_mode_seamless_vrr(drm_mode)) dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR; + + dsi_mode->timing.h_sync_polarity = + !!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC); + dsi_mode->timing.v_sync_polarity = + !!(drm_mode->flags & DRM_MODE_FLAG_PVSYNC); } void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode, @@ -101,6 +118,11 @@ void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode, if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR) drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR; + if (dsi_mode->timing.h_sync_polarity) + drm_mode->flags |= DRM_MODE_FLAG_PHSYNC; + if (dsi_mode->timing.v_sync_polarity) + drm_mode->flags |= DRM_MODE_FLAG_PVSYNC; + drm_mode_set_name(drm_mode); } @@ -129,8 +151,12 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge) return; } - if (!c_bridge || !c_bridge->display) + if (!c_bridge || !c_bridge->display || !c_bridge->display->panel) { pr_err("Incorrect bridge details\n"); + return; + } + + c_bridge->display->panel->esd_recovery_pending = false; /* By this point mode should have been validated through mode_fixup */ rc = dsi_display_set_mode(c_bridge->display, @@ -176,6 +202,7 @@ static void dsi_bridge_enable(struct drm_bridge *bridge) { int rc = 0; struct dsi_bridge *c_bridge = to_dsi_bridge(bridge); + struct dsi_display *display; if (!bridge) { pr_err("Invalid params\n"); @@ -187,11 +214,15 @@ static void dsi_bridge_enable(struct drm_bridge *bridge) pr_debug("[%d] seamless enable\n", c_bridge->id); return; } + display = c_bridge->display; - rc = dsi_display_post_enable(c_bridge->display); + rc = 
dsi_display_post_enable(display); if (rc) pr_err("[%d] DSI display post enabled failed, rc=%d\n", c_bridge->id, rc); + + if (display && display->drm_conn) + sde_connector_helper_bridge_enable(display->drm_conn); } static void dsi_bridge_disable(struct drm_bridge *bridge) @@ -286,6 +317,17 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge, return false; } + /* + * if no timing defined in panel, it must be external mode + * and we'll use empty priv info to populate the mode + */ + if (display->panel && !display->panel->num_timing_nodes) { + *adjusted_mode = *mode; + adjusted_mode->private = (int *)&default_priv_info; + adjusted_mode->private_flags = 0; + return true; + } + convert_to_dsi_mode(mode, &dsi_mode); /* diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c index b4c26514ec223d9de3a154849a25a5c2e62e92c9..45cf593f9198105d5635c1992c801840949b7934 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c @@ -412,6 +412,9 @@ static int dsi_panel_set_pinctrl_state(struct dsi_panel *panel, bool enable) int rc = 0; struct pinctrl_state *state; + if (panel->host_config.ext_bridge_mode) + return 0; + if (enable) state = panel->pinctrl.active; else @@ -552,6 +555,9 @@ static int dsi_panel_pinctrl_init(struct dsi_panel *panel) { int rc = 0; + if (panel->host_config.ext_bridge_mode) + return 0; + /* TODO: pinctrl is defined in dsi dt node */ panel->pinctrl.pinctrl = devm_pinctrl_get(panel->parent); if (IS_ERR_OR_NULL(panel->pinctrl.pinctrl)) { @@ -643,6 +649,9 @@ static int dsi_panel_bl_register(struct dsi_panel *panel) int rc = 0; struct dsi_backlight_config *bl = &panel->bl_config; + if (panel->host_config.ext_bridge_mode) + return 0; + switch (bl->type) { case DSI_BACKLIGHT_WLED: rc = dsi_panel_wled_register(panel, bl); @@ -664,6 +673,9 @@ static int dsi_panel_bl_unregister(struct dsi_panel *panel) int rc = 0; struct dsi_backlight_config *bl = &panel->bl_config; + if (panel->host_config.ext_bridge_mode) + return 0; + switch (bl->type) { case DSI_BACKLIGHT_WLED: break; @@ -999,6 +1011,9 @@ static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host, host->append_tx_eot = utils->read_bool(utils->data, "qcom,mdss-dsi-tx-eot-append"); + host->ext_bridge_mode = utils->read_bool(utils->data, + "qcom,mdss-dsi-ext-bridge-mode"); + return 0; } @@ -1193,6 +1208,9 @@ static int dsi_panel_parse_video_host_config(struct dsi_video_engine_cfg *cfg, cfg->bllp_lp11_en = utils->read_bool(utils->data, "qcom,mdss-dsi-bllp-power-mode"); + cfg->force_clk_lane_hs = of_property_read_bool(utils->data, + "qcom,mdss-dsi-force-clock-lane-hs"); + traffic_mode = utils->get_property(utils->data, "qcom,mdss-dsi-traffic-mode", NULL); @@ -1472,7 +1490,7 @@ static int dsi_panel_create_cmd_packets(const char *data, cmd[i].msg.channel = data[2]; cmd[i].msg.flags |= (data[3] == 1 ? 
MIPI_DSI_MSG_REQ_ACK : 0); cmd[i].msg.ctrl = 0; - cmd[i].post_wait_ms = data[4]; + cmd[i].post_wait_ms = cmd[i].msg.wait_ms = data[4]; cmd[i].msg.tx_len = ((data[5] << 8) | (data[6])); size = cmd[i].msg.tx_len * sizeof(u8); @@ -1788,7 +1806,8 @@ static int dsi_panel_parse_gpios(struct dsi_panel *panel) panel->reset_config.reset_gpio = utils->get_named_gpio(utils->data, "qcom,platform-reset-gpio", 0); - if (!gpio_is_valid(panel->reset_config.reset_gpio)) { + if (!gpio_is_valid(panel->reset_config.reset_gpio) && + !panel->host_config.ext_bridge_mode) { pr_err("[%s] failed get reset gpio, rc=%d\n", panel->name, rc); rc = -EINVAL; goto error; @@ -2011,9 +2030,8 @@ int dsi_dsc_populate_static_param(struct msm_display_dsc_info *dsc) int final_value, final_scale; int ratio_index; - dsc->version = 0x11; - dsc->scr_rev = 0; dsc->rc_model_size = 8192; + if (dsc->version == 0x11 && dsc->scr_rev == 0x1) dsc->first_line_bpg_offset = 15; else @@ -2175,7 +2193,7 @@ static int dsi_panel_parse_phy_timing(struct dsi_display_mode *mode, priv_info->phy_timing_len = len; }; - mode->pixel_clk_khz = (mode->timing.h_active * + mode->pixel_clk_khz = (DSI_H_TOTAL(&mode->timing) * DSI_V_TOTAL(&mode->timing) * mode->timing.refresh_rate) / 1000; return rc; @@ -2206,6 +2224,36 @@ static int dsi_panel_parse_dsc_params(struct dsi_display_mode *mode, return 0; } + rc = utils->read_u32(utils->data, "qcom,mdss-dsc-version", &data); + if (rc) { + priv_info->dsc.version = 0x11; + rc = 0; + } else { + priv_info->dsc.version = data & 0xff; + /* only support DSC 1.1 rev */ + if (priv_info->dsc.version != 0x11) { + pr_err("%s: DSC version:%d not supported\n", __func__, + priv_info->dsc.version); + rc = -EINVAL; + goto error; + } + } + + rc = utils->read_u32(utils->data, "qcom,mdss-dsc-scr-version", &data); + if (rc) { + priv_info->dsc.scr_rev = 0x0; + rc = 0; + } else { + priv_info->dsc.scr_rev = data & 0xff; + /* only one scr rev supported */ + if (priv_info->dsc.scr_rev > 0x1) { + pr_err("%s: DSC scr version:%d not supported\n", + __func__, priv_info->dsc.scr_rev); + rc = -EINVAL; + goto error; + } + } + rc = utils->read_u32(utils->data, "qcom,mdss-dsc-slice-height", &data); if (rc) { pr_err("failed to parse qcom,mdss-dsc-slice-height\n"); @@ -3022,7 +3070,7 @@ int dsi_panel_get_mode_count(struct dsi_panel *panel) timings_np = utils->get_child_by_name(utils->data, "qcom,mdss-dsi-display-timings"); - if (!timings_np) { + if (!timings_np && !panel->host_config.ext_bridge_mode) { pr_err("no display timing nodes defined\n"); rc = -EINVAL; goto error; @@ -3055,11 +3103,7 @@ int dsi_panel_get_phy_props(struct dsi_panel *panel, return -EINVAL; } - mutex_lock(&panel->panel_lock); - memcpy(phy_props, &panel->phy_props, sizeof(*phy_props)); - - mutex_unlock(&panel->panel_lock); return rc; } @@ -3073,11 +3117,7 @@ int dsi_panel_get_dfps_caps(struct dsi_panel *panel, return -EINVAL; } - mutex_lock(&panel->panel_lock); - memcpy(dfps_caps, &panel->dfps_caps, sizeof(*dfps_caps)); - - mutex_unlock(&panel->panel_lock); return rc; } @@ -3431,6 +3471,7 @@ static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel_cmd_set *set, set->cmds[0].msg.tx_buf = caset; set->cmds[0].msg.rx_len = 0; set->cmds[0].msg.rx_buf = 0; + set->cmds[0].msg.wait_ms = 0; set->cmds[0].last_command = 0; set->cmds[0].post_wait_ms = 0; @@ -3442,6 +3483,7 @@ static int dsi_panel_roi_prepare_dcs_cmds(struct dsi_panel_cmd_set *set, set->cmds[1].msg.tx_buf = paset; set->cmds[1].msg.rx_len = 0; set->cmds[1].msg.rx_buf = 0; + set->cmds[1].msg.wait_ms = 0; 
set->cmds[1].last_command = 1; set->cmds[1].post_wait_ms = 0; @@ -3655,11 +3697,14 @@ int dsi_panel_disable(struct dsi_panel *panel) mutex_lock(&panel->panel_lock); - rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF); - if (rc) { - pr_err("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n", - panel->name, rc); - goto error; + /* Avoid sending panel off commands when ESD recovery is underway */ + if (!panel->esd_recovery_pending) { + rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF); + if (rc) { + pr_err("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n", + panel->name, rc); + goto error; + } } panel->panel_initialized = false; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h index 07a141caaacd561338a0c5482886d8772adcbf2a..1ae2288e264957e4e230ad86e41c09a769b70b44 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h @@ -176,6 +176,7 @@ struct dsi_panel { bool ulps_enabled; bool ulps_suspend_enabled; bool allow_phy_power_off; + bool esd_recovery_pending; bool panel_initialized; bool te_using_watchdog_timer; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c index df69e70038453c2792866d46d8b7cad98a1ce048..285329c5e6cc2ac5dc86f7c3c1315b24977f1ad0 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c @@ -311,6 +311,9 @@ static int dsi_phy_settings_init(struct platform_device *pdev, "qcom,dsi-phy-regulator-min-datarate-bps", &phy->regulator_min_datarate_bps); + phy->cfg.force_clk_lane_hs = of_property_read_bool(pdev->dev.of_node, + "qcom,panel-force-clock-lane-hs"); + return 0; err: lane->count_per_lane = 0; @@ -705,8 +708,7 @@ static int dsi_phy_enable_ulps(struct msm_dsi_phy *phy, u32 lanes = 0; u32 ulps_lanes; - if (config->panel_mode == DSI_OP_CMD_MODE) - lanes = config->common_config.data_lanes; + lanes = config->common_config.data_lanes; lanes |= DSI_CLOCK_LANE; /* @@ -741,8 +743,7 @@ static int dsi_phy_disable_ulps(struct msm_dsi_phy *phy, { u32 ulps_lanes, lanes = 0; - if (config->panel_mode == DSI_OP_CMD_MODE) - lanes = config->common_config.data_lanes; + lanes = config->common_config.data_lanes; lanes |= DSI_CLOCK_LANE; ulps_lanes = phy->hw.ops.ulps_ops.get_lanes_in_ulps(&phy->hw); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h index 5d0a8e4fb6b3ca9a79fc1074a0b7ba455d28b64d..e25784a7a83732996a89d18534135b51249bcd2f 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h @@ -92,6 +92,7 @@ struct dsi_phy_per_lane_cfgs { * @regulators: Regulator settings for lanes. * @pll_source: PLL source. * @lane_map: DSI logical to PHY lane mapping. + * @force_clk_lane_hs:Boolean whether to force clock lane in HS mode. 
*/ struct dsi_phy_cfg { struct dsi_phy_per_lane_cfgs lanecfg; @@ -101,6 +102,7 @@ struct dsi_phy_cfg { struct dsi_phy_per_lane_cfgs regulators; enum dsi_phy_pll_source pll_source; struct dsi_lane_map lane_map; + bool force_clk_lane_hs; }; struct dsi_phy_hw; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c index 4ed484a49e5cacfdb803a5b4d82fb6bfc728dd79..9123cf852c33ccd937d4a99fa4fb309c0bd12820 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c @@ -143,6 +143,12 @@ static void dsi_phy_hw_v4_0_lane_settings(struct dsi_phy_hw *phy, DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]); } + if (cfg->force_clk_lane_hs) { + u32 reg = DSI_R32(phy, DSIPHY_CMN_LANE_CTRL1); + + reg |= BIT(5) | BIT(6); + DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg); + } } /** @@ -335,6 +341,9 @@ void dsi_phy_hw_v4_0_ulps_request(struct dsi_phy_hw *phy, if (lanes & DSI_DATA_LANE_3) reg |= BIT(3); + if (cfg->force_clk_lane_hs) + reg |= BIT(5) | BIT(6); + /* * ULPS entry request. Wait for short time to make sure * that the lanes enter ULPS. Recommended as per HPG. @@ -409,6 +418,11 @@ void dsi_phy_hw_v4_0_ulps_exit(struct dsi_phy_hw *phy, DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg); DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0); usleep_range(100, 110); + + if (cfg->force_clk_lane_hs) { + reg = BIT(5) | BIT(6); + DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg); + } } u32 dsi_phy_hw_v4_0_get_lanes_in_ulps(struct dsi_phy_hw *phy) diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 98742d7af6dcb21506434c31977941c5cfdea877..f40c4c524fff36059da94aee0f9b4d7072e9ce31 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -172,6 +172,7 @@ static struct platform_driver dsi_driver = { .name = "msm_dsi", .of_match_table = dt_match, .pm = &dsi_pm_ops, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index c1d261294f05cb5826715fe2ff1cc0520e21c3f9..aeafe9738e104ac24a65377b367d1abbbe6e1949 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -740,7 +740,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( switch (mipi_fmt) { case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; case MIPI_DSI_FMT_RGB666_PACKED: - case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; default: return CMD_DST_FORMAT_RGB888; } diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c index 59d8f1eb279da7dfaa6c606ea72b07758fc2fc65..b80ab1ae0e041e2a8095fc44d669710147737e2a 100644 --- a/drivers/gpu/drm/msm/edp/edp.c +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -128,6 +128,7 @@ static struct platform_driver edp_driver = { .driver = { .name = "msm_edp", .of_match_table = dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 7b893d456beccc03da24b034e3f7e57960b7f3c3..f3509e723c0f3161628f47817a265f45bf42328e 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -648,6 +648,7 @@ static struct platform_driver msm_hdmi_driver = { .driver = { .name = "hdmi_msm", .of_match_table = msm_hdmi_dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 16e6a00fce4af4ab8a5b0e1bb2687a482f03f153..f25601b33ca92cc854f4d4ba72fe248a3934b063 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -1051,6 +1051,7 @@ static struct platform_driver mdp5_driver = { .name = "msm_mdp", .of_match_table = mdp5_dt_match, .pm = &mdp5_pm_ops, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 9e4c7b1e713fc92a3b64bb35527e08a377a3031b..60d55d7e66d43afc99298443548ecfa6d66735c0 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* Copyright (C) 2014 Red Hat * Author: Rob Clark * @@ -25,6 +25,8 @@ #include "msm_fence.h" #include "sde_trace.h" +#define MULTIPLE_CONN_DETECTED(x) (x > 1) + struct msm_commit { struct drm_device *dev; struct drm_atomic_state *state; @@ -111,6 +113,66 @@ static void commit_destroy(struct msm_commit *c) kfree(c); } +static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state, + struct drm_crtc_state *crtc_state, bool enable) +{ + struct drm_connector *connector = NULL; + struct drm_connector_state *conn_state = NULL; + int i = 0; + int conn_cnt = 0; + + if (msm_is_mode_seamless(&crtc_state->mode) || + msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode)) + return true; + + if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable) + return true; + + if (!crtc_state->mode_changed && crtc_state->connectors_changed) { + for_each_connector_in_state(state, connector, conn_state, i) { + if ((conn_state->crtc == crtc_state->crtc) || + (connector->state->crtc == + crtc_state->crtc)) + conn_cnt++; + + if (MULTIPLE_CONN_DETECTED(conn_cnt)) + return true; + } + } + + return false; +} + +static inline bool _msm_seamless_for_conn(struct drm_connector *connector, + struct drm_connector_state *old_conn_state, bool enable) +{ + if (!old_conn_state || !old_conn_state->crtc) + return false; + + if (!old_conn_state->crtc->state->mode_changed && + !old_conn_state->crtc->state->active_changed && + old_conn_state->crtc->state->connectors_changed) { + if (old_conn_state->crtc == connector->state->crtc) + return true; + } + + if (enable) + return false; + + if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode)) + return true; + + if (msm_is_mode_seamless_vrr( + &connector->encoder->crtc->state->adjusted_mode)) + return true; + + if (msm_is_mode_seamless_dms( + &connector->encoder->crtc->state->adjusted_mode)) + return true; + + return false; +} + static void msm_atomic_wait_for_commit_done( struct drm_device *dev, struct drm_atomic_state *old_state) @@ -169,14 +231,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (WARN_ON(!encoder)) continue; - if (msm_is_mode_seamless( - &connector->encoder->crtc->state->mode) || - msm_is_mode_seamless_vrr( - &connector->encoder->crtc->state->adjusted_mode)) - continue; - - if (msm_is_mode_seamless_dms( - &connector->encoder->crtc->state->adjusted_mode)) + if (_msm_seamless_for_conn(connector, old_conn_state, false)) continue; funcs = encoder->helper_private; @@ -218,11 +273,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (!old_crtc_state->active) continue; - if (msm_is_mode_seamless(&crtc->state->mode) || - msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode)) - continue; - - if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode)) + if (_msm_seamless_for_crtc(old_state, crtc->state, false)) continue; funcs = crtc->helper_private; @@ -281,8 +332,14 @@ msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) mode = &new_crtc_state->mode; adjusted_mode = &new_crtc_state->adjusted_mode; - if (!new_crtc_state->mode_changed) + if (!new_crtc_state->mode_changed && + new_crtc_state->connectors_changed) { + if (_msm_seamless_for_conn(connector, + old_conn_state, false)) + continue; + } else if (!new_crtc_state->mode_changed) { continue; + } DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", encoder->base.id, encoder->name); @@ -362,9 +419,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, if (!new_crtc_state->active) 
continue; - if (msm_is_mode_seamless(&new_crtc_state->mode) || - msm_is_mode_seamless_vrr( - &new_crtc_state->adjusted_mode)) + if (_msm_seamless_for_crtc(old_state, crtc->state, true)) continue; funcs = crtc->helper_private; @@ -389,6 +444,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, new_conn_state, i) { const struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; + struct drm_connector_state *old_conn_state; if (!new_conn_state->best_encoder) continue; @@ -398,18 +454,26 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, new_conn_state->crtc->state)) continue; - encoder = new_conn_state->best_encoder; + old_conn_state = drm_atomic_get_old_connector_state( + old_state, connector); + if (_msm_seamless_for_conn(connector, old_conn_state, true)) + continue; + + encoder = connector->state->best_encoder; funcs = encoder->helper_private; DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", encoder->base.id, encoder->name); - blank = MSM_DRM_BLANK_UNBLANK; - notifier_data.data = &blank; - notifier_data.id = - new_conn_state->crtc->index; - msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK, + if (connector->state->crtc->state->active_changed) { + blank = MSM_DRM_BLANK_UNBLANK; + notifier_data.data = &blank; + notifier_data.id = + connector->state->crtc->index; + DRM_DEBUG_ATOMIC("Notify early unblank\n"); + msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK, &notifier_data); + } /* * Each encoder has at most one connector (since we always steal * it away), so we won't call enable hooks twice. @@ -437,6 +501,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, for_each_new_connector_in_state(old_state, connector, new_conn_state, i) { struct drm_encoder *encoder; + struct drm_connector_state *old_conn_state; if (!new_conn_state->best_encoder) continue; @@ -446,14 +511,22 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev, new_conn_state->crtc->state)) continue; - encoder = new_conn_state->best_encoder; + old_conn_state = drm_atomic_get_old_connector_state( + old_state, connector); + if (_msm_seamless_for_conn(connector, old_conn_state, true)) + continue; + + encoder = connector->state->best_encoder; DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n", encoder->base.id, encoder->name); drm_bridge_enable(encoder->bridge); - msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK, + if (connector->state->crtc->state->active_changed) { + DRM_DEBUG_ATOMIC("Notify unblank\n"); + msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK, &notifier_data); + } } SDE_ATRACE_END("msm_enable"); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 95d9955e89db27a9bb00aa8f1b1c5f3d4b46fa9a..2552e6c3822931148669da1458c3fb4635052a9e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -979,9 +979,7 @@ static int msm_disable_all_modes( drm_modeset_backoff(ctx); } - /* on successful atomic commit state ownership transfers to framework */ - if (ret != 0) - drm_atomic_state_put(state); + drm_atomic_state_put(state); return ret; } @@ -2046,6 +2044,7 @@ static struct platform_driver msm_platform_driver = { .name = "msm_drm", .of_match_table = dt_match, .pm = &msm_pm_ops, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index b19c4532a8dd8979f200194bb39e7d1876111220..1695a3c40c9b9a45ae8edfd3236cee52b0a975f2 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -161,6
+161,7 @@ enum msm_mdp_crtc_property { CRTC_PROP_SECURITY_LEVEL, CRTC_PROP_IDLE_TIMEOUT, CRTC_PROP_DEST_SCALER, + CRTC_PROP_CAPTURE_OUTPUT, /* total # of properties */ CRTC_PROP_COUNT diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index f8f95deb137dee1012cfea2b467f964c4e495268..6923ec2d76444513666975a8a0da780a41ba0dc3 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); - ret = PTR_ERR(fb); - goto fail; + return PTR_ERR(fb); } bo = msm_framebuffer_bo(fb, 0); @@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, fail_unlock: mutex_unlock(&dev->struct_mutex); -fail: - - if (ret) { - if (fb) - drm_framebuffer_remove(fb); - } - + drm_framebuffer_remove(fb); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 766221196c32a3932682b367bcc640f3ba29d3e8..9839cbe6368c6ca806bde5fab23833b1f021fae7 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -490,7 +490,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, *iova = vma->iova; - if (aspace && aspace->domain_attached) { + if (aspace && !msm_obj->in_active_list) { mutex_lock(&aspace->list_lock); msm_gem_add_obj_to_aspace_active_list(aspace, obj); mutex_unlock(&aspace->list_lock); @@ -1035,6 +1035,7 @@ static int msm_gem_new_impl(struct drm_device *dev, INIT_LIST_HEAD(&msm_obj->vmas); INIT_LIST_HEAD(&msm_obj->iova_list); msm_obj->aspace = NULL; + msm_obj->in_active_list = false; if (struct_mutex_locked) { WARN_ON(!mutex_is_locked(&dev->struct_mutex)); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 903a53b4277737a2dd7e2131dcad20e1be584a02..d3a0c97613d5cf3286bb0d4509cc276330d576dc 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -134,6 +134,7 @@ struct msm_gem_object { struct list_head iova_list; struct msm_gem_address_space *aspace; + bool in_active_list; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 58fe9bc328d6e488ca10ae605fce9eb9cf676749..62c2233616eb2d61c43c2e8ac75210ea16a0c7f9 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -68,6 +68,7 @@ static void smmu_aspace_add_to_active( { WARN_ON(!mutex_is_locked(&aspace->list_lock)); list_move_tail(&msm_obj->iova_list, &aspace->active_list); + msm_obj->in_active_list = true; } static void smmu_aspace_remove_from_active( @@ -81,6 +82,7 @@ static void smmu_aspace_remove_from_active( list_for_each_entry_safe(msm_obj, next, &aspace->active_list, iova_list) { if (msm_obj == obj) { + msm_obj->in_active_list = false; list_del(&msm_obj->iova_list); break; } diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index adc440cee46b26207d3c0b4c32b282a30af152e5..c6fc7486348fa5b62d67a7da9ffe734de69989cb 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -173,13 +173,12 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova, { struct msm_smmu *smmu = to_msm_smmu(mmu); struct msm_smmu_client *client = msm_smmu_to_client(smmu); - size_t ret; - - ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl, - sgt->nents, prot); - WARN_ON(ret < 0); + size_t ret = 0; if (sgt && sgt->sgl) { + ret = iommu_map_sg(client->mmu_mapping->domain, iova, 
sgt->sgl, + sgt->nents, prot); + WARN_ON(ret < 0); DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address, sgt->sgl->dma_length, prot); SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, @@ -489,6 +488,13 @@ static int _msm_smmu_create_mapping(struct msm_smmu_client *client, } } + if (!client->dev->dma_parms) + client->dev->dma_parms = devm_kzalloc(client->dev, + sizeof(*client->dev->dma_parms), GFP_KERNEL); + + dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64)); + iommu_set_fault_handler(client->mmu_mapping->domain, msm_smmu_fault_handler, (void *)client); @@ -565,6 +571,7 @@ static struct platform_driver msm_smmu_driver = { .driver = { .name = "msmdrm_smmu", .of_match_table = msm_smmu_dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c index 02050a695fa245635bd50da1ef4d48998622525d..66a35a94568e8e102935366b03a61327b2004370 100644 --- a/drivers/gpu/drm/msm/sde/sde_color_processing.c +++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c @@ -1176,6 +1176,7 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc) { struct sde_crtc *sde_crtc = NULL; struct sde_cp_node *prop_node = NULL, *n = NULL; + bool ad_suspend = false; if (!crtc) { DRM_ERROR("crtc %pK\n", crtc); @@ -1198,8 +1199,12 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc) active_list) { sde_cp_update_list(prop_node, sde_crtc, true); list_del_init(&prop_node->active_list); + ad_suspend = true; } mutex_unlock(&sde_crtc->crtc_cp_lock); + + if (ad_suspend) + sde_cp_ad_set_prop(sde_crtc, AD_SUSPEND); } void sde_cp_crtc_resume(struct drm_crtc *crtc) @@ -1721,25 +1726,21 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en, goto exit; } - node = _sde_cp_get_intr_node(DRM_EVENT_AD_BACKLIGHT, crtc); + node = container_of(ad_irq, struct sde_crtc_irq_info, irq); if (!en) { - if (node) { - spin_lock_irqsave(&node->state_lock, flags); - if (node->state == IRQ_ENABLED) { - ret = sde_core_irq_disable(kms, &irq_idx, 1); - if (ret) - DRM_ERROR("disable irq %d error %d\n", - irq_idx, ret); - else - node->state = IRQ_NOINIT; - } else { + spin_lock_irqsave(&node->state_lock, flags); + if (node->state == IRQ_ENABLED) { + ret = sde_core_irq_disable(kms, &irq_idx, 1); + if (ret) + DRM_ERROR("disable irq %d error %d\n", + irq_idx, ret); + else node->state = IRQ_NOINIT; - } - spin_unlock_irqrestore(&node->state_lock, flags); } else { - DRM_ERROR("failed to get node from crtc event list\n"); + node->state = IRQ_NOINIT; } + spin_unlock_irqrestore(&node->state_lock, flags); sde_core_irq_unregister_callback(kms, irq_idx, ad_irq); ret = 0; goto exit; @@ -1753,32 +1754,18 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en, goto exit; } - if (node) { - /* device resume or resume from IPC cases */ - spin_lock_irqsave(&node->state_lock, flags); - if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) { - ret = sde_core_irq_enable(kms, &irq_idx, 1); - if (ret) { - DRM_ERROR("enable irq %d error %d\n", - irq_idx, ret); - sde_core_irq_unregister_callback(kms, - irq_idx, ad_irq); - } else { - node->state = IRQ_ENABLED; - } - } - spin_unlock_irqrestore(&node->state_lock, flags); - } else { - /* request from userspace to register the event - * in this case, node has not been added into the event list - */ + spin_lock_irqsave(&node->state_lock, flags); + if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) { ret = sde_core_irq_enable(kms, &irq_idx, 1); if (ret) { - 
DRM_ERROR("failed to enable irq ret %d\n", ret); - sde_core_irq_unregister_callback(kms, - irq_idx, ad_irq); + DRM_ERROR("enable irq %d error %d\n", irq_idx, ret); + sde_core_irq_unregister_callback(kms, irq_idx, ad_irq); + } else { + node->state = IRQ_ENABLED; } } + spin_unlock_irqrestore(&node->state_lock, flags); + exit: return ret; } @@ -1859,7 +1846,7 @@ static void sde_cp_hist_interrupt_cb(void *arg, int irq_idx) spin_unlock_irqrestore(&crtc->spin_lock, flags); if (!node) { - DRM_ERROR("cannot find histogram event node in crtc\n"); + DRM_DEBUG_DRIVER("cannot find histogram event node in crtc\n"); return; } @@ -1983,26 +1970,29 @@ int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en, goto exit; } - node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, crtc); + node = container_of(hist_irq, struct sde_crtc_irq_info, irq); /* deregister histogram irq */ if (!en) { - if (node) { - /* device suspend case or suspend to IPC cases */ + spin_lock_irqsave(&node->state_lock, flags); + if (node->state == IRQ_ENABLED) { + node->state = IRQ_DISABLING; + spin_unlock_irqrestore(&node->state_lock, flags); + ret = sde_core_irq_disable(kms, &irq_idx, 1); spin_lock_irqsave(&node->state_lock, flags); - if (node->state == IRQ_ENABLED) { - ret = sde_core_irq_disable(kms, &irq_idx, 1); - if (ret) - DRM_ERROR("disable irq %d error %d\n", - irq_idx, ret); - else - node->state = IRQ_NOINIT; + if (ret) { + DRM_ERROR("disable irq %d error %d\n", + irq_idx, ret); + node->state = IRQ_ENABLED; } else { node->state = IRQ_NOINIT; } spin_unlock_irqrestore(&node->state_lock, flags); + } else if (node->state == IRQ_DISABLED) { + node->state = IRQ_NOINIT; + spin_unlock_irqrestore(&node->state_lock, flags); } else { - DRM_ERROR("failed to get node from crtc event list\n"); + spin_unlock_irqrestore(&node->state_lock, flags); } sde_core_irq_unregister_callback(kms, irq_idx, hist_irq); @@ -2018,32 +2008,19 @@ int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en, goto exit; } - if (node) { - /* device resume or resume from IPC cases */ - spin_lock_irqsave(&node->state_lock, flags); - if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) { - ret = sde_core_irq_enable(kms, &irq_idx, 1); - if (ret) { - DRM_ERROR("enable irq %d error %d\n", - irq_idx, ret); - sde_core_irq_unregister_callback(kms, - irq_idx, hist_irq); - } else { - node->state = IRQ_ENABLED; - } - } - spin_unlock_irqrestore(&node->state_lock, flags); - } else { - /* request from userspace to register the event - * in this case, node has not been added into the event list - */ + spin_lock_irqsave(&node->state_lock, flags); + if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) { ret = sde_core_irq_enable(kms, &irq_idx, 1); if (ret) { - DRM_ERROR("failed to enable irq ret %d\n", ret); + DRM_ERROR("enable irq %d error %d\n", irq_idx, ret); sde_core_irq_unregister_callback(kms, irq_idx, hist_irq); + } else { + node->state = IRQ_ENABLED; } } + spin_unlock_irqrestore(&node->state_lock, flags); + exit: return ret; } diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 54f38cf509ec260a614585740cf4f8b9ac527642..7433aedac66cc54e1c2beec59d0de914ec6fbd30 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -643,6 +643,7 @@ int sde_connector_pre_kickoff(struct drm_connector *connector) void sde_connector_helper_bridge_disable(struct drm_connector *connector) { int rc; + struct sde_connector *c_conn = NULL; if (!connector) return; @@ -653,6 +654,34 @@ 
void sde_connector_helper_bridge_disable(struct drm_connector *connector) connector->base.id, rc); SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR); } + + /* Disable ESD thread */ + sde_connector_schedule_status_work(connector, false); + + c_conn = to_sde_connector(connector); + if (c_conn->panel_dead) { + c_conn->bl_device->props.power = FB_BLANK_POWERDOWN; + c_conn->bl_device->props.state |= BL_CORE_FBBLANK; + backlight_update_status(c_conn->bl_device); + } +} + +void sde_connector_helper_bridge_enable(struct drm_connector *connector) +{ + struct sde_connector *c_conn = NULL; + + if (!connector) + return; + + c_conn = to_sde_connector(connector); + + /* Special handling for ESD recovery case */ + if (c_conn->panel_dead) { + c_conn->bl_device->props.power = FB_BLANK_UNBLANK; + c_conn->bl_device->props.state &= ~BL_CORE_FBBLANK; + backlight_update_status(c_conn->bl_device); + c_conn->panel_dead = false; + } } int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable) @@ -1219,7 +1248,7 @@ void sde_connector_prepare_fence(struct drm_connector *connector) } void sde_connector_complete_commit(struct drm_connector *connector, - ktime_t ts) + ktime_t ts, enum sde_fence_event fence_event) { if (!connector) { SDE_ERROR("invalid connector\n"); @@ -1227,7 +1256,8 @@ void sde_connector_complete_commit(struct drm_connector *connector, } /* signal connector's retire fence */ - sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, false); + sde_fence_signal(&to_sde_connector(connector)->retire_fence, + ts, fence_event); } void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts) @@ -1238,7 +1268,8 @@ void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts) } /* signal connector's retire fence */ - sde_fence_signal(&to_sde_connector(connector)->retire_fence, ts, true); + sde_fence_signal(&to_sde_connector(connector)->retire_fence, + ts, SDE_FENCE_RESET_TIMELINE); } static void sde_connector_update_hdr_props(struct drm_connector *connector) @@ -1389,6 +1420,28 @@ int sde_connector_helper_reset_custom_properties( return 0; } +int sde_connector_get_panel_vfp(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct sde_connector *c_conn; + int vfp = -EINVAL; + + if (!connector || !mode) { + SDE_ERROR("invalid connector\n"); + return vfp; + } + c_conn = to_sde_connector(connector); + if (!c_conn->ops.get_panel_vfp) + return vfp; + + vfp = c_conn->ops.get_panel_vfp(c_conn->display, + mode->hdisplay, mode->vdisplay); + if (vfp <= 0) + SDE_ERROR("Failed get_panel_vfp %d\n", vfp); + + return vfp; +} + static int _sde_debugfs_conn_cmd_tx_open(struct inode *inode, struct file *file) { /* non-seekable */ @@ -1725,12 +1778,68 @@ static int sde_connector_atomic_check(struct drm_connector *connector, return 0; } +static void _sde_connector_report_panel_dead(struct sde_connector *conn) +{ + struct drm_event event; + + if (!conn) + return; + + /* Panel dead notification can come from: + * 1) ESD thread + * 2) Commit thread (if TE stops coming) + * So in such cases, avoid sending the failure notification twice.
+ */ + if (conn->panel_dead) + return; + + conn->panel_dead = true; + event.type = DRM_EVENT_PANEL_DEAD; + event.length = sizeof(bool); + msm_mode_object_event_notify(&conn->base.base, + conn->base.dev, &event, (u8 *)&conn->panel_dead); + sde_encoder_display_failure_notification(conn->encoder); + SDE_EVT32(SDE_EVTLOG_ERROR); + SDE_ERROR("esd check failed report PANEL_DEAD conn_id: %d enc_id: %d\n", + conn->base.base.id, conn->encoder->base.id); +} + +int sde_connector_esd_status(struct drm_connector *conn) +{ + struct sde_connector *sde_conn = NULL; + int ret = 0; + + if (!conn) + return ret; + + sde_conn = to_sde_connector(conn); + if (!sde_conn || !sde_conn->ops.check_status) + return ret; + + /* protect this call with ESD status check call */ + mutex_lock(&sde_conn->lock); + ret = sde_conn->ops.check_status(&sde_conn->base, sde_conn->display, + true); + mutex_unlock(&sde_conn->lock); + + if (ret <= 0) { + /* cancel if any pending esd work */ + sde_connector_schedule_status_work(conn, false); + _sde_connector_report_panel_dead(sde_conn); + ret = -ETIMEDOUT; + } else { + SDE_DEBUG("Successfully received TE from panel\n"); + ret = 0; + } + SDE_EVT32(ret); + + return ret; +} + static void sde_connector_check_status_work(struct work_struct *work) { struct sde_connector *conn; - struct drm_event event; int rc = 0; - bool panel_dead = false; conn = container_of(to_delayed_work(work), struct sde_connector, status_work); @@ -1747,7 +1856,7 @@ static void sde_connector_check_status_work(struct work_struct *work) return; } - rc = conn->ops.check_status(&conn->base, conn->display); + rc = conn->ops.check_status(&conn->base, conn->display, false); mutex_unlock(&conn->lock); if (conn->force_panel_dead) { @@ -1771,15 +1880,7 @@ static void sde_connector_check_status_work(struct work_struct *work) } status_dead: - SDE_EVT32(rc, SDE_EVTLOG_ERROR); - SDE_ERROR("esd check failed report PANEL_DEAD conn_id: %d enc_id: %d\n", - conn->base.base.id, conn->encoder->base.id); - panel_dead = true; - event.type = DRM_EVENT_PANEL_DEAD; - event.length = sizeof(bool); - msm_mode_object_event_notify(&conn->base.base, - conn->base.dev, &event, (u8 *)&panel_dead); - sde_encoder_display_failure_notification(conn->encoder); + _sde_connector_report_panel_dead(conn); } static const struct drm_connector_helper_funcs sde_connector_helper_ops = { diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index 220b0cab53add79b4611ae4e50bc8318d120cc35..6436b45843f61c2d65e8563bf566176358dd7df4 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -236,9 +236,11 @@ struct sde_connector_ops { * check_status - check status of connected display panel * @connector: Pointer to drm connector structure * @display: Pointer to private display handle + * @te_check_override: Whether to check TE from the panel or use the default check * Returns: positive value for success, negetive or zero for failure */ - int (*check_status)(struct drm_connector *connector, void *display); + int (*check_status)(struct drm_connector *connector, void *display, + bool te_check_override); /** * cmd_transfer - Transfer command to the connected display panel @@ -299,6 +301,15 @@ struct sde_connector_ops { * Returns: zero for success, negetive for failure */ int (*cont_splash_config)(void *display); + + /** + * get_panel_vfp - returns original panel vfp + * @display: Pointer to private display handle + * @h_active: width + * @v_active: height + * Returns: v_front_porch on success, error code on
failure + */ + int (*get_panel_vfp)(void *display, int h_active, int v_active); }; /** @@ -358,6 +369,8 @@ struct sde_connector_evt { * @status_work: work object to perform status checks * @force_panel_dead: variable to trigger forced ESD recovery * @esd_status_interval: variable to change ESD check interval in millisec + * @panel_dead: Flag to indicate if panel has gone bad + * @esd_status_check: Flag to indicate if ESD thread is scheduled or not * @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed * @bl_scale: BL scale value for ABA feature * @bl_scale_ad: BL scale value for AD feature @@ -403,7 +416,7 @@ struct sde_connector { struct delayed_work status_work; u32 force_panel_dead; u32 esd_status_interval; - + bool panel_dead; bool esd_status_check; bool bl_scale_dirty; @@ -632,8 +645,10 @@ void sde_connector_prepare_fence(struct drm_connector *connector); * sde_connector_complete_commit - signal completion of current commit * @connector: Pointer to drm connector object * @ts: timestamp to be updated in the fence signalling + * @fence_event: enum value to indicate nature of fence event */ -void sde_connector_complete_commit(struct drm_connector *connector, ktime_t ts); +void sde_connector_complete_commit(struct drm_connector *connector, + ktime_t ts, enum sde_fence_event fence_event); /** * sde_connector_commit_reset - reset the completion signal @@ -826,5 +841,25 @@ void sde_connector_destroy(struct drm_connector *connector); */ int sde_connector_event_notify(struct drm_connector *connector, uint32_t type, uint32_t len, uint32_t val); +/** + * sde_connector_helper_bridge_enable - helper function for drm bridge enable + * @connector: Pointer to DRM connector object + */ +void sde_connector_helper_bridge_enable(struct drm_connector *connector); + +/** + * sde_connector_get_panel_vfp - helper to get panel vfp + * @connector: pointer to drm connector + * @mode: pointer to drm display mode for which the + * panel vertical front porch is required + * Returns: v_front_porch on success, error code on failure + */ +int sde_connector_get_panel_vfp(struct drm_connector *connector, + struct drm_display_mode *mode); +/** + * sde_connector_esd_status - helper function to check TE status + * @connector: Pointer to DRM connector object + */ +int sde_connector_esd_status(struct drm_connector *connector); #endif /* _SDE_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 310d6498df61eea1412ac7aead38f9304dc23592..a0283bfcb200b4892d91af6a57fea532f8085aab 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -937,7 +937,9 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc, struct sde_crtc *sde_crtc; struct sde_crtc_state *crtc_state; struct sde_rect *crtc_roi; - int i, num_attached_conns = 0; + struct msm_mode_info mode_info; + int i = 0; + int rc; bool is_crtc_roi_dirty; bool is_any_conn_roi_dirty; @@ -959,13 +961,14 @@ static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc, if (!conn_state || conn_state->crtc != crtc) continue; - if (num_attached_conns) { - SDE_ERROR( - "crtc%d: unsupported: roi on crtc w/ >1 connectors\n", - DRMID(crtc)); + rc = sde_connector_get_mode_info(conn_state, &mode_info); + if (rc) { + SDE_ERROR("failed to get mode info\n"); return -EINVAL; } - ++num_attached_conns; + + if (!mode_info.roi_caps.enabled) + continue; sde_conn = to_sde_connector(conn_state->connector); sde_conn_state = to_sde_connector_state(conn_state); @@ -1286,13 +1289,6 @@ static int _sde_crtc_check_rois(struct drm_crtc *crtc, sde_crtc = 
to_sde_crtc(crtc); sde_crtc_state = to_sde_crtc_state(state); - if (hweight_long(state->connector_mask) != 1) { - SDE_ERROR("invalid connector count(%d) for crtc: %d\n", - (int)hweight_long(state->connector_mask), - crtc->base.id); - return -EINVAL; - } - /* * check connector array cached at modeset time since incoming atomic * state may not include any connectors if they aren't modified @@ -1308,50 +1304,40 @@ static int _sde_crtc_check_rois(struct drm_crtc *crtc, SDE_ERROR("failed to get mode info\n"); return -EINVAL; } - break; - } - if (!mode_info.roi_caps.enabled) - return 0; + if (!mode_info.roi_caps.enabled) + continue; - if (sde_crtc_state->user_roi_list.num_rects > - mode_info.roi_caps.num_roi) { - SDE_ERROR("roi count is more than supported limit, %d > %d\n", - sde_crtc_state->user_roi_list.num_rects, - mode_info.roi_caps.num_roi); - return -E2BIG; - } + if (sde_crtc_state->user_roi_list.num_rects > + mode_info.roi_caps.num_roi) { + SDE_ERROR("roi count is exceeding limit, %d > %d\n", + sde_crtc_state->user_roi_list.num_rects, + mode_info.roi_caps.num_roi); + return -E2BIG; + } - /** - * TODO: Need to check against ROI alignment restrictions if partial - * update support is added for destination scalar configurations - */ - if (sde_crtc_state->num_ds_enabled) { - SDE_ERROR("DS and PU concurrency is not supported\n"); - return -EINVAL; - } + rc = _sde_crtc_set_crtc_roi(crtc, state); + if (rc) + return rc; - rc = _sde_crtc_set_crtc_roi(crtc, state); - if (rc) - return rc; + rc = _sde_crtc_check_autorefresh(crtc, state); + if (rc) + return rc; - rc = _sde_crtc_check_autorefresh(crtc, state); - if (rc) - return rc; + for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) { + rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx); + if (rc) + return rc; + } - for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) { - rc = _sde_crtc_set_lm_roi(crtc, state, lm_idx); + rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state); if (rc) return rc; - } - rc = _sde_crtc_check_rois_centered_and_symmetric(crtc, state); - if (rc) - return rc; - - rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state); - if (rc) - return rc; + rc = _sde_crtc_check_planes_within_crtc_roi(crtc, state); + if (rc) + return rc; + } return 0; } @@ -2126,6 +2112,55 @@ static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc) } } +static void sde_crtc_frame_event_cb(void *data, u32 event) +{ + struct drm_crtc *crtc = (struct drm_crtc *)data; + struct sde_crtc *sde_crtc; + struct msm_drm_private *priv; + struct sde_crtc_frame_event *fevent; + struct sde_crtc_frame_event_cb_data *cb_data; + unsigned long flags; + u32 crtc_id; + + cb_data = (struct sde_crtc_frame_event_cb_data *)data; + if (!data) { + SDE_ERROR("invalid parameters\n"); + return; + } + + crtc = cb_data->crtc; + if (!crtc || !crtc->dev || !crtc->dev->dev_private) { + SDE_ERROR("invalid parameters\n"); + return; + } + sde_crtc = to_sde_crtc(crtc); + priv = crtc->dev->dev_private; + crtc_id = drm_crtc_index(crtc); + + SDE_DEBUG("crtc%d\n", crtc->base.id); + SDE_EVT32_VERBOSE(DRMID(crtc), event); + + spin_lock_irqsave(&sde_crtc->spin_lock, flags); + fevent = list_first_entry_or_null(&sde_crtc->frame_event_list, + struct sde_crtc_frame_event, list); + if (fevent) + list_del_init(&fevent->list); + spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); + + if (!fevent) { + SDE_ERROR("crtc%d event %d overflow\n", + crtc->base.id, event); + SDE_EVT32(DRMID(crtc), event); + return; + } + + fevent->event = event; + fevent->crtc = crtc; + fevent->connector = 
cb_data->connector; + fevent->ts = ktime_get(); + kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work); +} + void sde_crtc_prepare_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -2133,10 +2168,8 @@ void sde_crtc_prepare_commit(struct drm_crtc *crtc, struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; struct drm_connector *conn; + struct drm_encoder *encoder; struct drm_connector_list_iter conn_iter; - struct sde_crtc_retire_event *retire_event = NULL; - unsigned long flags; - int i; if (!crtc || !crtc->state) { SDE_ERROR("invalid crtc\n"); @@ -2155,38 +2188,24 @@ void sde_crtc_prepare_commit(struct drm_crtc *crtc, drm_for_each_connector_iter(conn, &conn_iter) if (conn->state && conn->state->crtc == crtc && cstate->num_connectors < MAX_CONNECTORS) { + encoder = conn->state->best_encoder; + if (encoder) + sde_encoder_register_frame_event_callback( + encoder, + sde_crtc_frame_event_cb, + crtc); + cstate->connectors[cstate->num_connectors++] = conn; sde_connector_prepare_fence(conn); } drm_connector_list_iter_end(&conn_iter); - for (i = 0; i < SDE_CRTC_FRAME_EVENT_SIZE; i++) { - retire_event = &sde_crtc->retire_events[i]; - if (list_empty(&retire_event->list)) - break; - retire_event = NULL; - } - - if (retire_event) { - retire_event->num_connectors = cstate->num_connectors; - for (i = 0; i < cstate->num_connectors; i++) - retire_event->connectors[i] = cstate->connectors[i]; - - spin_lock_irqsave(&sde_crtc->spin_lock, flags); - list_add_tail(&retire_event->list, - &sde_crtc->retire_event_list); - spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); - } else { - SDE_ERROR("crtc%d retire event overflow\n", crtc->base.id); - SDE_EVT32(DRMID(crtc), SDE_EVTLOG_ERROR); - } - /* prepare main output fence */ sde_fence_prepare(&sde_crtc->output_fence); } /** - * _sde_crtc_complete_flip - signal pending page_flip events + * sde_crtc_complete_flip - signal pending page_flip events * Any pending vblank events are added to the vblank_event_list * so that the next vblank interrupt shall signal them. * However PAGE_FLIP events are not handled through the vblank_event_list. 
@@ -2196,7 +2215,7 @@ void sde_crtc_prepare_commit(struct drm_crtc *crtc, * @crtc: Pointer to drm crtc structure * @file: Pointer to drm file */ -static void _sde_crtc_complete_flip(struct drm_crtc *crtc, +void sde_crtc_complete_flip(struct drm_crtc *crtc, struct drm_file *file) { struct sde_crtc *sde_crtc = to_sde_crtc(crtc); @@ -2206,19 +2225,23 @@ static void _sde_crtc_complete_flip(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); event = sde_crtc->event; - if (event) { - /* if regular vblank case (!file) or if cancel-flip from - * preclose on file that requested flip, then send the - * event: - */ - if (!file || (event->base.file_priv == file)) { - sde_crtc->event = NULL; - DRM_DEBUG_VBL("%s: send event: %pK\n", - sde_crtc->name, event); - SDE_EVT32_VERBOSE(DRMID(crtc)); - drm_crtc_send_vblank_event(crtc, event); - } + if (!event) + goto end; + + /* + * if regular vblank case (!file) or if cancel-flip from + * preclose on file that requested flip, then send the + * event: + */ + if (!file || (event->base.file_priv == file)) { + sde_crtc->event = NULL; + DRM_DEBUG_VBL("%s: send event: %pK\n", + sde_crtc->name, event); + SDE_EVT32_VERBOSE(DRMID(crtc)); + drm_crtc_send_vblank_event(crtc, event); } + +end: spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -2231,9 +2254,16 @@ enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc) return INTF_MODE_NONE; } - drm_for_each_encoder(encoder, crtc->dev) - if (encoder->crtc == crtc) - return sde_encoder_get_intf_mode(encoder); + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + + /* continue if copy encoder is encountered */ + if (sde_encoder_in_clone_mode(encoder)) + continue; + + return sde_encoder_get_intf_mode(encoder); + } return INTF_MODE_NONE; } @@ -2252,44 +2282,21 @@ static void sde_crtc_vblank_cb(void *data) sde_crtc->vblank_last_cb_time = ktime_get(); sysfs_notify_dirent(sde_crtc->vsync_event_sf); - _sde_crtc_complete_flip(crtc, NULL); drm_crtc_handle_vblank(crtc); DRM_DEBUG_VBL("crtc%d\n", crtc->base.id); SDE_EVT32_VERBOSE(DRMID(crtc)); } -static void _sde_crtc_retire_event(struct drm_crtc *crtc, ktime_t ts) +static void _sde_crtc_retire_event(struct drm_connector *connector, + ktime_t ts, bool is_error) { - struct sde_crtc_retire_event *retire_event; - struct sde_crtc *sde_crtc; - unsigned long flags; - int i; - - if (!crtc) { + if (!connector) { SDE_ERROR("invalid param\n"); return; } - sde_crtc = to_sde_crtc(crtc); - spin_lock_irqsave(&sde_crtc->spin_lock, flags); - retire_event = list_first_entry_or_null(&sde_crtc->retire_event_list, - struct sde_crtc_retire_event, list); - if (retire_event) - list_del_init(&retire_event->list); - spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); - - if (!retire_event) { - SDE_ERROR("crtc%d retire event without kickoff\n", - crtc->base.id); - SDE_EVT32(DRMID(crtc), SDE_EVTLOG_ERROR); - return; - } - SDE_ATRACE_BEGIN("signal_retire_fence"); - for (i = 0; (i < retire_event->num_connectors) && - retire_event->connectors[i]; ++i) - sde_connector_complete_commit( - retire_event->connectors[i], ts); + sde_connector_complete_commit(connector, ts, is_error); SDE_ATRACE_END("signal_retire_fence"); } @@ -2301,6 +2308,7 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) struct sde_crtc *sde_crtc; struct sde_kms *sde_kms; unsigned long flags; + bool in_clone_mode = false; if (!work) { SDE_ERROR("invalid work handle\n"); @@ -2329,10 +2337,11 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) 
SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_ENTRY); - if (fevent->event & (SDE_ENCODER_FRAME_EVENT_DONE - | SDE_ENCODER_FRAME_EVENT_ERROR - | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) { + in_clone_mode = sde_encoder_in_clone_mode(fevent->connector->encoder); + if (!in_clone_mode && (fevent->event & (SDE_ENCODER_FRAME_EVENT_ERROR + | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD + | SDE_ENCODER_FRAME_EVENT_DONE))) { if (atomic_read(&sde_crtc->frame_pending) < 1) { /* this should not happen */ SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n", @@ -2357,13 +2366,17 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE) { SDE_ATRACE_BEGIN("signal_release_fence"); - sde_fence_signal(&sde_crtc->output_fence, fevent->ts, false); + sde_fence_signal(&sde_crtc->output_fence, fevent->ts, + (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) + ? SDE_FENCE_SIGNAL_ERROR : SDE_FENCE_SIGNAL); SDE_ATRACE_END("signal_release_fence"); } if (fevent->event & SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE) /* this api should be called without spin_lock */ - _sde_crtc_retire_event(crtc, fevent->ts); + _sde_crtc_retire_event(fevent->connector, fevent->ts, + (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) + ? SDE_FENCE_SIGNAL_ERROR : SDE_FENCE_SIGNAL); if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD) SDE_ERROR("crtc%d ts:%lld received panel dead event\n", @@ -2375,46 +2388,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) SDE_ATRACE_END("crtc_frame_event"); } -static void sde_crtc_frame_event_cb(void *data, u32 event) -{ - struct drm_crtc *crtc = (struct drm_crtc *)data; - struct sde_crtc *sde_crtc; - struct msm_drm_private *priv; - struct sde_crtc_frame_event *fevent; - unsigned long flags; - u32 crtc_id; - - if (!crtc || !crtc->dev || !crtc->dev->dev_private) { - SDE_ERROR("invalid parameters\n"); - return; - } - sde_crtc = to_sde_crtc(crtc); - priv = crtc->dev->dev_private; - crtc_id = drm_crtc_index(crtc); - - SDE_DEBUG("crtc%d\n", crtc->base.id); - SDE_EVT32_VERBOSE(DRMID(crtc), event); - - spin_lock_irqsave(&sde_crtc->spin_lock, flags); - fevent = list_first_entry_or_null(&sde_crtc->frame_event_list, - struct sde_crtc_frame_event, list); - if (fevent) - list_del_init(&fevent->list); - spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); - - if (!fevent) { - SDE_ERROR("crtc%d event %d overflow\n", - crtc->base.id, event); - SDE_EVT32(DRMID(crtc), event); - return; - } - - fevent->event = event; - fevent->crtc = crtc; - fevent->ts = ktime_get(); - kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work); -} - void sde_crtc_complete_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -2986,6 +2959,10 @@ static void _sde_crtc_setup_mixers(struct drm_crtc *crtc) if (enc->crtc != crtc) continue; + /* avoid overwriting mixers info from a copy encoder */ + if (sde_encoder_in_clone_mode(enc)) + continue; + _sde_crtc_setup_mixer_for_encoder(crtc, enc); } @@ -3054,7 +3031,6 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc, struct sde_crtc *sde_crtc; struct drm_encoder *encoder; struct drm_device *dev; - unsigned long flags; struct sde_kms *sde_kms; if (!crtc) { @@ -3088,14 +3064,6 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc, _sde_crtc_setup_lm_bounds(crtc, crtc->state); } - if (sde_crtc->event) { - WARN_ON(sde_crtc->event); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - sde_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, 
flags); - } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) continue; @@ -3127,7 +3095,7 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc, * apply color processing properties only if * smmu state is attached, */ - if (sde_kms_is_cp_operation_allowed(sde_kms)) + if (sde_kms_is_cp_operation_allowed(sde_kms) && sde_crtc->enabled) sde_cp_crtc_apply_properties(crtc); /* @@ -3148,7 +3116,6 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_plane *plane; struct msm_drm_private *priv; struct msm_drm_thread *event_thread; - unsigned long flags; struct sde_crtc_state *cstate; struct sde_kms *sde_kms; int idle_time = 0; @@ -3190,14 +3157,6 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc, event_thread = &priv->event_thread[crtc->index]; idle_time = sde_crtc_get_property(cstate, CRTC_PROP_IDLE_TIMEOUT); - if (sde_crtc->event) { - SDE_DEBUG("already received sde_crtc->event\n"); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - sde_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - } - /* * If no mixers has been allocated in sde_crtc_atomic_check(), * it means we are trying to flush a CRTC whose state is disabled: @@ -3638,6 +3597,7 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, struct sde_kms *sde_kms; struct sde_crtc_state *cstate; bool is_error, reset_req, recovery_events; + unsigned long flags; if (!crtc) { SDE_ERROR("invalid argument\n"); @@ -3738,6 +3698,15 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc, sde_encoder_kickoff(encoder, false); } + /* store the event after frame trigger */ + if (sde_crtc->event) { + WARN_ON(sde_crtc->event); + } else { + spin_lock_irqsave(&dev->event_lock, flags); + sde_crtc->event = crtc->state->event; + spin_unlock_irqrestore(&dev->event_lock, flags); + } + SDE_ATRACE_END("crtc_commit"); } @@ -4198,7 +4167,8 @@ static void sde_crtc_disable(struct drm_crtc *crtc) * reset the fence timeline if crtc will not be enabled for this commit */ if (!crtc->state->active || !crtc->state->enable) { - sde_fence_signal(&sde_crtc->output_fence, ktime_get(), true); + sde_fence_signal(&sde_crtc->output_fence, + ktime_get(), SDE_FENCE_RESET_TIMELINE); for (i = 0; i < cstate->num_connectors; ++i) sde_connector_commit_reset(cstate->connectors[i], ktime_get()); @@ -4264,7 +4234,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; sde_encoder_register_frame_event_callback(encoder, - sde_crtc_frame_event_cb, (void *)crtc); + sde_crtc_frame_event_cb, crtc); } if (!sde_crtc->enabled && !sde_crtc->suspend && @@ -4907,16 +4877,6 @@ int sde_crtc_vblank(struct drm_crtc *crtc, bool en) return 0; } -void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) -{ - struct sde_crtc *sde_crtc = to_sde_crtc(crtc); - - SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file); - _sde_crtc_complete_flip(crtc, file); -} - - - /** * sde_crtc_install_properties - install all drm properties for crtc * @crtc: Pointer to drm crtc structure @@ -4933,6 +4893,11 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, {SDE_DRM_SEC_ONLY, "sec_only"}, }; + static const struct drm_prop_enum_list e_cwb_data_points[] = { + {CAPTURE_MIXER_OUT, "capture_mixer_out"}, + {CAPTURE_DSPP_OUT, "capture_pp_out"}, + }; + SDE_DEBUG("\n"); if (!crtc || !catalog) { @@ -5008,6 +4973,12 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, "idle_time", 0, 0, U64_MAX, 0, CRTC_PROP_IDLE_TIMEOUT); + if 
(catalog->has_cwb_support) + msm_property_install_enum(&sde_crtc->property_info, + "capture_mode", 0, 0, e_cwb_data_points, + ARRAY_SIZE(e_cwb_data_points), + CRTC_PROP_CAPTURE_OUTPUT); + msm_property_install_blob(&sde_crtc->property_info, "capabilities", DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO); @@ -5928,10 +5899,6 @@ static int _sde_crtc_init_events(struct sde_crtc *sde_crtc) list_add_tail(&sde_crtc->event_cache[i].list, &sde_crtc->event_free_list); - INIT_LIST_HEAD(&sde_crtc->retire_event_list); - for (i = 0; i < ARRAY_SIZE(sde_crtc->retire_events); i++) - INIT_LIST_HEAD(&sde_crtc->retire_events[i].list); - return rc; } @@ -6102,10 +6069,10 @@ static int _sde_crtc_event_enable(struct sde_kms *kms, node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; - node->event = event; INIT_LIST_HEAD(&node->list); node->func = custom_events[i].func; node->event = event; + node->state = IRQ_NOINIT; spin_lock_init(&node->state_lock); break; } @@ -6136,8 +6103,6 @@ static int _sde_crtc_event_enable(struct sde_kms *kms, if (!ret) { spin_lock_irqsave(&crtc->spin_lock, flags); - /* irq is regiestered and enabled and set the state */ - node->state = IRQ_ENABLED; list_add_tail(&node->list, &crtc->user_event_list); spin_unlock_irqrestore(&crtc->spin_lock, flags); } else { @@ -6161,6 +6126,7 @@ static int _sde_crtc_event_disable(struct sde_kms *kms, spin_lock_irqsave(&crtc->spin_lock, flags); list_for_each_entry(node, &crtc->user_event_list, list) { if (node->event == event) { + list_del(&node->list); found = true; break; } @@ -6176,7 +6142,6 @@ * no need to disable/de-register. */ if (!crtc_drm->enabled) { - list_del(&node->list); kfree(node); return 0; } @@ -6185,13 +6150,11 @@ if (ret) { SDE_ERROR("failed to enable power resource %d\n", ret); SDE_EVT32(ret, SDE_EVTLOG_ERROR); - list_del(&node->list); kfree(node); return ret; } ret = node->func(crtc_drm, false, &node->irq); - list_del(&node->list); kfree(node); sde_power_resource_enable(&priv->phandle, kms->core_client, false); return ret; diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 37e197e1e37a296210097544bb741e7241bbbf88..8343d5a4e7fc5da17bd08860ff861c386c301903 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -31,7 +31,8 @@ #define SDE_CRTC_NAME_SIZE 12 /* define the maximum number of in-flight frame events */ -#define SDE_CRTC_FRAME_EVENT_SIZE 4 +/* Expand it to 2x for handling at least 2 connectors safely */ +#define SDE_CRTC_FRAME_EVENT_SIZE (4 * 2) /** * enum sde_crtc_client_type: crtc client type @@ -47,6 +48,16 @@ enum sde_crtc_client_type { RT_RSC_CLIENT, }; +/** + * enum sde_crtc_output_capture_point + * @CAPTURE_MIXER_OUT : capture mixer output + * @CAPTURE_DSPP_OUT : capture output of dspp + */ +enum sde_crtc_output_capture_point { + CAPTURE_MIXER_OUT, + CAPTURE_DSPP_OUT +}; + /** * @connectors : Currently associated drm connectors for retire event * @num_connectors: Number of associated drm connectors for retire event @@ -76,10 +87,21 @@ struct sde_crtc_mixer { u32 mixer_op_mode; }; +/** + * struct sde_crtc_frame_event_cb_data : info of drm objects of a frame event + * @crtc: pointer to drm crtc object registered for frame event + * @connector: pointer to drm connector which is the source of the frame event + */ +struct sde_crtc_frame_event_cb_data { + struct drm_crtc *crtc; + struct drm_connector *connector; +}; + /** * struct sde_crtc_frame_event: 
stores crtc frame event for crtc processing * @work: base work structure * @crtc: Pointer to crtc handling this event + * @connector: pointer to drm connector which is source of frame event * @list: event list * @ts: timestamp at queue entry * @event: event identifier @@ -87,6 +109,7 @@ struct sde_crtc_mixer { struct sde_crtc_frame_event { struct kthread_work work; struct drm_crtc *crtc; + struct drm_connector *connector; struct list_head list; ktime_t ts; u32 event; @@ -154,8 +177,6 @@ struct sde_crtc_event { * @frame_events : static allocation of in-flight frame events * @frame_event_list : available frame event list * @spin_lock : spin lock for frame event, transaction status, etc... - * @retire_events : static allocation of retire fence connector - * @retire_event_list : available retire fence connector list * @event_thread : Pointer to event handler thread * @event_worker : Event worker queue * @event_cache : Local cache of event worker structures @@ -222,8 +243,6 @@ struct sde_crtc { struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE]; struct list_head frame_event_list; spinlock_t spin_lock; - struct sde_crtc_retire_event retire_events[SDE_CRTC_FRAME_EVENT_SIZE]; - struct list_head retire_event_list; /* for handling internal event thread */ struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT]; @@ -379,6 +398,7 @@ struct sde_crtc_state { enum sde_crtc_irq_state { IRQ_NOINIT, IRQ_ENABLED, + IRQ_DISABLING, IRQ_DISABLED, }; @@ -526,11 +546,11 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane); int sde_crtc_post_init(struct drm_device *dev, struct drm_crtc *crtc); /** - * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose + * sde_crtc_complete_flip - complete flip for clients * @crtc: Pointer to drm crtc object * @file: client to cancel's file handle */ -void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); +void sde_crtc_complete_flip(struct drm_crtc *crtc, struct drm_file *file); /** * sde_crtc_register_custom_event - api for enabling/disabling crtc event diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index f1499487adeee2a42f64c101e17fad1bff895e2f..74399204cf5510c22b5ef6a10a89e706b068347d 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -240,7 +240,7 @@ struct sde_encoder_virt { struct mutex enc_lock; DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); void (*crtc_frame_event_cb)(void *, u32 event); - void *crtc_frame_event_cb_data; + struct sde_crtc_frame_event_cb_data crtc_frame_event_cb_data; struct timer_list vsync_event_timer; @@ -425,6 +425,14 @@ bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc) return false; } +int sde_encoder_in_clone_mode(struct drm_encoder *drm_enc) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + + return sde_enc && sde_enc->cur_master && + sde_enc->cur_master->in_clone_mode; +} + static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc, bool enable) { @@ -733,8 +741,8 @@ void sde_encoder_destroy(struct drm_encoder *drm_enc) mutex_destroy(&sde_enc->enc_lock); if (sde_enc->input_handler) { - input_unregister_handler(sde_enc->input_handler); kfree(sde_enc->input_handler); + sde_enc->input_handler = NULL; } kfree(sde_enc); @@ -1195,7 +1203,7 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc) const struct sde_rect *roi = &sde_enc->cur_conn_roi; struct msm_mode_info mode_info; 
struct msm_display_dsc_info *dsc = NULL; - struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl; + struct sde_hw_ctl *hw_ctl; struct sde_ctl_dsc_cfg cfg; int rc; @@ -1210,6 +1218,8 @@ static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc) return -EINVAL; } + hw_ctl = enc_master->hw_ctl; + memset(&cfg, 0, sizeof(cfg)); dsc = &mode_info.comp_info.dsc_info; _sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h); @@ -1582,6 +1592,8 @@ void sde_encoder_helper_vsync_config(struct sde_encoder_phys *phys_enc, struct sde_encoder_virt *sde_enc; int i, rc = 0; + sde_enc = to_sde_encoder_virt(phys_enc->parent); + if (!sde_enc) { SDE_ERROR("invalid param sde_enc:%d\n", sde_enc != NULL); return; @@ -1592,8 +1604,6 @@ void sde_encoder_helper_vsync_config(struct sde_encoder_phys *phys_enc, return; } - sde_enc = to_sde_encoder_virt(phys_enc->parent); - drm_enc = &sde_enc->base; /* this pointers are checked in virt_enable_helper */ priv = drm_enc->dev->dev_private; @@ -1766,6 +1776,7 @@ static int _sde_encoder_update_rsc_client( * only primary command mode panel without Qsync can request CMD state. * all other panels/displays can request for VID state including * secondary command mode panel. + * Clone mode encoder can request CLK STATE only. */ for (i = 0; i < sde_enc->num_phys_encs; i++) { phys = sde_enc->phys_encs[i]; @@ -1778,13 +1789,17 @@ static int _sde_encoder_update_rsc_client( } } - rsc_state = enable ? - (((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) && - disp_info->is_primary && !qsync_mode) ? - SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : - SDE_RSC_IDLE_STATE; + if (sde_encoder_in_clone_mode(drm_enc)) + rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE; + else + rsc_state = enable ? + (((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) + && disp_info->is_primary && !qsync_mode) ? + SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : + SDE_RSC_IDLE_STATE; SDE_EVT32(rsc_state, qsync_mode); + prefill_lines = config ? mode_info.prefill_lines + config->inline_rotate_prefill : mode_info.prefill_lines; @@ -2691,6 +2706,109 @@ void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable) } } +static int _sde_encoder_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int rc = 0; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = handler->name; + + rc = input_register_handle(handle); + if (rc) { + pr_err("failed to register input handle\n"); + goto error; + } + + rc = input_open_device(handle); + if (rc) { + pr_err("failed to open input device\n"); + goto error_unregister; + } + + return 0; + +error_unregister: + input_unregister_handle(handle); + +error: + kfree(handle); + + return rc; +} + +static void _sde_encoder_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +/** + * Structure for specifying event parameters on which to receive callbacks. 
+ * This structure will trigger a callback in case of a touch event (specified by + * EV_ABS) where there is a change in X and Y coordinates, + */ +static const struct input_device_id sde_input_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_ABS) }, + .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = + BIT_MASK(ABS_MT_POSITION_X) | + BIT_MASK(ABS_MT_POSITION_Y) }, + }, + { }, +}; + +static int _sde_encoder_input_handler_register( + struct input_handler *input_handler) +{ + int rc = 0; + + rc = input_register_handler(input_handler); + if (rc) { + pr_err("input_register_handler failed, rc= %d\n", rc); + kfree(input_handler); + return rc; + } + + return rc; +} + +static int _sde_encoder_input_handler( + struct sde_encoder_virt *sde_enc) +{ + struct input_handler *input_handler = NULL; + int rc = 0; + + if (sde_enc->input_handler) { + SDE_ERROR_ENC(sde_enc, + "input_handle is active. unexpected\n"); + return -EINVAL; + } + + input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL); + if (!input_handler) + return -ENOMEM; + + input_handler->event = sde_encoder_input_event_handler; + input_handler->connect = _sde_encoder_input_connect; + input_handler->disconnect = _sde_encoder_input_disconnect; + input_handler->name = "sde"; + input_handler->id_table = sde_input_ids; + input_handler->private = sde_enc; + + sde_enc->input_handler = input_handler; + + return rc; +} + static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc = NULL; @@ -2781,12 +2899,14 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) struct msm_compression_info *comp_info = NULL; struct drm_display_mode *cur_mode = NULL; struct msm_mode_info mode_info; + struct msm_display_info *disp_info; if (!drm_enc) { SDE_ERROR("invalid encoder\n"); return; } sde_enc = to_sde_encoder_virt(drm_enc); + disp_info = &sde_enc->disp_info; if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) { SDE_ERROR("power resource is not enabled\n"); @@ -2824,6 +2944,16 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) return; } + if (sde_enc->input_handler) { + ret = _sde_encoder_input_handler_register( + sde_enc->input_handler); + if (ret) + SDE_ERROR( + "input handler registration failed, rc = %d\n", ret); + } + + sde_enc->delayed_off_work.work.worker = NULL; + ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF); if (ret) { SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n", @@ -2874,7 +3004,6 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) struct sde_encoder_virt *sde_enc = NULL; struct msm_drm_private *priv; struct sde_kms *sde_kms; - struct drm_connector *drm_conn = NULL; enum sde_intf_mode intf_mode; int i = 0; @@ -2903,13 +3032,14 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) SDE_EVT32(DRMID(drm_enc)); - /* Disable ESD thread */ - drm_conn = sde_enc->cur_master->connector; - sde_connector_schedule_status_work(drm_conn, false); - /* wait for idle */ sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); + kthread_flush_work(&sde_enc->input_event_work); + + if (sde_enc->input_handler) + input_unregister_handler(sde_enc->input_handler); + /* * For primary command mode encoders, execute the resource control * pre-stop operations before the physical encoders are disabled, to @@ -3064,8 +3194,8 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc, } void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, - void 
(*frame_event_cb)(void *, u32 event), - void *frame_event_cb_data) + void (*frame_event_cb)(void *, u32 event), + struct drm_crtc *crtc) { struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); unsigned long lock_flags; @@ -3082,7 +3212,7 @@ void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags); sde_enc->crtc_frame_event_cb = frame_event_cb; - sde_enc->crtc_frame_event_cb_data = frame_event_cb_data; + sde_enc->crtc_frame_event_cb_data.crtc = crtc; spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); } @@ -3093,6 +3223,9 @@ static void sde_encoder_frame_done_callback( struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); unsigned int i; + sde_enc->crtc_frame_event_cb_data.connector = + sde_enc->cur_master->connector; + if (event & (SDE_ENCODER_FRAME_EVENT_DONE | SDE_ENCODER_FRAME_EVENT_ERROR | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)) { @@ -3121,13 +3254,13 @@ static void sde_encoder_frame_done_callback( if (sde_enc->crtc_frame_event_cb) sde_enc->crtc_frame_event_cb( - sde_enc->crtc_frame_event_cb_data, + &sde_enc->crtc_frame_event_cb_data, event); } } else { if (sde_enc->crtc_frame_event_cb) sde_enc->crtc_frame_event_cb( - sde_enc->crtc_frame_event_cb_data, event); + &sde_enc->crtc_frame_event_cb_data, event); } } @@ -3274,6 +3407,9 @@ static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys) SDE_ERROR("invalid parent\n"); return; } + /* avoid ctrl start for encoder in clone mode */ + if (phys->in_clone_mode) + return; ctl = phys->hw_ctl; sde_enc = to_sde_encoder_virt(phys->parent); @@ -3890,102 +4026,6 @@ static void sde_encoder_input_event_work_handler(struct kthread_work *work) SDE_ENC_RC_EVENT_EARLY_WAKEUP); } -static int _sde_encoder_input_connect(struct input_handler *handler, - struct input_dev *dev, const struct input_device_id *id) -{ - struct input_handle *handle; - int rc = 0; - - handle = kzalloc(sizeof(*handle), GFP_KERNEL); - if (!handle) - return -ENOMEM; - - handle->dev = dev; - handle->handler = handler; - handle->name = handler->name; - - rc = input_register_handle(handle); - if (rc) { - pr_err("failed to register input handle\n"); - goto error; - } - - rc = input_open_device(handle); - if (rc) { - pr_err("failed to open input device\n"); - goto error_unregister; - } - - return 0; - -error_unregister: - input_unregister_handle(handle); - -error: - kfree(handle); - - return rc; -} - -static void _sde_encoder_input_disconnect(struct input_handle *handle) -{ - input_close_device(handle); - input_unregister_handle(handle); - kfree(handle); -} - -/** - * Structure for specifying event parameters on which to receive callbacks. - * This structure will trigger a callback in case of a touch event (specified by - * EV_ABS) where there is a change in X and Y coordinates, - */ -static const struct input_device_id sde_input_ids[] = { - { - .flags = INPUT_DEVICE_ID_MATCH_EVBIT, - .evbit = { BIT_MASK(EV_ABS) }, - .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = - BIT_MASK(ABS_MT_POSITION_X) | - BIT_MASK(ABS_MT_POSITION_Y) }, - }, - { }, -}; - -static int _sde_encoder_input_handler( - struct sde_encoder_virt *sde_enc) -{ - struct input_handler *input_handler = NULL; - int rc = 0; - - if (sde_enc->input_handler) { - SDE_ERROR_ENC(sde_enc, - "input_handle is active. 
unexpected\n"); - return -EINVAL; - } - - input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL); - if (!input_handler) - return -ENOMEM; - - input_handler->event = sde_encoder_input_event_handler; - input_handler->connect = _sde_encoder_input_connect; - input_handler->disconnect = _sde_encoder_input_disconnect; - input_handler->name = "sde"; - input_handler->id_table = sde_input_ids; - input_handler->private = sde_enc; - - rc = input_register_handler(input_handler); - if (rc) { - SDE_ERROR_ENC(sde_enc, - "input_register_handler failed, rc= %d\n", rc); - kfree(input_handler); - return rc; - } - - sde_enc->input_handler = input_handler; - - return rc; -} - static void sde_encoder_vsync_event_work_handler(struct kthread_work *work) { struct sde_encoder_virt *sde_enc = container_of(work, @@ -5208,7 +5248,7 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder) int sde_encoder_display_failure_notification(struct drm_encoder *enc) { - struct msm_drm_thread *disp_thread = NULL; + struct msm_drm_thread *event_thread = NULL; struct msm_drm_private *priv = NULL; struct sde_encoder_virt *sde_enc = NULL; @@ -5220,7 +5260,7 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc) priv = enc->dev->dev_private; sde_enc = to_sde_encoder_virt(enc); if (!sde_enc->crtc || (sde_enc->crtc->index - >= ARRAY_SIZE(priv->disp_thread))) { + >= ARRAY_SIZE(priv->event_thread))) { SDE_DEBUG_ENC(sde_enc, "invalid cached CRTC: %d or crtc index: %d\n", sde_enc->crtc == NULL, @@ -5230,11 +5270,12 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc) SDE_EVT32_VERBOSE(DRMID(enc)); - disp_thread = &priv->disp_thread[sde_enc->crtc->index]; + event_thread = &priv->event_thread[sde_enc->crtc->index]; - kthread_queue_work(&disp_thread->worker, - &sde_enc->esd_trigger_work); + kthread_queue_work(&event_thread->worker, + &sde_enc->esd_trigger_work); kthread_flush_work(&sde_enc->esd_trigger_work); + /** * panel may stop generating te signal (vsync) during esd failure. rsc * hardware may hang without vsync. Avoid rsc hang by generating the diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index 881520fc536bf8d0d7284791f8b2adc56c048600..9adebb53b78389d5d62bf6bbcdfee09b5f16f18a 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -99,10 +99,10 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *encoder, * will be called after the request is complete, or other events. * @encoder: encoder pointer * @cb: callback pointer, provide NULL to deregister - * @data: user data provided to callback + * @crtc: pointer to drm_crtc object interested in frame events */ void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder, - void (*cb)(void *, u32), void *data); + void (*cb)(void *, u32), struct drm_crtc *crtc); /** * sde_encoder_get_rsc_client - gets the rsc client state for primary @@ -269,5 +269,12 @@ bool sde_encoder_recovery_events_enabled(struct drm_encoder *encoder); */ void sde_encoder_recovery_events_handler(struct drm_encoder *encoder, bool val); +/** + * sde_encoder_in_clone_mode - checks if underlying phys encoder is in clone + * mode or independent display mode. ref@ WB in Concurrent writeback mode. 
+ * @drm_enc: Pointer to drm encoder structure + * @Return: true if the encoder is in clone mode, false otherwise + */ +int sde_encoder_in_clone_mode(struct drm_encoder *drm_enc); #endif /* __SDE_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 5c403ef6bef2941a36f3287eb36e7d13f54557a2..e8876ba4ba0bbe12b339ee93a16502c0f5ff725c 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -200,6 +200,11 @@ struct sde_encoder_phys_ops { * @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel * @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel * @INTR_IDX_RDPTR: Readpointer done unterrupt for cmd mode panel + * @INTR_IDX_WB_DONE: Writeback done interrupt for WB + * @INTR_IDX_PP2_OVFL: Pingpong overflow interrupt on PP2 for Concurrent WB + * @INTR_IDX_PP3_OVFL: Pingpong overflow interrupt on PP3 for Concurrent WB + * @INTR_IDX_PP4_OVFL: Pingpong overflow interrupt on PP4 for Concurrent WB + * @INTR_IDX_PP5_OVFL: Pingpong overflow interrupt on PP5 for Concurrent WB * @INTR_IDX_AUTOREFRESH_DONE: Autorefresh done for cmd mode panel meaning * autorefresh has triggered a double buffer flip */ @@ -210,6 +215,11 @@ enum sde_intr_idx { INTR_IDX_CTL_START, INTR_IDX_RDPTR, INTR_IDX_AUTOREFRESH_DONE, + INTR_IDX_WB_DONE, + INTR_IDX_PP2_OVFL, + INTR_IDX_PP3_OVFL, + INTR_IDX_PP4_OVFL, + INTR_IDX_PP5_OVFL, INTR_IDX_MAX, }; @@ -274,6 +284,9 @@ struct sde_encoder_irq { * @has_intf_te: Interface TE configuration support * @cont_splash_single_flush Variable to check if single flush is enabled. * @cont_splash_settings Variable to store continuous splash settings. + * @in_clone_mode Indicates if encoder is in clone mode ref@CWB + * @vfp_cached: cached vertical front porch to be used for + * programming ROT and MDP fetch start */ struct sde_encoder_phys { struct drm_encoder *parent; @@ -307,6 +320,8 @@ struct sde_encoder_phys { bool has_intf_te; u32 cont_splash_single_flush; bool cont_splash_settings; + bool in_clone_mode; + int vfp_cached; }; static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys) @@ -377,7 +392,6 @@ struct sde_encoder_phys_cmd { * writeback specific operations * @base: Baseclass physical encoder structure * @hw_wb: Hardware interface to the wb registers - * @irq_idx: IRQ interface lookup index * @wbdone_timeout: Timeout value for writeback done in msec * @bypass_irqreg: Bypass irq register/unregister if non-zero * @wbdone_complete: for wbdone irq synchronization @@ -400,8 +414,6 @@ struct sde_encoder_phys_cmd { struct sde_encoder_phys_wb { struct sde_encoder_phys base; struct sde_hw_wb *hw_wb; - int irq_idx; - struct sde_irq_callback irq_cb; u32 wbdone_timeout; u32 bypass_irqreg; struct completion wbdone_complete; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 128b9d7a71dde4c4fee8750b0fb9b95011ae0e69..1cdfc6e9fe5b38c68e333ea1c23d5d09258a6587 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -131,7 +131,7 @@ static void _sde_encoder_phys_cmd_update_flush_mask( struct sde_hw_ctl *ctl; bool merge_3d_enable = false; - if (!phys_enc && !phys_enc->hw_intf && !phys_enc->hw_pp) + if (!phys_enc || !phys_enc->hw_intf || !phys_enc->hw_pp) return; cmd_enc = to_sde_encoder_phys_cmd(phys_enc); @@ -522,6 +522,10 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout( atomic_read(&phys_enc->pending_kickoff_cnt), 
frame_event); + /* check if panel is still sending TE signal or not */ + if (sde_connector_esd_status(phys_enc->connector)) + goto exit; + /* to avoid flooding, only log first time, and "dead" time */ if (cmd_enc->pp_timeout_report_cnt == 1) { SDE_ERROR_CMDENC(cmd_enc, @@ -549,11 +553,12 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout( SDE_DBG_DUMP("panic"); } - atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); - /* request a ctl reset before the next kickoff */ phys_enc->enable_state = SDE_ENC_ERR_NEEDS_HW_RESET; +exit: + atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); + if (phys_enc->parent_ops.handle_frame_done) phys_enc->parent_ops.handle_frame_done( phys_enc->parent, phys_enc, frame_event); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index ea0fb886776f4b255ece0ab8bc977db244cb0261..460c4e16fc740fe5d86129072f4d10c27d3de09a 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -103,6 +103,7 @@ static void drm_mode_to_intf_timing_params( timing->border_clr = 0; timing->underflow_clr = 0xff; timing->hsync_skew = mode->hskew; + timing->v_front_porch_fixed = vid_enc->base.vfp_cached; /* DSI controller cannot handle active-low sync signals. */ if (phys_enc->hw_intf->cap->type == INTF_DSI) { @@ -132,12 +133,16 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing) return active + inactive; } -static inline u32 get_vertical_total(const struct intf_timing_params *timing) +static inline u32 get_vertical_total(const struct intf_timing_params *timing, + bool use_fixed_vfp) { + u32 inactive; u32 active = timing->yres; - u32 inactive = - timing->v_back_porch + timing->v_front_porch + - timing->vsync_pulse_width; + u32 v_front_porch = use_fixed_vfp ? + timing->v_front_porch_fixed : timing->v_front_porch; + + inactive = timing->v_back_porch + v_front_porch + + timing->vsync_pulse_width; return active + inactive; } @@ -157,7 +162,8 @@ static inline u32 get_vertical_total(const struct intf_timing_params *timing) */ static u32 programmable_fetch_get_num_lines( struct sde_encoder_phys_vid *vid_enc, - const struct intf_timing_params *timing) + const struct intf_timing_params *timing, + bool use_fixed_vfp) { struct sde_encoder_phys *phys_enc = &vid_enc->base; u32 worst_case_needed_lines = @@ -166,19 +172,21 @@ static u32 programmable_fetch_get_num_lines( timing->v_back_porch + timing->vsync_pulse_width; u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines; u32 actual_vfp_lines = 0; + u32 v_front_porch = use_fixed_vfp ? + timing->v_front_porch_fixed : timing->v_front_porch; /* Fetch must be outside active lines, otherwise undefined. 
*/ if (start_of_frame_lines >= worst_case_needed_lines) { SDE_DEBUG_VIDENC(vid_enc, "prog fetch is not needed, large vbp+vsw\n"); actual_vfp_lines = 0; - } else if (timing->v_front_porch < needed_vfp_lines) { + } else if (v_front_porch < needed_vfp_lines) { /* Warn fetch needed, but not enough porch in panel config */ pr_warn_once ("low vbp+vfp may lead to perf issues in some cases\n"); SDE_DEBUG_VIDENC(vid_enc, "less vfp than fetch req, using entire vfp\n"); - actual_vfp_lines = timing->v_front_porch; + actual_vfp_lines = v_front_porch; } else { SDE_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n"); actual_vfp_lines = needed_vfp_lines; @@ -186,7 +194,7 @@ static u32 programmable_fetch_get_num_lines( SDE_DEBUG_VIDENC(vid_enc, "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n", - timing->v_front_porch, timing->v_back_porch, + v_front_porch, timing->v_back_porch, timing->vsync_pulse_width); SDE_DEBUG_VIDENC(vid_enc, "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n", @@ -223,9 +231,10 @@ static void programmable_fetch_config(struct sde_encoder_phys *phys_enc, m = phys_enc->sde_kms->catalog; - vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing); + vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, + timing, true); if (vfp_fetch_lines) { - vert_total = get_vertical_total(timing); + vert_total = get_vertical_total(timing, true); horiz_total = get_horizontal_total(timing); vfp_fetch_start_vsync_counter = (vert_total - vfp_fetch_lines) * horiz_total + 1; @@ -282,9 +291,10 @@ static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc, return; timing = &vid_enc->timing_params; - vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing); + vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, + timing, true); if (rot_fetch_lines) { - vert_total = get_vertical_total(timing); + vert_total = get_vertical_total(timing, true); horiz_total = get_horizontal_total(timing); if (vert_total >= (vfp_fetch_lines + rot_fetch_lines)) { rot_fetch_start_vsync_counter = @@ -343,7 +353,7 @@ static bool sde_encoder_phys_vid_mode_fixup( /* vid_enc timing_params must be configured before calling this function */ static void _sde_encoder_phys_vid_setup_avr( - struct sde_encoder_phys *phys_enc) + struct sde_encoder_phys *phys_enc, u32 qsync_min_fps) { struct sde_encoder_phys_vid *vid_enc; struct drm_display_mode mode; @@ -352,18 +362,13 @@ static void _sde_encoder_phys_vid_setup_avr( mode = phys_enc->cached_mode; if (vid_enc->base.hw_intf->ops.avr_setup) { struct intf_avr_params avr_params = {0}; - u32 qsync_min_fps = 0; u32 default_fps = mode.vrefresh; int ret; - if (phys_enc->parent_ops.get_qsync_fps) - phys_enc->parent_ops.get_qsync_fps( - phys_enc->parent, &qsync_min_fps); - - if (!qsync_min_fps || !default_fps) { + if (!default_fps) { SDE_ERROR_VIDENC(vid_enc, - "wrong qsync params %d %d\n", - qsync_min_fps, default_fps); + "invalid default fps %d\n", + default_fps); return; } @@ -397,6 +402,7 @@ static void sde_encoder_phys_vid_setup_timing_engine( struct intf_timing_params timing_params = { 0 }; const struct sde_format *fmt = NULL; u32 fmt_fourcc = DRM_FORMAT_RGB888; + u32 qsync_min_fps = 0; unsigned long lock_flags; struct sde_hw_intf_cfg intf_cfg = { 0 }; @@ -428,6 +434,13 @@ static void sde_encoder_phys_vid_setup_timing_engine( mode.hsync_start, mode.hsync_end); } + if (!phys_enc->vfp_cached) { + phys_enc->vfp_cached = + sde_connector_get_panel_vfp(phys_enc->connector, &mode); + if (phys_enc->vfp_cached <= 0) + phys_enc->vfp_cached = 
mode.vsync_start - mode.vdisplay; + } + drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params); vid_enc->timing_params = timing_params; @@ -462,7 +475,13 @@ static void sde_encoder_phys_vid_setup_timing_engine( programmable_fetch_config(phys_enc, &timing_params); exit: - _sde_encoder_phys_vid_setup_avr(phys_enc); + if (phys_enc->parent_ops.get_qsync_fps) + phys_enc->parent_ops.get_qsync_fps( + phys_enc->parent, &qsync_min_fps); + + /* only panels which support qsync will have a non-zero min fps */ + if (qsync_min_fps) + _sde_encoder_phys_vid_setup_avr(phys_enc, qsync_min_fps); } static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) @@ -667,11 +686,17 @@ static int sde_encoder_phys_vid_control_vblank_irq( SDE_EVT32(DRMID(phys_enc->parent), enable, atomic_read(&phys_enc->vblank_refcount)); - if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) + if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) { ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC); - else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) + if (ret) + atomic_dec_return(&phys_enc->vblank_refcount); + } else if (!enable && + atomic_dec_return(&phys_enc->vblank_refcount) == 0) { ret = sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC); + if (ret) + atomic_inc_return(&phys_enc->vblank_refcount); + } end: if (ret) { @@ -835,6 +860,7 @@ static void sde_encoder_phys_vid_get_hw_resources( return; } + vid_enc = to_sde_encoder_phys_vid(phys_enc); SDE_DEBUG_VIDENC(vid_enc, "\n"); hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO; } @@ -881,7 +907,7 @@ static int _sde_encoder_phys_vid_wait_for_vblank( if (phys_enc->parent_ops.handle_frame_done && event) phys_enc->parent_ops.handle_frame_done( phys_enc->parent, phys_enc, - SDE_ENCODER_FRAME_EVENT_DONE); + event); return ret; } @@ -1071,6 +1097,7 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) phys_enc->hw_pp->merge_3d->idx); exit: + phys_enc->vfp_cached = 0; phys_enc->enable_state = SDE_ENC_DISABLED; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index 59acecf73550ad45a04ca44173ba0d4c8c0063ae..4f4ae363c2e87cba3f7c5c7a01d466ef567dfa63 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -32,6 +32,12 @@ #define TO_S15D16(_x_) ((_x_) << 7) +#define MULTIPLE_CONN_DETECTED(x) (x > 1) + +static const u32 cwb_irq_tbl[PINGPONG_MAX] = {SDE_NONE, SDE_NONE, + INTR_IDX_PP2_OVFL, INTR_IDX_PP3_OVFL, INTR_IDX_PP4_OVFL, + INTR_IDX_PP5_OVFL, SDE_NONE, SDE_NONE}; + /** * sde_rgb2yuv_601l - rgb to yuv color space conversion matrix * @@ -465,6 +471,71 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc, } } +static void _sde_encoder_phys_wb_setup_cwb(struct sde_encoder_phys *phys_enc, + bool enable) +{ + struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); + struct sde_hw_wb *hw_wb = wb_enc->hw_wb; + struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl; + struct sde_crtc *crtc = to_sde_crtc(wb_enc->crtc); + struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp; + bool need_merge = crtc->num_mixers > 1 ? true : false; + int i = 0; + + if (!phys_enc->in_clone_mode) { + SDE_DEBUG("not in CWB mode. 
early return\n"); + return; + } + + if (!hw_pp || !hw_ctl || !hw_wb || hw_pp->idx >= PINGPONG_MAX) { + SDE_ERROR("invalid hw resources - return\n"); + return; + } + + hw_ctl = crtc->mixers[0].hw_ctl; + if (hw_ctl && hw_ctl->ops.setup_intf_cfg_v1 && + test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features)) { + struct sde_hw_intf_cfg_v1 intf_cfg = { 0, }; + + for (i = 0; i < crtc->num_mixers; i++) + intf_cfg.cwb[intf_cfg.cwb_count++] = + (enum sde_cwb)(hw_pp->idx + i); + + if (enable && hw_pp->merge_3d && (intf_cfg.merge_3d_count < + MAX_MERGE_3D_PER_CTL_V1) && need_merge) + intf_cfg.merge_3d[intf_cfg.merge_3d_count++] = + hw_pp->merge_3d->idx; + + if (hw_pp->ops.setup_3d_mode) + hw_pp->ops.setup_3d_mode(hw_pp, (enable && need_merge) ? + BLEND_3D_H_ROW_INT : 0); + + if (hw_wb->ops.bind_pingpong_blk) + hw_wb->ops.bind_pingpong_blk(hw_wb, enable, hw_pp->idx); + + if (hw_ctl->ops.update_cwb_cfg) { + hw_ctl->ops.update_cwb_cfg(hw_ctl, &intf_cfg); + SDE_DEBUG("in CWB mode on CTL_%d PP-%d merge3d:%d\n", + hw_ctl->idx - CTL_0, + hw_pp->idx - PINGPONG_0, + hw_pp->merge_3d ? + hw_pp->merge_3d->idx - MERGE_3D_0 : -1); + } + } else { + struct sde_hw_intf_cfg *intf_cfg = &phys_enc->intf_cfg; + + memset(intf_cfg, 0, sizeof(struct sde_hw_intf_cfg)); + intf_cfg->intf = SDE_NONE; + intf_cfg->wb = hw_wb->idx; + + if (hw_ctl && hw_ctl->ops.update_wb_cfg) { + hw_ctl->ops.update_wb_cfg(hw_ctl, intf_cfg, enable); + SDE_DEBUG("in CWB mode adding WB for CTL_%d\n", + hw_ctl->idx - CTL_0); + } + } +} + /** * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block * @phys_enc: Pointer to physical encoder @@ -483,6 +554,11 @@ static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc, return; } + if (phys_enc->in_clone_mode) { + SDE_DEBUG("in CWB mode. early return\n"); + return; + } + wb_enc = to_sde_encoder_phys_wb(phys_enc); hw_wb = wb_enc->hw_wb; hw_cdm = phys_enc->hw_cdm; @@ -530,10 +606,104 @@ static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc, intf_cfg->wb = hw_wb->idx; intf_cfg->mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc); - phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, intf_cfg); } + +} + +static void _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state) +{ + struct drm_connector *conn; + struct drm_connector_state *conn_state; + struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); + const struct sde_wb_cfg *wb_cfg = wb_enc->hw_wb->caps; + struct drm_connector_list_iter conn_iter; + int conn_count = 0; + + phys_enc->in_clone_mode = false; + + /* Check if WB has CWB support */ + if (!(wb_cfg->features & BIT(SDE_WB_HAS_CWB))) + return; + + /* Count the number of connectors on the given crtc */ + drm_connector_list_iter_begin(crtc_state->crtc->dev, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { + conn_state = + drm_atomic_get_connector_state(crtc_state->state, conn); + if ((conn->state && conn->state->crtc == crtc_state->crtc) || + (conn_state && + conn_state->crtc == crtc_state->crtc)) + conn_count++; + } + drm_connector_list_iter_end(&conn_iter); + + + /* Enable clone mode If crtc has multiple connectors & one is WB */ + if (MULTIPLE_CONN_DETECTED(conn_count)) + phys_enc->in_clone_mode = true; + + SDE_DEBUG("detect CWB - status:%d\n", phys_enc->in_clone_mode); +} + +static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct sde_crtc_state *cstate = 
to_sde_crtc_state(crtc_state); + struct sde_rect wb_roi = {0,}; + int data_pt; + int ds_outw = 0; + int ds_outh = 0; + int ds_in_use = false; + int i = 0; + int ret = 0; + + if (!phys_enc->in_clone_mode) { + SDE_DEBUG("not in CWB mode. early return\n"); + goto exit; + } + + ret = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi); + if (ret) { + SDE_ERROR("failed to get roi %d\n", ret); + goto exit; + } + + data_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT); + + /* compute cumulative ds output dimensions if in use */ + for (i = 0; i < cstate->num_ds; i++) + if (cstate->ds_cfg[i].scl3_cfg.enable) { + ds_in_use = true; + ds_outw += cstate->ds_cfg[i].scl3_cfg.dst_width; + ds_outh += cstate->ds_cfg[i].scl3_cfg.dst_height; + } + + /* if ds in use check wb roi against ds output dimensions */ + if ((data_pt == CAPTURE_DSPP_OUT) && ds_in_use && + ((wb_roi.w != ds_outw) || (wb_roi.h != ds_outh))) { + SDE_ERROR("invalid wb roi with dest scalar [%dx%d vs %dx%d]\n", + wb_roi.w, wb_roi.h, ds_outw, ds_outh); + ret = -EINVAL; + goto exit; + } + + /* validate conn roi against pu rect */ + if (!sde_kms_rect_is_null(&cstate->crtc_roi)) { + if (wb_roi.w != cstate->crtc_roi.w || + wb_roi.h != cstate->crtc_roi.h) { + SDE_ERROR("invalid wb roi with pu [%dx%d vs %dx%d]\n", + wb_roi.w, wb_roi.h, cstate->crtc_roi.w, + cstate->crtc_roi.h); + ret = -EINVAL; + goto exit; + } + } +exit: + return ret; } /** @@ -570,6 +740,8 @@ static int sde_encoder_phys_wb_atomic_check( return -EINVAL; } + _sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state); + memset(&wb_roi, 0, sizeof(struct sde_rect)); rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi); @@ -658,7 +830,93 @@ static int sde_encoder_phys_wb_atomic_check( } } - return 0; + rc = _sde_enc_phys_wb_validate_cwb(phys_enc, crtc_state, conn_state); + if (rc) { + SDE_ERROR("failed in cwb validation %d\n", rc); + return rc; + } + + return rc; +} + +static void _sde_encoder_phys_wb_update_cwb_flush( + struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_wb *wb_enc; + struct sde_hw_wb *hw_wb; + struct sde_hw_ctl *hw_ctl; + struct sde_hw_cdm *hw_cdm; + struct sde_hw_pingpong *hw_pp; + struct sde_crtc *crtc; + struct sde_crtc_state *crtc_state; + int i = 0; + int cwb_capture_mode = 0; + enum sde_cwb cwb_idx = 0; + enum sde_cwb src_pp_idx = 0; + bool dspp_out = false; + bool need_merge = false; + + if (!phys_enc->in_clone_mode) { + SDE_DEBUG("not in CWB mode. early return\n"); + return; + } + + wb_enc = to_sde_encoder_phys_wb(phys_enc); + crtc = to_sde_crtc(wb_enc->crtc); + crtc_state = to_sde_crtc_state(wb_enc->crtc->state); + cwb_capture_mode = sde_crtc_get_property(crtc_state, + CRTC_PROP_CAPTURE_OUTPUT); + + hw_pp = phys_enc->hw_pp; + hw_wb = wb_enc->hw_wb; + hw_cdm = phys_enc->hw_cdm; + + /* In CWB mode, program actual source master sde_hw_ctl from crtc */ + hw_ctl = crtc->mixers[0].hw_ctl; + if (!hw_ctl || !hw_wb || !hw_pp) { + SDE_ERROR("[wb] HW resource not available for CWB\n"); + return; + } + + /* treating LM idx of primary display ctl path as source ping-pong idx*/ + src_pp_idx = (enum sde_cwb)crtc->mixers[0].hw_lm->idx; + cwb_idx = (enum sde_cwb)hw_pp->idx; + dspp_out = (cwb_capture_mode == CAPTURE_DSPP_OUT); + need_merge = (crtc->num_mixers > 1) ? 
true : false; + + if (src_pp_idx > LM_0 || ((cwb_idx + crtc->num_mixers) > CWB_MAX)) { + SDE_ERROR("invalid hw config for CWB\n"); + return; + } + + if (hw_ctl->ops.update_bitmask_wb) + hw_ctl->ops.update_bitmask_wb(hw_ctl, hw_wb->idx, 1); + + if (hw_ctl->ops.update_bitmask_cdm && hw_cdm) + hw_ctl->ops.update_bitmask_cdm(hw_ctl, hw_cdm->idx, 1); + + if (test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features)) { + for (i = 0; i < crtc->num_mixers; i++) { + cwb_idx = (enum sde_cwb) (hw_pp->idx + i); + src_pp_idx = (enum sde_cwb) (src_pp_idx + i); + + if (hw_wb->ops.program_cwb_ctrl) + hw_wb->ops.program_cwb_ctrl(hw_wb, cwb_idx, + src_pp_idx, dspp_out); + + if (hw_ctl->ops.update_bitmask_cwb) + hw_ctl->ops.update_bitmask_cwb(hw_ctl, + cwb_idx, 1); + } + + if (need_merge && hw_ctl->ops.update_bitmask_merge3d + && hw_pp && hw_pp->merge_3d) + hw_ctl->ops.update_bitmask_merge3d(hw_ctl, + hw_pp->merge_3d->idx, 1); + } else { + phys_enc->hw_mdptop->ops.set_cwb_ppb_cntl(phys_enc->hw_mdptop, + need_merge, dspp_out); + } } /** @@ -667,7 +925,7 @@ static int sde_encoder_phys_wb_atomic_check( */ static void _sde_encoder_phys_wb_update_flush(struct sde_encoder_phys *phys_enc) { - struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); + struct sde_encoder_phys_wb *wb_enc; struct sde_hw_wb *hw_wb; struct sde_hw_ctl *hw_ctl; struct sde_hw_cdm *hw_cdm; @@ -677,13 +935,19 @@ static void _sde_encoder_phys_wb_update_flush(struct sde_encoder_phys *phys_enc) if (!phys_enc) return; + wb_enc = to_sde_encoder_phys_wb(phys_enc); hw_wb = wb_enc->hw_wb; - hw_ctl = phys_enc->hw_ctl; hw_cdm = phys_enc->hw_cdm; hw_pp = phys_enc->hw_pp; + hw_ctl = phys_enc->hw_ctl; SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); + if (phys_enc->in_clone_mode) { + SDE_DEBUG("in CWB mode. early return\n"); + return; + } + if (!hw_ctl) { SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0); return; @@ -782,54 +1046,28 @@ static void sde_encoder_phys_wb_setup( sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi); sde_encoder_phys_wb_setup_cdp(phys_enc, wb_enc->wb_fmt); -} - -/** - * sde_encoder_phys_wb_unregister_irq - unregister writeback interrupt handler - * @phys_enc: Pointer to physical encoder - */ -static int sde_encoder_phys_wb_unregister_irq( - struct sde_encoder_phys *phys_enc) -{ - struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); - struct sde_hw_wb *hw_wb = wb_enc->hw_wb; - if (wb_enc->bypass_irqreg) - return 0; - - sde_core_irq_disable(phys_enc->sde_kms, &wb_enc->irq_idx, 1); - sde_core_irq_unregister_callback(phys_enc->sde_kms, wb_enc->irq_idx, - &wb_enc->irq_cb); - - SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n", - hw_wb->idx - WB_0, - wb_enc->irq_idx); - - return 0; + _sde_encoder_phys_wb_setup_cwb(phys_enc, true); } -/** - * sde_encoder_phys_wb_done_irq - writeback interrupt handler - * @arg: Pointer to writeback encoder - * @irq_idx: interrupt index - */ -static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx) +static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error) { struct sde_encoder_phys_wb *wb_enc = arg; struct sde_encoder_phys *phys_enc = &wb_enc->base; struct sde_hw_wb *hw_wb = wb_enc->hw_wb; - u32 event = 0; + u32 event = frame_error ? 
SDE_ENCODER_FRAME_EVENT_ERROR : 0; + + event |= SDE_ENCODER_FRAME_EVENT_DONE | + SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE; - SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0, - wb_enc->frame_count); + SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0, wb_enc->frame_count); /* don't notify upper layer for internal commit */ if (phys_enc->enable_state == SDE_ENC_DISABLING) goto complete; - event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE - | SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE - | SDE_ENCODER_FRAME_EVENT_DONE; + if (!phys_enc->in_clone_mode) + event |= SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE; atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0); if (phys_enc->parent_ops.handle_frame_done) @@ -847,58 +1085,65 @@ static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx) } /** - * sde_encoder_phys_wb_register_irq - register writeback interrupt handler - * @phys_enc: Pointer to physical encoder + * sde_encoder_phys_cwb_ovflow - Pingpong overflow interrupt handler for CWB + * @arg: Pointer to writeback encoder + * @irq_idx: interrupt index */ -static int sde_encoder_phys_wb_register_irq(struct sde_encoder_phys *phys_enc) +static void sde_encoder_phys_cwb_ovflow(void *arg, int irq_idx) { - struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); - struct sde_hw_wb *hw_wb = wb_enc->hw_wb; - struct sde_irq_callback *irq_cb = &wb_enc->irq_cb; - enum sde_intr_type intr_type; - int ret = 0; + _sde_encoder_phys_wb_frame_done_helper(arg, true); +} - if (wb_enc->bypass_irqreg) - return 0; +/** + * sde_encoder_phys_wb_done_irq - writeback interrupt handler + * @arg: Pointer to writeback encoder + * @irq_idx: interrupt index + */ +static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx) +{ + _sde_encoder_phys_wb_frame_done_helper(arg, false); +} - intr_type = sde_encoder_phys_wb_get_intr_type(hw_wb); - wb_enc->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms, - intr_type, hw_wb->idx); - if (wb_enc->irq_idx < 0) { - SDE_ERROR( - "failed to lookup IRQ index for WB_DONE with wb=%d\n", - hw_wb->idx - WB_0); - return -EINVAL; - } +/** + * sde_encoder_phys_wb_irq_ctrl - irq control of WB + * @phys: Pointer to physical encoder + * @enable: indicates enable or disable interrupts + */ +static void sde_encoder_phys_wb_irq_ctrl( + struct sde_encoder_phys *phys, bool enable) +{ - irq_cb->func = sde_encoder_phys_wb_done_irq; - irq_cb->arg = wb_enc; - ret = sde_core_irq_register_callback(phys_enc->sde_kms, - wb_enc->irq_idx, irq_cb); - if (ret) { - SDE_ERROR("failed to register IRQ callback WB_DONE\n"); - return ret; - } + struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys); + int index = 0; + int pp = 0; - ret = sde_core_irq_enable(phys_enc->sde_kms, &wb_enc->irq_idx, 1); - if (ret) { - SDE_ERROR( - "failed to enable IRQ for WB_DONE, wb %d, irq_idx=%d\n", - hw_wb->idx - WB_0, - wb_enc->irq_idx); - wb_enc->irq_idx = -EINVAL; - - /* Unregister callback on IRQ enable failure */ - sde_core_irq_unregister_callback(phys_enc->sde_kms, - wb_enc->irq_idx, irq_cb); - return ret; - } + if (!wb_enc) + return; + + if (wb_enc->bypass_irqreg) + return; - SDE_DEBUG("registered IRQ for wb %d, irq_idx=%d\n", - hw_wb->idx - WB_0, - wb_enc->irq_idx); + pp = phys->hw_pp->idx - PINGPONG_0; + if ((pp + CRTC_DUAL_MIXERS) >= PINGPONG_MAX) { + SDE_ERROR("invalid pingpong index for WB or CWB\n"); + return; + } - return ret; + if (enable) { + sde_encoder_helper_register_irq(phys, INTR_IDX_WB_DONE); + if (phys->in_clone_mode) { + for (index = 0; index < CRTC_DUAL_MIXERS; index++) +
sde_encoder_helper_register_irq(phys, + cwb_irq_tbl[index + pp]); + } + } else { + sde_encoder_helper_unregister_irq(phys, INTR_IDX_WB_DONE); + if (phys->in_clone_mode) { + for (index = 0; index < CRTC_DUAL_MIXERS; index++) + sde_encoder_helper_unregister_irq(phys, + cwb_irq_tbl[index + pp]); + } + } } /** @@ -969,6 +1214,7 @@ static int sde_encoder_phys_wb_wait_for_commit_done( u32 irq_status, event = 0; u64 wb_time = 0; int rc = 0; + int irq_idx = phys_enc->irq[INTR_IDX_WB_DONE].irq_idx; u32 timeout = max_t(u32, wb_enc->wbdone_timeout, KICKOFF_TIMEOUT_MS); /* Return EWOULDBLOCK since we know the wait isn't necessary */ @@ -983,7 +1229,7 @@ static int sde_encoder_phys_wb_wait_for_commit_done( /* signal completion if commit with no framebuffer */ if (!wb_enc->wb_fb) { SDE_DEBUG("no output framebuffer\n"); - sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx); + _sde_encoder_phys_wb_frame_done_helper(wb_enc, false); } ret = wait_for_completion_timeout(&wb_enc->wbdone_complete, @@ -993,11 +1239,11 @@ static int sde_encoder_phys_wb_wait_for_commit_done( SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count); irq_status = sde_core_irq_read(phys_enc->sde_kms, - wb_enc->irq_idx, true); + irq_idx, true); if (irq_status) { SDE_DEBUG("wb:%d done but irq not triggered\n", WBID(wb_enc)); - sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx); + _sde_encoder_phys_wb_frame_done_helper(wb_enc, false); } else { SDE_ERROR("wb:%d kickoff timed out\n", WBID(wb_enc)); @@ -1014,8 +1260,6 @@ static int sde_encoder_phys_wb_wait_for_commit_done( } } - sde_encoder_phys_wb_unregister_irq(phys_enc); - if (!rc) wb_enc->end_time = ktime_get(); @@ -1060,19 +1304,12 @@ static int sde_encoder_phys_wb_prepare_for_kickoff( struct sde_encoder_kickoff_params *params) { struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); - int ret; SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0, wb_enc->kickoff_count); reinit_completion(&wb_enc->wbdone_complete); - ret = sde_encoder_phys_wb_register_irq(phys_enc); - if (ret) { - SDE_ERROR("failed to register irq %d\n", ret); - return ret; - } - wb_enc->kickoff_count++; /* set OT limit & enable traffic shaper */ @@ -1080,6 +1317,8 @@ static int sde_encoder_phys_wb_prepare_for_kickoff( _sde_encoder_phys_wb_update_flush(phys_enc); + _sde_encoder_phys_wb_update_cwb_flush(phys_enc); + /* vote for iommu/clk/bus */ wb_enc->start_time = ktime_get(); @@ -1100,6 +1339,15 @@ static void sde_encoder_phys_wb_trigger_flush(struct sde_encoder_phys *phys_enc) return; } + /* + * Bail out iff in CWB mode. In case of CWB, primary control-path + * which is actually driving would trigger the flush + */ + if (phys_enc->in_clone_mode) { + SDE_DEBUG("in CWB mode. 
early return\n"); + return; + } + SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0); /* clear pending flush if commit with no framebuffer */ @@ -1178,7 +1426,7 @@ static int _sde_encoder_phys_wb_init_internal_fb( /* allocate gem tracking object */ nplanes = drm_format_num_planes(pixel_format); - if (nplanes > SDE_MAX_PLANES) { + if (nplanes >= SDE_MAX_PLANES) { SDE_ERROR("requested format has too many planes\n"); return -EINVAL; } @@ -1304,6 +1552,13 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc) goto exit; } + /* avoid reset frame for CWB */ + if (phys_enc->in_clone_mode) { + _sde_encoder_phys_wb_setup_cwb(phys_enc, false); + phys_enc->in_clone_mode = false; + goto exit; + } + /* reset h/w before final flush */ if (phys_enc->hw_ctl->ops.clear_pending_flush) phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl); @@ -1440,6 +1695,7 @@ static void sde_encoder_phys_wb_init_ops(struct sde_encoder_phys_ops *ops) ops->trigger_flush = sde_encoder_phys_wb_trigger_flush; ops->trigger_start = sde_encoder_helper_trigger_start; ops->hw_reset = sde_encoder_helper_hw_reset; + ops->irq_control = sde_encoder_phys_wb_irq_ctrl; } /** @@ -1452,6 +1708,7 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( struct sde_encoder_phys *phys_enc; struct sde_encoder_phys_wb *wb_enc; struct sde_hw_mdp *hw_mdp; + struct sde_encoder_irq *irq; int ret = 0; SDE_DEBUG("\n"); @@ -1468,7 +1725,6 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( ret = -ENOMEM; goto fail_alloc; } - wb_enc->irq_idx = -EINVAL; wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS; init_completion(&wb_enc->wbdone_complete); @@ -1531,7 +1787,56 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( phys_enc->intf_idx = p->intf_idx; phys_enc->enc_spinlock = p->enc_spinlock; atomic_set(&phys_enc->pending_retire_fence_cnt, 0); - INIT_LIST_HEAD(&wb_enc->irq_cb.list); + + irq = &phys_enc->irq[INTR_IDX_WB_DONE]; + INIT_LIST_HEAD(&irq->cb.list); + irq->name = "wb_done"; + irq->hw_idx = wb_enc->hw_wb->idx; + irq->irq_idx = -1; + irq->intr_type = sde_encoder_phys_wb_get_intr_type(wb_enc->hw_wb); + irq->intr_idx = INTR_IDX_WB_DONE; + irq->cb.arg = wb_enc; + irq->cb.func = sde_encoder_phys_wb_done_irq; + + irq = &phys_enc->irq[INTR_IDX_PP2_OVFL]; + INIT_LIST_HEAD(&irq->cb.list); + irq->name = "pp2_overflow"; + irq->hw_idx = CWB_2; + irq->irq_idx = -1; + irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; + irq->intr_idx = INTR_IDX_PP2_OVFL; + irq->cb.arg = wb_enc; + irq->cb.func = sde_encoder_phys_cwb_ovflow; + + irq = &phys_enc->irq[INTR_IDX_PP3_OVFL]; + INIT_LIST_HEAD(&irq->cb.list); + irq->name = "pp3_overflow"; + irq->hw_idx = CWB_3; + irq->irq_idx = -1; + irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; + irq->intr_idx = INTR_IDX_PP3_OVFL; + irq->cb.arg = wb_enc; + irq->cb.func = sde_encoder_phys_cwb_ovflow; + + irq = &phys_enc->irq[INTR_IDX_PP4_OVFL]; + INIT_LIST_HEAD(&irq->cb.list); + irq->name = "pp4_overflow"; + irq->hw_idx = CWB_4; + irq->irq_idx = -1; + irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; + irq->intr_idx = INTR_IDX_PP4_OVFL; + irq->cb.arg = wb_enc; + irq->cb.func = sde_encoder_phys_cwb_ovflow; + + irq = &phys_enc->irq[INTR_IDX_PP5_OVFL]; + INIT_LIST_HEAD(&irq->cb.list); + irq->name = "pp5_overflow"; + irq->hw_idx = CWB_5; + irq->irq_idx = -1; + irq->intr_type = SDE_IRQ_TYPE_CWB_OVERFLOW; + irq->intr_idx = INTR_IDX_PP5_OVFL; + irq->cb.arg = wb_enc; + irq->cb.func = sde_encoder_phys_cwb_ovflow; /* create internal buffer for disable logic */ if (_sde_encoder_phys_wb_init_internal_fb(wb_enc, diff --git 
a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c index c764ceb8c18fa90a02849bfab9d85764a0da8e00..0371a3f6abbfdf1944265db86dab9904a6e655bf 100644 --- a/drivers/gpu/drm/msm/sde/sde_fence.c +++ b/drivers/gpu/drm/msm/sde/sde_fence.c @@ -279,7 +279,8 @@ void sde_fence_prepare(struct sde_fence_context *ctx) } } -static void _sde_fence_trigger(struct sde_fence_context *ctx, ktime_t ts) +static void _sde_fence_trigger(struct sde_fence_context *ctx, + ktime_t ts, bool error) { unsigned long flags; struct sde_fence *fc, *next; @@ -301,6 +302,7 @@ static void _sde_fence_trigger(struct sde_fence_context *ctx, ktime_t ts) list_for_each_entry_safe(fc, next, &local_list_head, fence_list) { spin_lock_irqsave(&ctx->lock, flags); + fc->base.error = error ? -EBUSY : 0; fc->base.timestamp = ts; is_signaled = dma_fence_is_signaled_locked(&fc->base); spin_unlock_irqrestore(&ctx->lock, flags); @@ -352,7 +354,7 @@ int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val, if (fd >= 0) { rc = 0; - _sde_fence_trigger(ctx, ktime_get()); + _sde_fence_trigger(ctx, ktime_get(), false); } else { rc = fd; } @@ -361,7 +363,7 @@ int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val, } void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, - bool reset_timeline) + enum sde_fence_event fence_event) { unsigned long flags; @@ -371,7 +373,7 @@ void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, } spin_lock_irqsave(&ctx->lock, flags); - if (reset_timeline) { + if (fence_event == SDE_FENCE_RESET_TIMELINE) { if ((int)(ctx->done_count - ctx->commit_count) < 0) { SDE_ERROR( "timeline reset attempt! done count:%d commit:%d\n", @@ -379,7 +381,7 @@ void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, ctx->done_count = ctx->commit_count; SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count, ktime_to_us(ts), - reset_timeline, SDE_EVTLOG_FATAL); + fence_event, SDE_EVTLOG_FATAL); } else { spin_unlock_irqrestore(&ctx->lock, flags); return; @@ -392,7 +394,7 @@ void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, SDE_ERROR("extra signal attempt! done count:%d commit:%d\n", ctx->done_count, ctx->commit_count); SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count, - ktime_to_us(ts), reset_timeline, SDE_EVTLOG_FATAL); + ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL); spin_unlock_irqrestore(&ctx->lock, flags); return; } @@ -401,7 +403,7 @@ void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts, SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count, ktime_to_us(ts)); - _sde_fence_trigger(ctx, ts); + _sde_fence_trigger(ctx, ts, (fence_event == SDE_FENCE_SIGNAL_ERROR)); } void sde_fence_timeline_status(struct sde_fence_context *ctx, diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h index 29d2ec740e4f5dd280ed5617cea87436a08043cb..7891be45f13825d0d8a6b6f2cad70c75885eaf68 100644 --- a/drivers/gpu/drm/msm/sde/sde_fence.h +++ b/drivers/gpu/drm/msm/sde/sde_fence.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -47,6 +47,18 @@ struct sde_fence_context { char name[SDE_FENCE_NAME_SIZE]; }; +/** + * enum sde_fence_event - sde fence event as hint fence operation + * @SDE_FENCE_SIGNAL: Signal the fence cleanly with current timeline + * @SDE_FENCE_RESET_TIMELINE: Reset timeline of the fence context + * @SDE_FENCE_SIGNAL_ERROR: Signal the fence but indicate error through fence status + */ +enum sde_fence_event { + SDE_FENCE_SIGNAL, + SDE_FENCE_RESET_TIMELINE, + SDE_FENCE_SIGNAL_ERROR +}; + #if IS_ENABLED(CONFIG_SYNC_FILE) /** * sde_sync_get - Query sync fence object from a file handle @@ -128,10 +140,10 @@ int sde_fence_create(struct sde_fence_context *fence, uint64_t *val, * sde_fence_signal - advance fence timeline to signal outstanding fences * @fence: Pointer fence container * @ts: fence timestamp - * @reset_timeline: reset the fence timeline to done count equal to commit count + * @fence_event: fence event to indicate nature of fence signal. */ void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts, - bool reset_timeline); + enum sde_fence_event fence_event); /** * sde_fence_timeline_status - prints fence timeline status diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 99a48a9f0433851c370d563c4b622aa2383599bf..18500acc225e67d3eaf208a1e2afc643361bc158 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -1876,6 +1876,12 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev)) set_bit(SDE_WB_INPUT_CTRL, &wb->features); + if (sde_cfg->has_cwb_support) { + set_bit(SDE_WB_HAS_CWB, &wb->features); + if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev)) + set_bit(SDE_WB_CWB_CTRL, &wb->features); + } + for (j = 0; j < sde_cfg->mdp_count; j++) { sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off = PROP_BITVALUE_ACCESS(prop_value, @@ -3552,6 +3558,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) sde_cfg->ts_prefill_rev = 1; } else if (IS_SDM845_TARGET(hw_rev)) { sde_cfg->has_wb_ubwc = true; + sde_cfg->has_cwb_support = true; sde_cfg->perf.min_prefill_lines = 24; sde_cfg->vbif_qos_nlvl = 8; sde_cfg->ts_prefill_rev = 2; @@ -3563,6 +3570,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev) sde_cfg->vbif_qos_nlvl = 8; sde_cfg->ts_prefill_rev = 2; } else if (IS_SM8150_TARGET(hw_rev)) { + sde_cfg->has_cwb_support = true; sde_cfg->has_wb_ubwc = true; sde_cfg->has_qsync = true; sde_cfg->perf.min_prefill_lines = 24; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index eef9e2110e52bf42d3fe121310db01c73528524f..fe2817f489f2b7b6da2d1a7678392ab932e8c22d 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -328,6 +328,8 @@ enum { * @SDE_WB_CDP Writeback supports client driven prefetch * @SDE_WB_INPUT_CTRL Writeback supports from which pp block input pixel * data arrives.
+ * @SDE_WB_HAS_CWB Writeback block supports concurrent writeback + * @SDE_WB_CWB_CTRL Separate CWB control is available for configuring * @SDE_WB_MAX maximum value */ enum { @@ -347,6 +349,8 @@ enum { SDE_WB_QOS_8LVL, SDE_WB_CDP, SDE_WB_INPUT_CTRL, + SDE_WB_HAS_CWB, + SDE_WB_CWB_CTRL, SDE_WB_MAX }; @@ -1028,6 +1032,7 @@ struct sde_perf_cfg { * @has_src_split source split feature status * @has_cdp Client driven prefetch feature status * @has_wb_ubwc UBWC feature supported on WB + * @has_cwb_support indicates if device supports primary capture through CWB * @ubwc_version UBWC feature version (0x0 for not supported) * @ubwc_bw_calc_version indicate how UBWC BW has to be calculated * @has_sbuf indicate if stream buffer is available @@ -1078,6 +1083,7 @@ struct sde_mdss_cfg { bool has_cdp; bool has_dim_layer; bool has_wb_ubwc; + bool has_cwb_support; u32 ubwc_version; u32 ubwc_bw_calc_version; bool has_sbuf; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c index 5025eaac99a073c9a3fdd4ce074ac0b857f4c535..d25d1ce686de36c180eb8658fc6a1a6f2be35bc4 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c @@ -223,7 +223,7 @@ int sde_hw_cdm_enable(struct sde_hw_cdm *ctx, struct sde_hw_cdm_cfg *cdm) { struct sde_hw_blk_reg_map *c = &ctx->hw; - const struct sde_format *fmt = cdm->output_fmt; + const struct sde_format *fmt; struct cdm_output_cfg cdm_cfg = { 0 }; u32 opmode = 0; u32 csc = 0; @@ -231,6 +231,8 @@ int sde_hw_cdm_enable(struct sde_hw_cdm *ctx, if (!ctx || !cdm) return -EINVAL; + fmt = cdm->output_fmt; + if (!SDE_FORMAT_IS_YUV(fmt)) return -EINVAL; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index 12130822cfb932ec7dfff09dbdbe1e58db9baec3..15258ff45d1e0b78969f19c1cc47bde9ccf10de2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -954,6 +954,39 @@ static int sde_hw_ctl_reset_post_te_disable(struct sde_hw_ctl *ctx, return 0; } +static int sde_hw_ctl_update_cwb_cfg(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg_v1 *cfg) +{ + int i; + u32 cwb_active = 0; + u32 merge_3d_active = 0; + u32 wb_active = 0; + struct sde_hw_blk_reg_map *c; + + if (!ctx) + return -EINVAL; + + c = &ctx->hw; + cwb_active = SDE_REG_READ(c, CTL_CWB_ACTIVE); + for (i = 0; i < cfg->cwb_count; i++) { + if (cfg->cwb[i]) + cwb_active |= BIT(cfg->cwb[i] - CWB_0); + } + + merge_3d_active = SDE_REG_READ(c, CTL_MERGE_3D_ACTIVE); + for (i = 0; i < cfg->merge_3d_count; i++) { + if (cfg->merge_3d[i]) + merge_3d_active |= BIT(cfg->merge_3d[i] - MERGE_3D_0); + } + + wb_active = BIT(2); + SDE_REG_WRITE(c, CTL_WB_ACTIVE, wb_active); + SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active); + SDE_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active); + + return 0; +} + static int sde_hw_ctl_dsc_cfg(struct sde_hw_ctl *ctx, struct sde_ctl_dsc_cfg *cfg) { @@ -1011,6 +1044,24 @@ static int sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx, return 0; } +static void sde_hw_ctl_update_wb_cfg(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg *cfg, bool enable) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 intf_cfg = 0; + + if (!cfg->wb) + return; + + intf_cfg = SDE_REG_READ(c, CTL_TOP); + if (enable) + intf_cfg |= (cfg->wb & 0x3) + 2; + else + intf_cfg &= ~((cfg->wb & 0x3) + 2); + + SDE_REG_WRITE(c, CTL_TOP, intf_cfg); +} + static inline u32 sde_hw_ctl_read_ctl_top(struct sde_hw_ctl *ctx) { struct sde_hw_blk_reg_map *c; @@ -1080,6 +1131,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, 
ops->trigger_flush = sde_hw_ctl_trigger_flush_v1; ops->setup_intf_cfg_v1 = sde_hw_ctl_intf_cfg_v1; + ops->update_cwb_cfg = sde_hw_ctl_update_cwb_cfg; ops->setup_dsc_cfg = sde_hw_ctl_dsc_cfg; ops->update_bitmask_cdm = sde_hw_ctl_update_bitmask_cdm_v1; @@ -1109,6 +1161,7 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, ops->trigger_pending = sde_hw_ctl_trigger_pending; ops->read_ctl_top = sde_hw_ctl_read_ctl_top; ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers; + ops->update_wb_cfg = sde_hw_ctl_update_wb_cfg; ops->reset = sde_hw_ctl_reset_control; ops->get_reset = sde_hw_ctl_get_reset_status; ops->hard_reset = sde_hw_ctl_hard_reset; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h index a4b1dd745827988b2933860efaa90fd474b80745..e55a173a7ff96970daa0abec00508fc2b3637781 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h @@ -250,6 +250,14 @@ struct sde_hw_ctl_ops { int (*reset_post_te_disable)(struct sde_hw_ctl *ctx, struct sde_hw_intf_cfg_v1 *cfg, u32 merge_3d_idx); + /** update cwb for ctl_path + * @ctx : ctl path ctx pointer + * @cfg : interface config structure pointer + * @Return: error code + */ + int (*update_cwb_cfg)(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg_v1 *cfg); + /** * Setup ctl_path interface config for SDE_CTL_ACTIVE_CFG * @ctx : ctl path ctx pointer @@ -268,6 +276,14 @@ struct sde_hw_ctl_ops { int (*setup_dsc_cfg)(struct sde_hw_ctl *ctx, struct sde_ctl_dsc_cfg *cfg); + /** Update the interface selection with input WB config + * @ctx : ctl path ctx pointer + * @cfg : pointer to input wb config + * @enable : set if true, clear otherwise + */ + void (*update_wb_cfg)(struct sde_hw_ctl *ctx, + struct sde_hw_intf_cfg *cfg, bool enable); + int (*reset)(struct sde_hw_ctl *c); /** diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c index ecf4a4ff99c1e8067676d7bd9453868898b09a21..0721493e2b1ef9d4b565e9574a52aae8e6fd6ea3 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c @@ -130,6 +130,8 @@ */ #define SDE_INTR_CWB_2_OVERFLOW BIT(14) #define SDE_INTR_CWB_3_OVERFLOW BIT(15) +#define SDE_INTR_CWB_4_OVERFLOW BIT(20) +#define SDE_INTR_CWB_5_OVERFLOW BIT(21) /** * Histogram VIG done interrupt status bit definitions @@ -390,9 +392,10 @@ static const struct sde_irq_type sde_irq_map[] = { SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1}, { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3, SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1}, - /* irq_idx: 53-55 */ - { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, - { SDE_IRQ_TYPE_RESERVED, 0, 0, 1}, + /* irq_idx: 53-54 */ + { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, 1}, + { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, 1}, + /* irq_idx: 55 */ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0, SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1}, /* irq_idx: 56-59 */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h index 387de73a84b308bbddc4ec5563c33babcec70880..3cd0dfce12f1fb0541d965793687466ef370df4e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h @@ -38,6 +38,7 @@ struct intf_timing_params { u32 border_clr; u32 underflow_clr; u32 hsync_skew; + u32 v_front_porch_fixed; }; struct intf_prog_fetch { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c index 
b03fc9ea1360eacc9277c277c40b4c7ba7528ed9..608ef1715fe70221598774bf59e50fd47c1a6257 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c @@ -1725,7 +1725,7 @@ void reg_dmav1_setup_vig_gamutv5(struct sde_hw_pipe *ctx, void *cfg) hw_cfg->len, sizeof(struct drm_msm_3d_gamut)); return; } - op_mode = SDE_REG_READ(&ctx->hw, gamut_base); + op_mode = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->gamut_blk.base); payload = hw_cfg->payload; rc = sde_gamut_get_mode_info(SSPP, payload, &tbl_len, &tbl_off, &op_mode, &scale_off); @@ -1896,7 +1896,7 @@ void reg_dmav1_setup_vig_igcv5(struct sde_hw_pipe *ctx, void *cfg) if (!data) return; - reg = SDE_REG_READ(&ctx->hw, igc_base); + reg = SDE_REG_READ(&ctx->hw, ctx->cap->sblk->igc_blk[0].base); lut_enable = (reg >> 8) & BIT(0); lut_sel = (reg >> 9) & BIT(0); /* select LUT table (0 or 1) when 1D LUT is in active mode */ @@ -2527,7 +2527,7 @@ void reg_dmav1_setup_vig_qseed3(struct sde_hw_pipe *ctx, end: if (sspp->layout.format) { - if (SDE_FORMAT_IS_DX(sspp->layout.format)) + if (!SDE_FORMAT_IS_DX(sspp->layout.format)) op_mode |= BIT(14); if (sspp->layout.format->alpha_enable) { op_mode |= BIT(10); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c index 82840e64d12f55626db2906296262fe8bbccaf83..7128f64ba30d348454273f2ec830078843eb7663 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_top.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c @@ -413,6 +413,18 @@ static void sde_hw_intf_audio_select(struct sde_hw_mdp *mdp) SDE_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1); } +static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp, + bool dual, bool dspp_out) +{ + u32 value = dspp_out ? 0x4 : 0x0; + + SDE_REG_WRITE(&mdp->hw, PPB2_CNTL, value); + if (dual) { + value |= 0x1; + SDE_REG_WRITE(&mdp->hw, PPB3_CNTL, value); + } +} + static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, unsigned long cap) { @@ -422,6 +434,7 @@ static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops, ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl; ops->get_danger_status = sde_hw_get_danger_status; ops->setup_vsync_source = sde_hw_setup_vsync_source; + ops->set_cwb_ppb_cntl = sde_hw_program_cwb_ppb_ctrl; ops->get_safe_status = sde_hw_get_safe_status; ops->get_split_flush_status = sde_hw_get_split_flush; ops->setup_dce = sde_hw_setup_dce; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h index 950a62cee8ef1683cc2394a0df68b8206adc6216..210ea5379e3ba17175542a3df803b497af50fc64 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_top.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h @@ -194,6 +194,15 @@ struct sde_hw_mdp_ops { * @mdp: mdp top context driver */ void (*intf_audio_select)(struct sde_hw_mdp *mdp); + + /** + * set_cwb_ppb_cntl - select the data point for CWB + * @mdp: mdp top context driver + * @dual: indicates if dual pipe line needs to be programmed + * @dspp_out : true if dspp output required. 
LM is default tap point + */ + void (*set_cwb_ppb_cntl)(struct sde_hw_mdp *mdp, + bool dual, bool dspp_out); }; struct sde_hw_mdp { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c index 95edb3b1c61d1075da551a73ce8d070a3317e6f2..92c774867428f6d02c9c17f7d8101ec4c26048c8 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c @@ -55,6 +55,11 @@ #define WB_OUT_IMAGE_SIZE 0x2C0 #define WB_OUT_XY 0x2C4 +#define CWB_CTRL_SRC_SEL 0x0 +#define CWB_CTRL_MODE 0x4 +#define CWB_CTRL_BLK_SIZE 0x100 +#define CWB_CTRL_BASE_OFFSET 0x83000 + /* WB_QOS_CTRL */ #define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0) @@ -78,6 +83,18 @@ static struct sde_wb_cfg *_wb_offset(enum sde_wb wb, return ERR_PTR(-EINVAL); } +static void _sde_hw_cwb_ctrl_init(struct sde_mdss_cfg *m, + void __iomem *addr, struct sde_hw_blk_reg_map *b) +{ + if (b) { + b->base_off = addr; + b->blk_off = CWB_CTRL_BASE_OFFSET; + b->length = CWB_CTRL_BLK_SIZE * m->pingpong_count; + b->hwversion = m->hwversion; + b->log_mask = SDE_DBG_MASK_WB; + } +} + static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *data) { @@ -262,6 +279,23 @@ static void sde_hw_wb_bind_pingpong_blk( SDE_REG_WRITE(c, WB_MUX, mux_cfg); } +static void sde_hw_wb_program_cwb_ctrl(struct sde_hw_wb *ctx, + const enum sde_cwb cur_idx, + const enum sde_cwb data_src, bool dspp_out) +{ + struct sde_hw_blk_reg_map *c; + u32 blk_base; + + if (!ctx) + return; + + c = &ctx->cwb_hw; + blk_base = CWB_CTRL_BLK_SIZE * (cur_idx - CWB_0); + + SDE_REG_WRITE(c, blk_base + CWB_CTRL_SRC_SEL, data_src - CWB_0); + SDE_REG_WRITE(c, blk_base + CWB_CTRL_MODE, dspp_out); +} + static void _setup_wb_ops(struct sde_hw_wb_ops *ops, unsigned long features) { @@ -283,6 +317,9 @@ static void _setup_wb_ops(struct sde_hw_wb_ops *ops, if (test_bit(SDE_WB_INPUT_CTRL, &features)) ops->bind_pingpong_blk = sde_hw_wb_bind_pingpong_blk; + + if (test_bit(SDE_WB_CWB_CTRL, &features)) + ops->program_cwb_ctrl = sde_hw_wb_program_cwb_ctrl; } static struct sde_hw_blk_ops sde_hw_ops = { @@ -330,6 +367,9 @@ struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx, sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, c->hw.blk_off + c->hw.length, c->hw.xin_id); + if (test_bit(SDE_WB_CWB_CTRL, &cfg->features)) + _sde_hw_cwb_ctrl_init(m, addr, &c->cwb_hw); + return c; blk_init_error: diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h index c908ad058d463c33657fe9329fe48fb7f667d198..268d0ef8b3d98b9ad989a4243f3d69da700aab75 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h @@ -137,6 +137,16 @@ struct sde_hw_wb_ops { void (*bind_pingpong_blk)(struct sde_hw_wb *ctx, bool enable, const enum sde_pingpong pp); + + /** + * program_cwb_ctrl - program cwb block config + * @ctx: Pointer to wb context + * @pp_idx: Current CWB block index to program + * @data_src: Source CWB/PingPong block index + * @dspp_out: Tap dspp output or default LM output + */ + void (*program_cwb_ctrl)(struct sde_hw_wb *ctx, const enum sde_cwb cwb, + const enum sde_cwb data_src, bool dspp_out); }; /** @@ -149,6 +159,7 @@ struct sde_hw_wb_ops { * @wb_hw_caps: hardware capabilities * @ops: function pointers * @hw_mdp: MDP top level hardware block + * @cwb_hw: CWB control hwio details */ struct sde_hw_wb { struct sde_hw_blk base; @@ -164,6 +175,7 @@ struct sde_hw_wb { struct sde_hw_wb_ops ops; struct sde_hw_mdp *hw_mdp; + struct sde_hw_blk_reg_map cwb_hw; }; /** diff --git
a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h index cc020d993def901f5150bb850f0109ded240798c..a59222350a11f03cdfa2a38379ea82e5ccf06223 100644 --- a/drivers/gpu/drm/msm/sde/sde_hwio.h +++ b/drivers/gpu/drm/msm/sde/sde_hwio.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -40,6 +40,8 @@ #define PPB0_CONFIG 0x334 #define PPB1_CNTL 0x338 #define PPB1_CONFIG 0x33C +#define PPB2_CNTL 0x370 +#define PPB3_CNTL 0x374 #define HW_EVENTS_CTL 0x37C #define CLK_CTRL3 0x3A8 #define CLK_STATUS3 0x3AC diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 95752d1b0313ea9e6b2fce2d7fbf47479225643b..83b6fb96be25f50721c7667c05a9631f2e0e12f9 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1096,6 +1096,8 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms, SDE_ERROR("wait for commit done returned %d\n", ret); break; } + + sde_crtc_complete_flip(crtc, NULL); } } @@ -1261,6 +1263,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .enable_event = dsi_conn_enable_event, .cmd_transfer = dsi_display_cmd_transfer, .cont_splash_config = dsi_display_cont_splash_config, + .get_panel_vfp = dsi_display_get_panel_vfp, }; static const struct sde_connector_ops wb_ops = { .post_init = sde_wb_connector_post_init, @@ -1275,6 +1278,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .check_status = NULL, .cmd_transfer = NULL, .cont_splash_config = NULL, + .get_panel_vfp = NULL, }; static const struct sde_connector_ops dp_ops = { .post_init = dp_connector_post_init, @@ -1288,6 +1292,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .config_hdr = dp_connector_config_hdr, .cmd_transfer = NULL, .cont_splash_config = NULL, + .get_panel_vfp = NULL, }; struct msm_display_info info; struct drm_encoder *encoder; @@ -1799,8 +1804,8 @@ int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only) aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_aspace_domain_attach_detach_update(aspace, false); aspace->domain_attached = true; + msm_gem_aspace_domain_attach_detach_update(aspace, false); } return 0; @@ -1928,8 +1933,9 @@ static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file) struct drm_modeset_acquire_ctx ctx; int ret = 0; + /* cancel pending flip event */ for (i = 0; i < priv->num_crtcs; i++) - sde_crtc_cancel_pending_flip(priv->crtcs[i], file); + sde_crtc_complete_flip(priv->crtcs[i], file); drm_modeset_acquire_init(&ctx, 0); retry: @@ -2225,6 +2231,7 @@ static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file) struct drm_connector *connector = NULL; struct drm_connector_list_iter conn_iter; struct sde_connector *sde_conn = NULL; + int i; if (!kms) { SDE_ERROR("invalid kms\n"); @@ -2242,6 +2249,18 @@ static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file) if (!dev->mode_config.poll_enabled) return; + /* init external dsi bridge here to make sure ext bridge is probed*/ + for (i = 0; i < sde_kms->dsi_display_count; ++i) { + struct dsi_display *dsi_display; + + dsi_display = sde_kms->dsi_displays[i]; + if (dsi_display->bridge) { + dsi_display_drm_ext_bridge_init(dsi_display, + dsi_display->bridge->base.encoder, + 
dsi_display->drm_conn); + } + } + mutex_lock(&dev->mode_config.mutex); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { @@ -2340,7 +2359,7 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms) * configuration. */ if (conn_iter && - conn_iter->encoder_ids[0] == encoder->base.id) { + (conn_iter->encoder_ids[0] == encoder->base.id)) { connector = conn_iter; break; } @@ -3017,6 +3036,32 @@ static int sde_kms_hw_init(struct msm_kms *kms) sde_kms->splash_data.resource_handoff_pending = true; + /* initialize power domain if defined */ + if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) { + sde_kms->genpd.name = dev->unique; + sde_kms->genpd.power_off = sde_kms_pd_disable; + sde_kms->genpd.power_on = sde_kms_pd_enable; + + rc = pm_genpd_init(&sde_kms->genpd, NULL, true); + if (rc < 0) { + SDE_ERROR("failed to init genpd provider %s: %d\n", + sde_kms->genpd.name, rc); + goto genpd_err; + } + + rc = of_genpd_add_provider_simple(dev->dev->of_node, + &sde_kms->genpd); + if (rc < 0) { + SDE_ERROR("failed to add genpd provider %s: %d\n", + sde_kms->genpd.name, rc); + pm_genpd_remove(&sde_kms->genpd); + goto genpd_err; + } + + sde_kms->genpd_init = true; + SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name); + } + rc = _sde_kms_mmu_init(sde_kms); if (rc) { SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc); @@ -3125,32 +3170,6 @@ static int sde_kms_hw_init(struct msm_kms *kms) SDE_POWER_EVENT_PRE_DISABLE, sde_kms_handle_power_event, sde_kms, "kms"); - /* initialize power domain if defined */ - if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) { - sde_kms->genpd.name = dev->unique; - sde_kms->genpd.power_off = sde_kms_pd_disable; - sde_kms->genpd.power_on = sde_kms_pd_enable; - - rc = pm_genpd_init(&sde_kms->genpd, NULL, true); - if (rc < 0) { - SDE_ERROR("failed to init genpd provider %s: %d\n", - sde_kms->genpd.name, rc); - goto genpd_err; - } - - rc = of_genpd_add_provider_simple(dev->dev->of_node, - &sde_kms->genpd); - if (rc < 0) { - SDE_ERROR("failed to add genpd provider %s: %d\n", - sde_kms->genpd.name, rc); - pm_genpd_remove(&sde_kms->genpd); - goto genpd_err; - } - - sde_kms->genpd_init = true; - SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name); - } - if (sde_kms->splash_data.cont_splash_en) { SDE_DEBUG("Skipping MDP Resources disable\n"); } else { diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index e0e9d10c5d98397ae7be92bd42c1ed74c9974bbe..627f41f2cb0b213ca2dafd00ca6f57236d2a380c 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -1077,23 +1077,26 @@ static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde, } static void _sde_plane_setup_scaler3(struct sde_plane *psde, - struct sde_plane_state *pstate, - uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h, - struct sde_hw_scaler3_cfg *scale_cfg, - const struct sde_format *fmt, + struct sde_plane_state *pstate, const struct sde_format *fmt, uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v) { - uint32_t decimated, i; - - if (!psde || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h || - !chroma_subsmpl_v) { - SDE_ERROR( - "psde %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n", - !!psde, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h, - chroma_subsmpl_v); + uint32_t decimated, i, src_w, src_h, dst_w, dst_h; + struct sde_hw_scaler3_cfg *scale_cfg; + + if (!psde || !pstate || !fmt || + !chroma_subsmpl_h || !chroma_subsmpl_v) { 
+ SDE_ERROR("psde %d pstate %d fmt %d smp_h %d smp_v %d\n", + !!psde, !!pstate, !!fmt, chroma_subsmpl_h, + chroma_subsmpl_v); return; } + scale_cfg = &pstate->scaler3_cfg; + src_w = psde->pipe_cfg.src_rect.w; + src_h = psde->pipe_cfg.src_rect.h; + dst_w = psde->pipe_cfg.dst_rect.w; + dst_h = psde->pipe_cfg.dst_rect.h; + memset(scale_cfg, 0, sizeof(*scale_cfg)); memset(&pstate->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext)); @@ -1143,6 +1146,12 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde, && (src_w == dst_w)) || pstate->multirect_mode) return; + SDE_DEBUG_PLANE(psde, + "setting bilinear: src:%dx%d dst:%dx%d chroma:%dx%d fmt:%x\n", + src_w, src_h, dst_w, dst_h, + chroma_subsmpl_v, chroma_subsmpl_h, + fmt->base.pixel_format); + scale_cfg->dst_width = dst_w; scale_cfg->dst_height = dst_h; scale_cfg->y_rgb_filter_cfg = SDE_SCALE_BIL; @@ -1499,14 +1508,18 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde, else rc = -EINVAL; if (rc || pstate->scaler_check_state != - SDE_PLANE_SCLCHECK_SCALER_V2) { - /* calculate default config for QSEED3 */ - _sde_plane_setup_scaler3(psde, pstate, + SDE_PLANE_SCLCHECK_SCALER_V2) { + SDE_EVT32(DRMID(&psde->base), color_fill, + pstate->scaler_check_state, + psde->debugfs_default_scale, rc, psde->pipe_cfg.src_rect.w, psde->pipe_cfg.src_rect.h, psde->pipe_cfg.dst_rect.w, psde->pipe_cfg.dst_rect.h, - &pstate->scaler3_cfg, fmt, + pstate->multirect_mode); + + /* calculate default config for QSEED3 */ + _sde_plane_setup_scaler3(psde, pstate, fmt, chroma_subsmpl_h, chroma_subsmpl_v); } } else if (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 || @@ -3454,6 +3467,25 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, src_w, src_h); return -EINVAL; } + + /* + * SSPP fetch , unpack output and QSEED3 input lines need + * to match for Y plane + */ + if (i == 0 && + (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) & + BIT(SDE_DRM_DEINTERLACE)) && + ((pstate->scaler3_cfg.src_height[i] != (src_h/2)) || + (pstate->pixel_ext.roi_h[i] != (src_h/2)))) { + SDE_ERROR_PLANE(psde, + "de-interlace fail roi[%d] %d/%d, src %dx%d, src %dx%d\n", + i, pstate->pixel_ext.roi_w[i], + pstate->pixel_ext.roi_h[i], + pstate->scaler3_cfg.src_width[i], + pstate->scaler3_cfg.src_height[i], + src_w, src_h); + return -EINVAL; + } } pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2; @@ -3560,17 +3592,16 @@ static int sde_plane_sspp_atomic_check(struct drm_plane *plane, ret = -EINVAL; /* decimation validation */ - } else if (deci_w || deci_h) { - if ((deci_w > psde->pipe_sblk->maxhdeciexp) || - (deci_h > psde->pipe_sblk->maxvdeciexp)) { - SDE_ERROR_PLANE(psde, - "too much decimation requested\n"); - ret = -EINVAL; - } else if (fmt->fetch_mode != SDE_FETCH_LINEAR) { - SDE_ERROR_PLANE(psde, - "decimation requires linear fetch\n"); - ret = -EINVAL; - } + } else if ((deci_w || deci_h) + && ((deci_w > psde->pipe_sblk->maxhdeciexp) + || (deci_h > psde->pipe_sblk->maxvdeciexp))) { + SDE_ERROR_PLANE(psde, "too much decimation requested\n"); + ret = -EINVAL; + + } else if ((deci_w || deci_h) + && (fmt->fetch_mode != SDE_FETCH_LINEAR)) { + SDE_ERROR_PLANE(psde, "decimation requires linear fetch\n"); + ret = -EINVAL; } else if (!(psde->features & SDE_SSPP_SCALER) && ((src.w != dst.w) || (src.h != dst.h))) { diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c index 92734565d80b23b8b296ecc78ac759742d857756..82891477c403181ea5c6ce8bdbc10be887ddc0ab 100644 --- a/drivers/gpu/drm/msm/sde/sde_rm.c +++ 
b/drivers/gpu/drm/msm/sde/sde_rm.c @@ -421,7 +421,7 @@ int sde_rm_init(struct sde_rm *rm, void __iomem *mmio, struct drm_device *dev) { - int rc, i; + int i, rc = 0; enum sde_hw_blk_type type; if (!rm || !cat || !mmio || !dev) { diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c index 788884547c25e236444fa4d76ea9ec06d6b55005..575d767856610f9fd174b68c5ba7335421ef30b6 100644 --- a/drivers/gpu/drm/msm/sde/sde_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_wb.c @@ -818,6 +818,7 @@ static struct platform_driver sde_wb_driver = { .driver = { .name = "sde_wb", .of_match_table = dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c index 80049ce0c9eb50070ba03f354a898923b38616d8..b4cdaf65e586c944efb88eb751d68bbaf5f5fa88 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.c +++ b/drivers/gpu/drm/msm/sde_power_handle.c @@ -847,6 +847,68 @@ void sde_power_resource_deinit(struct platform_device *pdev, sde_rsc_client_destroy(phandle->rsc_client); } + +int sde_power_scale_reg_bus(struct sde_power_handle *phandle, + struct sde_power_client *pclient, u32 usecase_ndx, bool skip_lock) +{ + struct sde_power_client *client; + int rc = 0; + u32 max_usecase_ndx = VOTE_INDEX_DISABLE; + + if (!skip_lock) { + mutex_lock(&phandle->phandle_lock); + + if (WARN_ON(pclient->refcount == 0)) { + /* + * This is not expected, clients calling without skip + * lock are outside the power resource enable, which + * means that they should have enabled the power + * resource before trying to scale. + */ + rc = -EINVAL; + goto exit; + } + } + + pr_debug("%pS: current idx:%d requested:%d client:%d\n", + __builtin_return_address(0), pclient->usecase_ndx, + usecase_ndx, pclient->id); + + pclient->usecase_ndx = usecase_ndx; + + list_for_each_entry(client, &phandle->power_client_clist, list) { + if (client->usecase_ndx < VOTE_INDEX_MAX && + client->usecase_ndx > max_usecase_ndx) + max_usecase_ndx = client->usecase_ndx; + } + + rc = sde_power_reg_bus_update(phandle->reg_bus_hdl, + max_usecase_ndx); + if (rc) + pr_err("failed to set reg bus vote rc=%d\n", rc); + +exit: + if (!skip_lock) + mutex_unlock(&phandle->phandle_lock); + + return rc; +} + +static inline bool _resource_changed(u32 current_usecase_ndx, + u32 max_usecase_ndx) +{ + WARN_ON((current_usecase_ndx >= VOTE_INDEX_MAX) + || (max_usecase_ndx >= VOTE_INDEX_MAX)); + + if (((current_usecase_ndx >= VOTE_INDEX_LOW) && /* enabled */ + (max_usecase_ndx == VOTE_INDEX_DISABLE)) || /* max disabled */ + ((current_usecase_ndx == VOTE_INDEX_DISABLE) && /* disabled */ + (max_usecase_ndx >= VOTE_INDEX_LOW))) /* max enabled */ + return true; + + return false; +} + int sde_power_resource_enable(struct sde_power_handle *phandle, struct sde_power_client *pclient, bool enable) { @@ -880,7 +942,15 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, max_usecase_ndx = client->usecase_ndx; } - if (phandle->current_usecase_ndx != max_usecase_ndx) { + /* + * Check if we need to enable/disable the power resource, we won't + * only-scale up/down the AHB vote in this API; if a client wants to + * bump up the AHB clock above the LOW (default) level, it needs to + * call 'sde_power_scale_reg_bus' with the desired vote after the power + * resource was enabled. 
+ */ + if (_resource_changed(phandle->current_usecase_ndx, + max_usecase_ndx)) { changed = true; prev_usecase_ndx = phandle->current_usecase_ndx; phandle->current_usecase_ndx = max_usecase_ndx; @@ -916,8 +986,8 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, goto vreg_err; } - rc = sde_power_reg_bus_update(phandle->reg_bus_hdl, - max_usecase_ndx); + rc = sde_power_scale_reg_bus(phandle, pclient, + max_usecase_ndx, true); if (rc) { pr_err("failed to set reg bus vote rc=%d\n", rc); goto reg_bus_hdl_err; @@ -948,8 +1018,8 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable); - sde_power_reg_bus_update(phandle->reg_bus_hdl, - max_usecase_ndx); + sde_power_scale_reg_bus(phandle, pclient, + max_usecase_ndx, true); msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable); @@ -970,7 +1040,7 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, clk_err: sde_power_rsc_update(phandle, false); rsc_err: - sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx); + sde_power_scale_reg_bus(phandle, pclient, max_usecase_ndx, true); reg_bus_hdl_err: msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0); vreg_err: diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h index b07779f0be9a5d8a37013fd627455749b3236c47..2c8665f9bb2b109d31adbb0a7c2caa4a80d80cce 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.h +++ b/drivers/gpu/drm/msm/sde_power_handle.h @@ -43,11 +43,15 @@ * mdss_bus_vote_type: register bus vote type * VOTE_INDEX_DISABLE: removes the client vote * VOTE_INDEX_LOW: keeps the lowest vote for register bus + * VOTE_INDEX_MEDIUM: keeps medium vote for register bus + * VOTE_INDEX_HIGH: keeps the highest vote for register bus * VOTE_INDEX_MAX: invalid */ enum mdss_bus_vote_type { VOTE_INDEX_DISABLE, VOTE_INDEX_LOW, + VOTE_INDEX_MEDIUM, + VOTE_INDEX_HIGH, VOTE_INDEX_MAX, }; @@ -227,6 +231,19 @@ void sde_power_client_destroy(struct sde_power_handle *phandle, int sde_power_resource_enable(struct sde_power_handle *pdata, struct sde_power_client *pclient, bool enable); +/** + * sde_power_scale_reg_bus() - Scale the registers bus for the specified client + * @phandle: power handle containing the resources + * @pclient: client information to scale its vote + * @usecase_ndx: new use case to scale the reg bus + * @skip_lock: will skip holding the power rsrc mutex during the call, this is + * for internal callers that already hold this required lock. + * + * Return: error code. 
+ */ +int sde_power_scale_reg_bus(struct sde_power_handle *phandle, + struct sde_power_client *pclient, u32 usecase_ndx, bool skip_lock); + /** * sde_power_resource_is_enabled() - return true if power resource is enabled * @pdata: power handle containing the resources diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c index 0fa9758e72ce6e0bc8488f4b74c377a964935fcd..ecd2fcb76e81f03d88677dfc56e3876a1f5459e5 100644 --- a/drivers/gpu/drm/msm/sde_rsc.c +++ b/drivers/gpu/drm/msm/sde_rsc.c @@ -1472,6 +1472,7 @@ static struct platform_driver sde_rsc_platform_driver = { .driver = { .name = "sde_rsc", .of_match_table = dt_match, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index f169348da377ed2798cd604c443ad37f79921470..ef3731d2f2e79e96f0ece5e4055839f79d5d0e85 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -634,7 +634,7 @@ static int hdmi_audio_config(struct device *dev, struct omap_dss_audio *dss_audio) { struct omap_hdmi *hd = dev_get_drvdata(dev); - int ret; + int ret = 0; mutex_lock(&hd->lock); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index c3453f3bd603f49d95ac11423185753870aa887f..1359bf50598f4d0ae79c81734bb149825655394a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -926,8 +926,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { const struct hdmi4_features *features; struct resource *res; + const struct soc_device_attribute *soc; - features = soc_device_match(hdmi4_soc_devices)->data; + soc = soc_device_match(hdmi4_soc_devices); + if (!soc) + return -ENODEV; + + features = soc->data; core->cts_swmode = features->cts_swmode; core->audio_use_mclk = features->audio_use_mclk; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index b3221ca5bcd8413852ea1f0df3570adcb6d14337..26db0ce7a085538c118e53dce5a10d7152ffba3d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -660,7 +660,7 @@ static int hdmi_audio_config(struct device *dev, struct omap_dss_audio *dss_audio) { struct omap_hdmi *hd = dev_get_drvdata(dev); - int ret; + int ret = 0; mutex_lock(&hd->lock); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index aa5ba9ae2191c15518ee1aa326165793653912e1..556335ecb2b77b7fb1dc9d2e19bab577d7405176 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -123,6 +123,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) if (dssdrv->read_edid) { void *edid = kzalloc(MAX_EDID, GFP_KERNEL); + if (!edid) + return 0; + if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && drm_edid_is_valid(edid)) { drm_mode_connector_update_edid_property( @@ -141,6 +144,9 @@ static int omap_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode = drm_mode_create(dev); struct videomode vm = {0}; + if (!mode) + return 0; + dssdrv->get_timings(dssdev, &vm); drm_display_mode_from_videomode(&vm, mode); @@ -196,6 +202,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector, if (!r) { /* check if vrefresh is still valid */ new_mode = drm_mode_duplicate(dev, mode); + + if (!new_mode) + return MODE_BAD; + new_mode->clock = vm.pixelclock / 1000; new_mode->vrefresh = 0; if (mode->vrefresh == 
drm_mode_vrefresh(new_mode)) diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index fd05f7e9f43fb3c198bf959def25500629ffc55f..df05fe53c399b478228a0a8f7b5845babca79647 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -389,12 +389,16 @@ int tiler_unpin(struct tiler_block *block) struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h, uint16_t align) { - struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL); + struct tiler_block *block; u32 min_align = 128; int ret; unsigned long flags; u32 slot_bytes; + block = kzalloc(sizeof(*block), GFP_KERNEL); + if (!block) + return ERR_PTR(-ENOMEM); + BUG_ON(!validfmt(fmt)); /* convert width/height to slots */ diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index c10fdfc0930f7fae6a06c28f70724316de329896..1cd39507b6348c270a93d8fb04d41df3d1cb4ee6 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -92,7 +92,7 @@ static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset, { int i; unsigned long index; - bool area_free; + bool area_free = false; unsigned long slots_per_band = PAGE_SIZE / slot_bytes; unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0; unsigned long curr_bit = bit_offset; diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index f461bc1649d3e0fb41a5d4409c9c5b02737e611b..e84ee9a2b727a5bb37ec16cd84ec561792162671 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -379,6 +379,27 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .gpmu_major = 0x1, .gpmu_minor = 0x003, }, + { + .gpurev = ADRENO_REV_A640, + .core = 6, + .major = 4, + .minor = 0, + .patchid = 0, + .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | + ADRENO_CONTENT_PROTECTION, + .sqefw_name = "a630_sqe.fw", + .zap_name = "a640_zap", + .gpudev = &adreno_a6xx_gpudev, + .gmem_size = SZ_1M, //Verified 1MB + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a640_gmu.bin", + .gpmu_major = 0x2, + .gpmu_minor = 0x000, + .gpmu_tsens = 0x000C000D, + .max_power = 5448, + .va_padding = SZ_64K, + }, { .gpurev = ADRENO_REV_A640, .core = 6, @@ -386,7 +407,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .minor = 0, .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU | - ADRENO_IFPC | ADRENO_CONTENT_PROTECTION, + ADRENO_CONTENT_PROTECTION, .sqefw_name = "a630_sqe.fw", .zap_name = "a640_zap", .gpudev = &adreno_a6xx_gpudev, @@ -419,4 +440,17 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .gpmu_tsens = 0x000C000D, .max_power = 5448, }, + { + .gpurev = ADRENO_REV_A608, + .core = 6, + .major = 0, + .minor = 8, + .patchid = ANY_ID, + .features = ADRENO_64BIT, + .sqefw_name = "a630_sqe.fw", + .gpudev = &adreno_a6xx_gpudev, + .gmem_size = (SZ_128K + SZ_4K), + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + }, }; diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 70e51e50187c6aa6d4af738edbfb84dd96a3c93a..9e42b25cf0e479883c1bf2a6433dbc7faaac2018 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1832,16 +1832,16 @@ static int _adreno_start(struct adreno_device *adreno_dev) } /* Send OOB request to turn on the GX */ - if (gmu_dev_ops->oob_set) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_set)) { status = gmu_dev_ops->oob_set(adreno_dev, oob_gpu); if (status) goto error_mmu_off; } - 
if (gmu_dev_ops->hfi_start_msg) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, hfi_start_msg)) { status = gmu_dev_ops->hfi_start_msg(adreno_dev); if (status) - goto error_mmu_off; + goto error_oob_clear; } /* Enable 64 bit gpu addr if feature is set */ @@ -1967,6 +1967,17 @@ static int _adreno_start(struct adreno_device *adreno_dev) } } + if (gmu_core_isenabled(device) && adreno_dev->perfctr_ifpc_lo == 0) { + ret = adreno_perfcounter_get(adreno_dev, + KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 4, + &adreno_dev->perfctr_ifpc_lo, NULL, + PERFCOUNTER_FLAG_KERNEL); + if (ret) { + WARN_ONCE(1, "Unable to get perf counter for IFPC\n"); + adreno_dev->perfctr_ifpc_lo = 0; + } + } + /* Clear the busy_data stats - we're starting over from scratch */ adreno_dev->busy_data.gpu_busy = 0; adreno_dev->busy_data.bif_ram_cycles = 0; @@ -1975,6 +1986,7 @@ static int _adreno_start(struct adreno_device *adreno_dev) adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0; adreno_dev->busy_data.bif_starved_ram = 0; adreno_dev->busy_data.bif_starved_ram_ch1 = 0; + adreno_dev->busy_data.num_ifpc = 0; /* Restore performance counter registers with saved values */ adreno_perfcounter_restore(adreno_dev); @@ -2016,7 +2028,7 @@ static int _adreno_start(struct adreno_device *adreno_dev) pmqos_active_vote); /* Send OOB request to allow IFPC */ - if (gmu_dev_ops->oob_clear) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) { gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); /* If we made it this far, the BOOT OOB was sent to the GMU */ @@ -2027,7 +2039,7 @@ static int _adreno_start(struct adreno_device *adreno_dev) return 0; error_oob_clear: - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); error_mmu_off: @@ -2080,9 +2092,9 @@ static int adreno_stop(struct kgsl_device *device) return 0; /* Turn the power on one last time before stopping */ - if (gmu_dev_ops->oob_set) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_set)) { error = gmu_dev_ops->oob_set(adreno_dev, oob_gpu); - if (error) { + if (error && GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) { gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); if (gmu_core_regulator_isenabled(device)) { /* GPU is on. Try recovery */ @@ -2116,7 +2128,7 @@ static int adreno_stop(struct kgsl_device *device) /* Save physical performance counter values before GPU power down*/ adreno_perfcounter_save(adreno_dev); - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); /* @@ -2125,7 +2137,7 @@ static int adreno_stop(struct kgsl_device *device) * GMU to return to the lowest idle level. This is * because some idle level transitions require VBIF and MMU. 
*/ - if (!error && gmu_dev_ops->wait_for_lowest_idle && + if (!error && GMU_DEV_OP_VALID(gmu_dev_ops, wait_for_lowest_idle) && gmu_dev_ops->wait_for_lowest_idle(adreno_dev)) { gmu_core_setbit(device, GMU_FAULT); @@ -2206,13 +2218,17 @@ int adreno_reset(struct kgsl_device *device, int fault) /* since device is officially off now clear start bit */ clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv); - /* Keep trying to start the device until it works */ - for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) { - ret = adreno_start(device, 0); - if (!ret) - break; + /* Try to reset the device */ + ret = adreno_start(device, 0); - msleep(20); + /* On some GPUS, keep trying until it works */ + if (ret && ADRENO_GPUREV(adreno_dev) < 600) { + for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) { + msleep(20); + ret = adreno_start(device, 0); + if (!ret) + break; + } } } if (ret) @@ -2777,7 +2793,7 @@ int adreno_soft_reset(struct kgsl_device *device) struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); int ret; - if (gmu_dev_ops->oob_set) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_set)) { ret = gmu_dev_ops->oob_set(adreno_dev, oob_gpu); if (ret) return ret; @@ -2801,7 +2817,7 @@ int adreno_soft_reset(struct kgsl_device *device) else ret = _soft_reset(adreno_dev); if (ret) { - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); return ret; } @@ -2855,7 +2871,7 @@ int adreno_soft_reset(struct kgsl_device *device) /* Restore physical performance counter values after soft reset */ adreno_perfcounter_restore(adreno_dev); - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_gpu); return ret; @@ -3111,9 +3127,50 @@ static void adreno_regwrite(struct kgsl_device *device, __raw_writel(value, reg); } +/** + * adreno_gmu_clear_and_unmask_irqs() - Clear pending IRQs and Unmask IRQs + * @adreno_dev: Pointer to the Adreno device that owns the GMU + */ +void adreno_gmu_clear_and_unmask_irqs(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); + + /* Clear any pending IRQs before unmasking on GMU */ + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR, + 0xFFFFFFFF); + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, + 0xFFFFFFFF); + + /* Unmask needed IRQs on GMU */ + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK, + (unsigned int) ~(gmu_dev_ops->gmu2host_intr_mask)); + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, + (unsigned int) ~(gmu_dev_ops->gmu_ao_intr_mask)); +} + +/** + * adreno_gmu_mask_and_clear_irqs() - Mask all IRQs and clear pending IRQs + * @adreno_dev: Pointer to the Adreno device that owns the GMU + */ +void adreno_gmu_mask_and_clear_irqs(struct adreno_device *adreno_dev) +{ + /* Mask all IRQs on GMU */ + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, + 0xFFFFFFFF); + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK, + 0xFFFFFFFF); + + /* Clear any pending IRQs before disabling */ + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, + 0xFFFFFFFF); + adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR, + 0xFFFFFFFF); +} + /* * adreno_gmu_fenced_write() - Check if there is a GMU and it is enabled - * @adreno_dev: Pointer to the Adreno device device that owns the GMU + * @adreno_dev: Pointer to the Adreno device that owns the GMU * 
@offset: 32bit register enum that is to be written * @val: The value to be written to the register * @fence_mask: The value to poll the fence status register @@ -3162,7 +3219,7 @@ unsigned int adreno_gmu_ifpc_show(struct adreno_device *adreno_dev) struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS( KGSL_DEVICE(adreno_dev)); - if (gmu_dev_ops->ifpc_show) + if (GMU_DEV_OP_VALID(gmu_dev_ops, ifpc_show)) return gmu_dev_ops->ifpc_show(adreno_dev); return 0; @@ -3173,7 +3230,7 @@ int adreno_gmu_ifpc_store(struct adreno_device *adreno_dev, unsigned int val) struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS( KGSL_DEVICE(adreno_dev)); - if (gmu_dev_ops->ifpc_store) + if (GMU_DEV_OP_VALID(gmu_dev_ops, ifpc_store)) return gmu_dev_ops->ifpc_store(adreno_dev, val); return -EINVAL; @@ -3456,6 +3513,17 @@ static void adreno_power_stats(struct kgsl_device *device, stats->ram_time = ram_cycles; stats->ram_wait = starved_ram; } + + if (adreno_dev->perfctr_ifpc_lo != 0) { + uint32_t num_ifpc; + + num_ifpc = counter_delta(device, adreno_dev->perfctr_ifpc_lo, + &busy->num_ifpc); + adreno_dev->ifpc_count += num_ifpc; + if (num_ifpc > 0) + trace_adreno_ifpc_count(adreno_dev->ifpc_count); + } + if (adreno_dev->lm_threshold_count && gpudev->count_throttles) gpudev->count_throttles(adreno_dev, adj); diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 952e48b53006235ae12617af711faf76e401060a..dea6286e2d9cf04af1726d268d37ca83e6ef8263 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -171,8 +171,8 @@ #define KGSL_END_OF_PROFILE_IDENTIFIER 0x2DEFADE2 #define KGSL_PWRON_FIXUP_IDENTIFIER 0x2AFAFAFA -/* Number of times to try hard reset */ -#define NUM_TIMES_RESET_RETRY 5 +/* Number of times to try hard reset for pre-a6xx GPUs */ +#define NUM_TIMES_RESET_RETRY 4 /* Number of times to poll the AHB fence in ISR */ #define FENCE_RETRY_MAX 100 @@ -210,6 +210,7 @@ enum adreno_gpurev { ADRENO_REV_A512 = 512, ADRENO_REV_A530 = 530, ADRENO_REV_A540 = 540, + ADRENO_REV_A608 = 608, ADRENO_REV_A615 = 615, ADRENO_REV_A630 = 630, ADRENO_REV_A640 = 640, @@ -306,6 +307,7 @@ struct adreno_busy_data { unsigned int bif_ram_cycles_write_ch1; unsigned int bif_starved_ram; unsigned int bif_starved_ram_ch1; + unsigned int num_ifpc; unsigned int throttle_cycles[ADRENO_GPMU_THROTTLE_COUNTERS]; }; @@ -443,6 +445,7 @@ enum gpu_coresight_sources { * stall cycles in case of GBIF) * @starved_ram_lo_ch1: Number of cycles GBIF is stalled by DDR channel 1 * @perfctr_pwr_lo: GPU busy cycles + * @perfctr_ifpc_lo: IFPC count * @halt: Atomic variable to check whether the GPU is currently halted * @pending_irq_refcnt: Atomic variable to keep track of running IRQ handlers * @ctx_d_debugfs: Context debugfs node @@ -460,6 +463,7 @@ enum gpu_coresight_sources { * @lm_limit: limiting value for LM * @lm_threshold_count: register value for counter for lm threshold breakin * @lm_threshold_cross: number of current peaks exceeding threshold + * @ifpc_count: Number of times the GPU went into IFPC * @speed_bin: Indicate which power level set to use * @csdev: Pointer to a coresight device (if applicable) * @gpmu_throttle_counters - counteers for number of throttled clocks @@ -509,6 +513,7 @@ struct adreno_device { unsigned int starved_ram_lo; unsigned int starved_ram_lo_ch1; unsigned int perfctr_pwr_lo; + unsigned int perfctr_ifpc_lo; atomic_t halt; atomic_t pending_irq_refcnt; struct dentry *ctx_d_debugfs; @@ -528,6 +533,7 @@ struct adreno_device { uint32_t lm_limit; uint32_t lm_threshold_count; uint32_t lm_threshold_cross; + 
uint32_t ifpc_count; unsigned int speed_bin; unsigned int quirks; @@ -1249,6 +1255,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev) ADRENO_GPUREV(adreno_dev) < 700; } +ADRENO_TARGET(a608, ADRENO_REV_A608) ADRENO_TARGET(a615, ADRENO_REV_A615) ADRENO_TARGET(a630, ADRENO_REV_A630) ADRENO_TARGET(a640, ADRENO_REV_A640) @@ -1272,6 +1279,12 @@ static inline int adreno_is_a640v1(struct adreno_device *adreno_dev) (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0); } +static inline int adreno_is_a640v2(struct adreno_device *adreno_dev) +{ + return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A640) && + (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1); +} + /* * adreno_checkreg_off() - Checks the validity of a register enum * @adreno_dev: Pointer to adreno device @@ -1862,10 +1875,13 @@ static inline int adreno_perfcntr_active_oob_get( if (ret) return ret; - if (gmu_dev_ops->oob_set) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_set)) { ret = gmu_dev_ops->oob_set(adreno_dev, oob_perfcntr); - if (ret) + if (ret) { + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev)); kgsl_active_count_put(KGSL_DEVICE(adreno_dev)); + } } return ret; @@ -1877,7 +1893,7 @@ static inline void adreno_perfcntr_active_oob_put( struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS( KGSL_DEVICE(adreno_dev)); - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_perfcntr); kgsl_active_count_put(KGSL_DEVICE(adreno_dev)); @@ -1935,7 +1951,8 @@ static inline void adreno_deassert_gbif_halt(struct adreno_device *adreno_dev) if (adreno_has_gbif(adreno_dev)) adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0x0); } - +void adreno_gmu_clear_and_unmask_irqs(struct adreno_device *adreno_dev); +void adreno_gmu_mask_and_clear_irqs(struct adreno_device *adreno_dev); int adreno_gmu_fenced_write(struct adreno_device *adreno_dev, enum adreno_regs offset, unsigned int val, unsigned int fence_mask); diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 4f989127811831030ce7e6ebcf78b03cb902c4d6..25909776d4b0611c8a5e0c81e608494f352e0ddd 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "kgsl.h" #include "adreno.h" @@ -1904,6 +1905,29 @@ int a3xx_microcode_load(struct adreno_device *adreno_dev, return 0; } +static void a3xx_clk_set_options(struct adreno_device *adreno_dev, + const char *name, struct clk *clk, bool on) +{ + if (!adreno_is_a306a(adreno_dev)) + return; + + /* Handle clock settings for GFX PSCBCs */ + if (on) { + if (!strcmp(name, "mem_iface_clk")) { + clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH); + clk_set_flags(clk, CLKFLAG_NORETAIN_MEM); + } else if (!strcmp(name, "core_clk")) { + clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH); + clk_set_flags(clk, CLKFLAG_RETAIN_MEM); + } + } else { + if (!strcmp(name, "core_clk")) { + clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH); + clk_set_flags(clk, CLKFLAG_NORETAIN_MEM); + } + } +} + struct adreno_gpudev adreno_a3xx_gpudev = { .reg_offsets = &a3xx_reg_offsets, .int_bits = a3xx_int_bits, @@ -1924,4 +1948,5 @@ struct adreno_gpudev adreno_a3xx_gpudev = { .start = a3xx_start, .snapshot = a3xx_snapshot, .coresight = {&a3xx_coresight}, + .clk_set_options = a3xx_clk_set_options, }; diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index c38dff8d2046367d0df179b27879772bdefa62ab..908511ee663d0352ebaa8e742f88a1e111ce8559 100644 --- 
a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -69,6 +69,7 @@ static const struct adreno_vbif_platform a6xx_vbif_platforms[] = { { adreno_is_a615, a615_gbif }, { adreno_is_a640, a640_gbif }, { adreno_is_a680, a640_gbif }, + { adreno_is_a608, a615_gbif }, }; struct kgsl_hwcg_reg { @@ -301,6 +302,58 @@ static const struct kgsl_hwcg_reg a640_hwcg_regs[] = { {A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, }; +static const struct kgsl_hwcg_reg a608_hwcg_regs[] = { + {A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF}, + {A6XX_RBBM_CLOCK_HYST_SP0, 0x00000081}, + {A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + {A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222}, + {A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220}, + {A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00}, + {A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022}, + {A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000}, + {A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {A6XX_RBBM_ISDB_CNT, 0x00000182}, + {A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000}, + {A6XX_RBBM_SP_HYST_CNT, 0x00000000}, + {A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + {A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, +}; + static const struct { int (*devfunc)(struct adreno_device *adreno_dev); const struct kgsl_hwcg_reg *regs; @@ -310,6 +363,7 @@ static const struct { {adreno_is_a615, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)}, {adreno_is_a640, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)}, {adreno_is_a680, a640_hwcg_regs, ARRAY_SIZE(a640_hwcg_regs)}, + {adreno_is_a608, a608_hwcg_regs, ARRAY_SIZE(a608_hwcg_regs)}, }; static struct a6xx_protected_regs { @@ -545,6 +599,8 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev) { if (adreno_is_a630(adreno_dev)) return 0x8AA8AA02; + else if (adreno_is_a608(adreno_dev)) + return 0xAAA8AA82; else return 0x8AA8AA82; } @@ -552,7 +608,9 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev) static inline unsigned int __get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev) { - if (adreno_is_a615(adreno_dev)) + if (adreno_is_a608(adreno_dev)) + return 0x00000022; + else if 
(adreno_is_a615(adreno_dev)) return 0x00000222; else return 0x00020202; @@ -561,7 +619,9 @@ __get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev) static inline unsigned int __get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev) { - if (adreno_is_a615(adreno_dev)) + if (adreno_is_a608(adreno_dev)) + return 0x00000011; + else if (adreno_is_a615(adreno_dev)) return 0x00000111; else return 0x00010111; @@ -570,7 +630,9 @@ __get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev) static inline unsigned int __get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev) { - if (adreno_is_a615(adreno_dev)) + if (adreno_is_a608(adreno_dev)) + return 0x00000055; + else if (adreno_is_a615(adreno_dev)) return 0x00000555; else return 0x00005555; @@ -671,7 +733,7 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev) + sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist)); - if (adreno_is_a615(adreno_dev)) { + if (adreno_is_a615(adreno_dev) || adreno_is_a608(adreno_dev)) { for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) { r = &a615_pwrup_reglist[i]; kgsl_regread(KGSL_DEVICE(adreno_dev), @@ -744,20 +806,30 @@ static void a6xx_start(struct adreno_device *adreno_dev) kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4); /* ROQ sizes are twice as big on a640/a680 than on a630 */ - if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) + if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) { kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); - else + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); + } else if (adreno_is_a608(adreno_dev)) { + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x800060); + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16); + } else { kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0); - kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); + kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C); + } - /* Setting the mem pool size */ - kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128); + /* For a608 Mem pool size is reduced to 1/4 */ + if (adreno_is_a608(adreno_dev)) + kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 32); + else + kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128); /* Setting the primFifo thresholds values */ if (adreno_is_a640(adreno_dev)) kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x400 << 11)); else if (adreno_is_a680(adreno_dev)) kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x800 << 11)); + else if (adreno_is_a608(adreno_dev)) + kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x100 << 11)); else kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11)); @@ -845,7 +917,7 @@ static void a6xx_start(struct adreno_device *adreno_dev) * 3. HFI * At this point, we are guaranteed all. 
*/ - if (gmu_dev_ops->enable_lm) + if (GMU_DEV_OP_VALID(gmu_dev_ops, enable_lm)) gmu_dev_ops->enable_lm(device); } @@ -1123,6 +1195,44 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev, return a6xx_post_start(adreno_dev); } +/* + * a6xx_sptprac_enable() - Power on SPTPRAC + * @adreno_dev: Pointer to Adreno device + */ +static int a6xx_sptprac_enable(struct adreno_device *adreno_dev) +{ + if (adreno_is_a608(adreno_dev)) + return 0; + + return a6xx_gmu_sptprac_enable(adreno_dev); +} + +/* + * a6xx_sptprac_disable() - Power off SPTPRAC + * @adreno_dev: Pointer to Adreno device + */ +static void a6xx_sptprac_disable(struct adreno_device *adreno_dev) +{ + if (adreno_is_a608(adreno_dev)) + return; + + a6xx_gmu_sptprac_disable(adreno_dev); +} + +/* + * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register + * @adreno_dev - Pointer to adreno_device + * This check should only be performed if the keepalive bit is set or it + * can be guaranteed that the power state of the GPU will remain unchanged + */ +bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev) +{ + if (!adreno_has_sptprac_gdsc(adreno_dev)) + return true; + + return a6xx_gmu_sptprac_is_on(adreno_dev); +} + unsigned int a6xx_set_marker( unsigned int *cmds, enum adreno_cp_marker_type type) { @@ -1226,7 +1336,10 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev) return ret; } - return gmu_dev_ops->load_firmware(device); + if (GMU_DEV_OP_VALID(gmu_dev_ops, load_firmware)) + return gmu_dev_ops->load_firmware(device); + + return 0; } static int a6xx_soft_reset(struct adreno_device *adreno_dev) @@ -1271,7 +1384,7 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev) /* Clear GBIF client halt and CX arbiter halt */ adreno_deassert_gbif_halt(adreno_dev); - a6xx_gmu_sptprac_enable(adreno_dev); + a6xx_sptprac_enable(adreno_dev); return 0; } @@ -2813,8 +2926,8 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .platform_setup = a6xx_platform_setup, .init = a6xx_init, .rb_start = a6xx_rb_start, - .regulator_enable = a6xx_gmu_sptprac_enable, - .regulator_disable = a6xx_gmu_sptprac_disable, + .regulator_enable = a6xx_sptprac_enable, + .regulator_disable = a6xx_sptprac_disable, .perfcounters = &a6xx_perfcounters, .enable_pwr_counters = a6xx_enable_pwr_counters, .count_throttles = a6xx_count_throttles, @@ -2836,7 +2949,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .preemption_context_init = a6xx_preemption_context_init, .preemption_context_destroy = a6xx_preemption_context_destroy, .gx_is_on = a6xx_gmu_gx_is_on, - .sptprac_is_on = a6xx_gmu_sptprac_is_on, + .sptprac_is_on = a6xx_sptprac_is_on, .ccu_invalidate = a6xx_ccu_invalidate, .perfcounter_update = a6xx_perfcounter_update, .coresight = {&a6xx_coresight, &a6xx_coresight_cx}, diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h index c40dcccf0bf8981ebc023198b5ba4acce92d916d..7d4f9b895c4d640bfb796f85770e156d76a092cc 100644 --- a/drivers/gpu/msm/adreno_a6xx.h +++ b/drivers/gpu/msm/adreno_a6xx.h @@ -106,6 +106,68 @@ struct cpu_gpu_lock { #define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F)) +/* + * timed_poll_check() - polling *gmu* register at given offset until + * its value changed to match expected value. The function times + * out and returns after given duration if register is not updated + * as expected. 
+ * + * @device: Pointer to KGSL device + * @offset: Register offset + * @expected_ret: expected register value that stops polling + * @timout: number of jiffies to abort the polling + * @mask: bitmask to filter register value to match expected_ret + */ +static inline int timed_poll_check(struct kgsl_device *device, + unsigned int offset, unsigned int expected_ret, + unsigned int timeout, unsigned int mask) +{ + unsigned long t; + unsigned int value; + + t = jiffies + msecs_to_jiffies(timeout); + + do { + gmu_core_regread(device, offset, &value); + if ((value & mask) == expected_ret) + return 0; + /* Wait 100us to reduce unnecessary AHB bus traffic */ + usleep_range(10, 100); + } while (!time_after(jiffies, t)); + + /* Double check one last time */ + gmu_core_regread(device, offset, &value); + if ((value & mask) == expected_ret) + return 0; + + return -ETIMEDOUT; +} + +/* + * read_AO_counter() - Returns the 64bit always on counter value + * + * @device: Pointer to KGSL device + */ +static inline uint64_t read_AO_counter(struct kgsl_device *device) +{ + unsigned int l, h, h1; + + gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h); + gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l); + gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1); + + /* + * If there's no change in COUNTER_H we have no overflow so return, + * otherwise read COUNTER_L again + */ + + if (h == h1) + return (uint64_t) l | ((uint64_t) h << 32); + + gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l); + return (uint64_t) l | ((uint64_t) h1 << 32); +} + /* Preemption functions */ void a6xx_preemption_trigger(struct adreno_device *adreno_dev); void a6xx_preemption_schedule(struct adreno_device *adreno_dev); @@ -131,8 +193,6 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot); void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot); -void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, - struct kgsl_snapshot *snapshot); void a6xx_crashdump_init(struct adreno_device *adreno_dev); int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev); void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev); diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c index d3f36cfe7c69e2324d07f7d259641672a0b74ac9..8b5b797d014d80c971478a79c39a0d57db4287d8 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu.c @@ -11,9 +11,8 @@ * */ #include -#include -#include #include +#include #include "kgsl_gmu_core.h" #include "kgsl_gmu.h" @@ -171,48 +170,13 @@ static void _load_gmu_rpmh_ucode(struct kgsl_device *device) wmb(); } +/* GMU timeouts */ +#define GMU_IDLE_TIMEOUT 100 /* ms */ #define GMU_START_TIMEOUT 100 /* ms */ #define GPU_START_TIMEOUT 100 /* ms */ #define GPU_RESET_TIMEOUT 1 /* ms */ #define GPU_RESET_TIMEOUT_US 10 /* us */ -/* - * timed_poll_check() - polling *gmu* register at given offset until - * its value changed to match expected value. The function times - * out and returns after given duration if register is not updated - * as expected. 
- * - * @device: Pointer to KGSL device - * @offset: Register offset - * @expected_ret: expected register value that stops polling - * @timout: number of jiffies to abort the polling - * @mask: bitmask to filter register value to match expected_ret - */ -static int timed_poll_check(struct kgsl_device *device, - unsigned int offset, unsigned int expected_ret, - unsigned int timeout, unsigned int mask) -{ - unsigned long t; - unsigned int value; - - t = jiffies + msecs_to_jiffies(timeout); - - do { - gmu_core_regread(device, offset, &value); - if ((value & mask) == expected_ret) - return 0; - /* Wait 100us to reduce unnecessary AHB bus traffic */ - usleep_range(10, 100); - } while (!time_after(jiffies, t)); - - /* Double check one last time */ - gmu_core_regread(device, offset, &value); - if ((value & mask) == expected_ret) - return 0; - - return -EINVAL; -} - /* * The lowest 16 bits of this value are the number of XO clock cycles * for main hysteresis. This is the first hysteresis. Here we set it @@ -334,26 +298,6 @@ static int a6xx_gmu_hfi_start(struct kgsl_device *device) return 0; } -static uint64_t read_AO_counter(struct kgsl_device *device) -{ - unsigned int l, h, h1; - - gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h); - gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l); - gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1); - - /* - * If there's no change in COUNTER_H we have no overflow so return, - * otherwise read COUNTER_L again - */ - - if (h == h1) - return (uint64_t) l | ((uint64_t) h << 32); - - gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l); - return (uint64_t) l | ((uint64_t) h1 << 32); -} - static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device) { struct gmu_device *gmu = KGSL_GMU_DEVICE(device); @@ -621,6 +565,32 @@ static inline void a6xx_gmu_oob_clear(struct adreno_device *adreno_dev, trace_kgsl_gmu_oob_clear(clear); } +static void a6xx_gmu_irq_enable(struct kgsl_device *device) +{ + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct kgsl_hfi *hfi = &gmu->hfi; + + /* Clear pending IRQs and Unmask needed IRQs */ + adreno_gmu_clear_and_unmask_irqs(ADRENO_DEVICE(device)); + + /* Enable all IRQs on host */ + enable_irq(hfi->hfi_interrupt_num); + enable_irq(gmu->gmu_interrupt_num); +} + +static void a6xx_gmu_irq_disable(struct kgsl_device *device) +{ + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct kgsl_hfi *hfi = &gmu->hfi; + + /* Disable all IRQs on host */ + disable_irq(gmu->gmu_interrupt_num); + disable_irq(hfi->hfi_interrupt_num); + + /* Mask all IRQs and clear pending IRQs */ + adreno_gmu_mask_and_clear_irqs(ADRENO_DEVICE(device)); +} + static int a6xx_gmu_hfi_start_msg(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -701,10 +671,8 @@ int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev) struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - if (!gmu_core_gpmu_isenabled(device)) - return -EINVAL; - - if (!adreno_has_sptprac_gdsc(adreno_dev)) + if (!gmu_core_gpmu_isenabled(device) || + !adreno_has_sptprac_gdsc(adreno_dev)) return 0; gmu_core_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, @@ -731,10 +699,8 @@ void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev) struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - if (!adreno_has_sptprac_gdsc(adreno_dev)) - return; - - if (!gmu_core_gpmu_isenabled(device)) + 
if (!gmu_core_gpmu_isenabled(device) || + !adreno_has_sptprac_gdsc(adreno_dev)) return; /* Ensure that retention is on */ @@ -786,7 +752,7 @@ bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev) struct kgsl_device *device = KGSL_DEVICE(adreno_dev); unsigned int val; - if (!gmu_core_isenabled(device) || !adreno_has_sptprac_gdsc(adreno_dev)) + if (!gmu_core_isenabled(device)) return true; gmu_core_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val); @@ -901,6 +867,9 @@ static int a6xx_gmu_wait_for_idle(struct adreno_device *adreno_dev) return 0; } +/* A6xx GMU FENCE RANGE MASK */ +#define GMU_FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0)) + /* * a6xx_gmu_fw_start() - set up GMU and start FW * @device: Pointer to KGSL device @@ -958,7 +927,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1); gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0, - FENCE_RANGE_MASK); + GMU_FENCE_RANGE_MASK); /* Pass chipid to GMU FW, must happen before starting GMU */ @@ -1389,7 +1358,7 @@ static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device, * This is where all of the A6XX GMU specific bits and pieces are grabbed * into the snapshot memory */ -void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, +static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -1432,14 +1401,16 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .load_firmware = a6xx_gmu_load_firmware, .oob_set = a6xx_gmu_oob_set, .oob_clear = a6xx_gmu_oob_clear, + .irq_enable = a6xx_gmu_irq_enable, + .irq_disable = a6xx_gmu_irq_disable, .hfi_start_msg = a6xx_gmu_hfi_start_msg, .enable_lm = a6xx_gmu_enable_lm, .rpmh_gpu_pwrctrl = a6xx_gmu_rpmh_gpu_pwrctrl, .wait_for_lowest_idle = a6xx_gmu_wait_for_lowest_idle, .wait_for_gmu_idle = a6xx_gmu_wait_for_idle, - .sptprac_enable = a6xx_gmu_sptprac_enable, - .sptprac_disable = a6xx_gmu_sptprac_disable, .ifpc_store = a6xx_gmu_ifpc_store, .ifpc_show = a6xx_gmu_ifpc_show, .snapshot = a6xx_gmu_snapshot, + .gmu2host_intr_mask = HFI_IRQ_MASK, + .gmu_ao_intr_mask = GMU_AO_INT_MASK, }; diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index 0c308edea824e363e3e96422ba5b10d817932d33..bbfac62152970f1dfc0387b8fcbc70a90985777c 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -46,10 +46,15 @@ static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer) if (in_interrupt() == 0) { int status; - if (gmu_dev_ops->oob_set) { + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_set)) { status = gmu_dev_ops->oob_set(adreno_dev, oob_preempt); - if (status) + if (status) { + adreno_set_gpu_fault(adreno_dev, + ADRENO_GMU_FAULT); + adreno_dispatcher_schedule( + KGSL_DEVICE(adreno_dev)); return; + } } } @@ -75,7 +80,7 @@ static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer) spin_unlock_irqrestore(&rb->preempt_lock, flags); if (in_interrupt() == 0) { - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_preempt); } } diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c index 6b92c077367ffe09a917756d99c28467386ed831..b9016cdf95333435c765a20425b313f2dab7b0f7 100644 --- a/drivers/gpu/msm/adreno_a6xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c @@ -18,6 +18,7 @@ #include "adreno_snapshot.h" #include "a6xx_reg.h" #include 
"adreno_a6xx.h" +#include "kgsl_gmu_core.h" #define A6XX_NUM_CTXTS 2 #define A6XX_NUM_AXI_ARB_BLOCKS 2 @@ -1491,12 +1492,18 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); struct adreno_snapshot_data *snap_data = gpudev->snapshot_data; bool sptprac_on; - unsigned int i; + unsigned int i, roq_size; + + /* ROQ size is 0x800 DW on a640 and a680 */ + roq_size = adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev) ? + (snap_data->sect_sizes->roq * 2) : snap_data->sect_sizes->roq; /* GMU TCM data dumped through AHB */ - a6xx_gmu_snapshot(adreno_dev, snapshot); + if (GMU_DEV_OP_VALID(gmu_dev_ops, snapshot)) + gmu_dev_ops->snapshot(adreno_dev, snapshot); sptprac_on = gpudev->sptprac_is_on(adreno_dev); @@ -1544,8 +1551,7 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, /* CP ROQ */ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, - snapshot, adreno_snapshot_cp_roq, - &snap_data->sect_sizes->roq); + snapshot, adreno_snapshot_cp_roq, &roq_size); /* SQE Firmware */ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG, diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c index 066423c1265c77741bcbd8c6155009c26b3765e4..891fcb3a4b04ac03b779b31012d8596c3ca9ceb5 100644 --- a/drivers/gpu/msm/adreno_perfcounter.c +++ b/drivers/gpu/msm/adreno_perfcounter.c @@ -181,7 +181,7 @@ inline void adreno_perfcounter_save(struct adreno_device *adreno_dev) if (counters == NULL) return; - if (gmu_dev_ops->oob_set) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_set)) ret = gmu_dev_ops->oob_set(adreno_dev, oob_perfcntr); /* if oob_set timeout, clear the mask and return */ @@ -209,7 +209,7 @@ inline void adreno_perfcounter_save(struct adreno_device *adreno_dev) } done: - if (gmu_dev_ops->oob_clear) + if (GMU_DEV_OP_VALID(gmu_dev_ops, oob_clear)) gmu_dev_ops->oob_clear(adreno_dev, oob_perfcntr); } diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c index df55407564f90d7451c518b2712164e04af16b88..9df89e38cb1170f50761d35e2c9b8b013a66085d 100644 --- a/drivers/gpu/msm/adreno_snapshot.c +++ b/drivers/gpu/msm/adreno_snapshot.c @@ -13,7 +13,6 @@ #include "kgsl.h" #include "kgsl_sharedmem.h" #include "kgsl_snapshot.h" -#include "kgsl_gmu_core.h" #include "adreno.h" #include "adreno_pm4types.h" @@ -961,7 +960,7 @@ void adreno_snapshot_gmu(struct kgsl_device *device, struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); /* Add GMU specific sections */ - if (gmu_dev_ops && gmu_dev_ops->snapshot) + if (GMU_DEV_OP_VALID(gmu_dev_ops, snapshot)) gmu_dev_ops->snapshot(adreno_dev, snapshot); if (gpudev->snapshot_debugbus) diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c index 232770e347e55a7acb743028e5d18ce85976cb52..a414e553cd7b028cefe18328c06f9bf02bea0049 100644 --- a/drivers/gpu/msm/adreno_sysfs.c +++ b/drivers/gpu/msm/adreno_sysfs.c @@ -300,6 +300,11 @@ static unsigned int _ifpc_show(struct adreno_device *adreno_dev) return adreno_gmu_ifpc_show(adreno_dev); } +static unsigned int _ifpc_count_show(struct adreno_device *adreno_dev) +{ + return adreno_dev->ifpc_count; +} + static unsigned int _preempt_count_show(struct adreno_device *adreno_dev) { struct adreno_preemption *preempt = &adreno_dev->preempt; @@ -410,6 +415,7 @@ static ADRENO_SYSFS_BOOL(preemption); static ADRENO_SYSFS_BOOL(hwcg); static ADRENO_SYSFS_BOOL(throttling); 
static ADRENO_SYSFS_BOOL(ifpc); +static ADRENO_SYSFS_RO_U32(ifpc_count); @@ -431,6 +437,7 @@ static const struct device_attribute *_attr_list[] = { &adreno_attr_usesgmem.attr, &adreno_attr_skipsaverestore.attr, &adreno_attr_ifpc.attr, + &adreno_attr_ifpc_count.attr, &adreno_attr_preempt_count.attr, NULL, }; diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index de028fad6a02cf9a7b7c7c6d166da1dca49365d4..bf5e798839b0400c04b82c22fe2dad27a356437e 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -609,6 +609,19 @@ TRACE_EVENT(adreno_preempt_done, __entry->next->id, __entry->cur->id, __entry->level ) ); + +TRACE_EVENT(adreno_ifpc_count, + TP_PROTO(unsigned int ifpc_count), + TP_ARGS(ifpc_count), + TP_STRUCT__entry( + __field(unsigned int, ifpc_count) + ), + TP_fast_assign( + __entry->ifpc_count = ifpc_count; + ), + TP_printk("total times GMU entered IFPC = %d", __entry->ifpc_count) +); + #endif /* _ADRENO_TRACE_H */ /* This part must be outside protection */ diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index c27aa2dcd3680d5cae1f205e3146448f5d55a74e..4abeda28fb73ea1290ec8d593c53d6431d749d85 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -10,16 +10,14 @@ * GNU General Public License for more details. * */ -#include -#include #include #include +#include #include #include #include #include #include -#include #include #include @@ -1033,35 +1031,6 @@ static irqreturn_t gmu_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t hfi_irq_handler(int irq, void *data) -{ - struct kgsl_device *device = data; - struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - struct kgsl_hfi *hfi = &gmu->hfi; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - unsigned int status = 0; - - adreno_read_gmureg(ADRENO_DEVICE(device), - ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status); - adreno_write_gmureg(ADRENO_DEVICE(device), - ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status); - - if (status & HFI_IRQ_MSGQ_MASK) - tasklet_hi_schedule(&hfi->tasklet); - if (status & HFI_IRQ_CM3_FAULT_MASK) { - dev_err_ratelimited(&gmu->pdev->dev, - "GMU CM3 fault interrupt received\n"); - adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); - adreno_dispatcher_schedule(device); - } - if (status & ~HFI_IRQ_MASK) - dev_err_ratelimited(&gmu->pdev->dev, - "Unhandled HFI interrupts 0x%lx\n", - status & ~HFI_IRQ_MASK); - - return IRQ_HANDLED; -} - static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node) { int ret; @@ -1306,52 +1275,6 @@ static int gmu_irq_probe(struct kgsl_device *device, struct gmu_device *gmu) return ret; } -static void gmu_irq_enable(struct kgsl_device *device) -{ - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - struct kgsl_hfi *hfi = &gmu->hfi; - - /* Clear any pending IRQs before unmasking on GMU */ - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR, - 0xFFFFFFFF); - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, - 0xFFFFFFFF); - - /* Unmask needed IRQs on GMU */ - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK, - (unsigned int) ~HFI_IRQ_MASK); - 
adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, - (unsigned int) ~GMU_AO_INT_MASK); - - /* Enable all IRQs on host */ - enable_irq(hfi->hfi_interrupt_num); - enable_irq(gmu->gmu_interrupt_num); -} - -static void gmu_irq_disable(struct kgsl_device *device) -{ - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct gmu_device *gmu = KGSL_GMU_DEVICE(device); - struct kgsl_hfi *hfi = &gmu->hfi; - - /* Disable all IRQs on host */ - disable_irq(gmu->gmu_interrupt_num); - disable_irq(hfi->hfi_interrupt_num); - - /* Mask all IRQs on GMU */ - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, - 0xFFFFFFFF); - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK, - 0xFFFFFFFF); - - /* Clear any pending IRQs before disabling */ - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, - 0xFFFFFFFF); - adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR, - 0xFFFFFFFF); -} - /* Do not access any GMU registers in GMU probe function */ static int gmu_probe(struct kgsl_device *device, struct device_node *node, unsigned long flags) @@ -1412,11 +1335,11 @@ static int gmu_probe(struct kgsl_device *device, goto error; /* Don't enable GMU interrupts until GMU started */ - /* We cannot use gmu_irq_disable because it writes registers */ + /* We cannot use irq_disable because it writes registers */ disable_irq(gmu->gmu_interrupt_num); disable_irq(hfi->hfi_interrupt_num); - tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu); + tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)device); INIT_LIST_HEAD(&hfi->msglist); spin_lock_init(&hfi->msglock); hfi->kgsldev = device; @@ -1585,9 +1508,8 @@ static int gmu_suspend(struct kgsl_device *device) return 0; /* Pending message in all queues are abandoned */ + gmu_dev_ops->irq_disable(device); hfi_stop(gmu); - clear_bit(GMU_HFI_ON, &gmu->flags); - gmu_irq_disable(device); if (gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0)) return -EINVAL; @@ -1601,6 +1523,7 @@ static int gmu_suspend(struct kgsl_device *device) static void gmu_snapshot(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); struct gmu_device *gmu = KGSL_GMU_DEVICE(device); /* Mask so there's no interrupt caused by NMI */ @@ -1623,37 +1546,11 @@ static void gmu_snapshot(struct kgsl_device *device) ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF); adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK, - (unsigned int) ~HFI_IRQ_MASK); + ~(gmu_dev_ops->gmu2host_intr_mask)); gmu->fault_count++; } -static void gmu_change_gpu_pwrlevel(struct kgsl_device *device, - unsigned int new_level) -{ - - struct kgsl_pwrctrl *pwr = &device->pwrctrl; - unsigned int old_level = pwr->active_pwrlevel; - - /* - * Update the level according to any thermal, - * max/min, or power constraints. - */ - new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level); - - /* - * If thermal cycling is required and the new level hits the - * thermal limit, kick off the cycling. 
- */ - kgsl_pwrctrl_set_thermal_cycle(device, new_level); - - pwr->active_pwrlevel = new_level; - pwr->previous_pwrlevel = old_level; - - /* Request adjusted DCVS level */ - kgsl_clk_set_rate(device, pwr->active_pwrlevel); -} - /* To be called to power on both GPU and GMU */ static int gmu_start(struct kgsl_device *device) { @@ -1669,7 +1566,7 @@ static int gmu_start(struct kgsl_device *device) WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags)); gmu_enable_gdsc(gmu); gmu_enable_clks(gmu); - gmu_irq_enable(device); + gmu_dev_ops->irq_enable(device); /* Vote for 300MHz DDR for GMU to init */ ret = msm_bus_scale_client_update_request(gmu->pcl, @@ -1688,7 +1585,7 @@ static int gmu_start(struct kgsl_device *device) goto error_gmu; /* Request default DCVS level */ - gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel); + kgsl_pwrctrl_set_default_gpu_pwrlevel(device); msm_bus_scale_client_update_request(gmu->pcl, 0); break; @@ -1696,7 +1593,7 @@ static int gmu_start(struct kgsl_device *device) WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags)); gmu_enable_gdsc(gmu); gmu_enable_clks(gmu); - gmu_irq_enable(device); + gmu_dev_ops->irq_enable(device); ret = gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START, GMU_COLD_BOOT, 0); @@ -1707,7 +1604,7 @@ static int gmu_start(struct kgsl_device *device) if (ret) goto error_gmu; - gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel); + kgsl_pwrctrl_set_default_gpu_pwrlevel(device); break; case KGSL_STATE_RESET: @@ -1716,7 +1613,7 @@ static int gmu_start(struct kgsl_device *device) gmu_suspend(device); gmu_enable_gdsc(gmu); gmu_enable_clks(gmu); - gmu_irq_enable(device); + gmu_dev_ops->irq_enable(device); ret = gmu_dev_ops->rpmh_gpu_pwrctrl( adreno_dev, GMU_FW_START, GMU_COLD_BOOT, 0); @@ -1729,8 +1626,7 @@ static int gmu_start(struct kgsl_device *device) goto error_gmu; /* Send DCVS level prior to reset*/ - gmu_change_gpu_pwrlevel(device, - pwr->default_pwrlevel); + kgsl_pwrctrl_set_default_gpu_pwrlevel(device); } else { /* GMU fast boot */ hfi_stop(gmu); @@ -1784,9 +1680,8 @@ static void gmu_stop(struct kgsl_device *device) goto error; /* Pending message in all queues are abandoned */ + gmu_dev_ops->irq_disable(device); hfi_stop(gmu); - clear_bit(GMU_HFI_ON, &gmu->flags); - gmu_irq_disable(device); gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0); gmu_disable_clks(gmu); @@ -1828,13 +1723,13 @@ static void gmu_remove(struct kgsl_device *device) if (gmu->gmu_interrupt_num) { devm_free_irq(&gmu->pdev->dev, - gmu->gmu_interrupt_num, gmu); + gmu->gmu_interrupt_num, device); gmu->gmu_interrupt_num = 0; } if (hfi->hfi_interrupt_num) { devm_free_irq(&gmu->pdev->dev, - hfi->hfi_interrupt_num, hfi); + hfi->hfi_interrupt_num, device); hfi->hfi_interrupt_num = 0; } diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h index 33605c8a04322a4a5926cef6cc7f5633a17e72cf..b8e097e8e8549144120ae58c491a995272a83b15 100644 --- a/drivers/gpu/msm/kgsl_gmu.h +++ b/drivers/gpu/msm/kgsl_gmu.h @@ -13,13 +13,23 @@ #ifndef __KGSL_GMU_H #define __KGSL_GMU_H +#include "kgsl_gmu_core.h" #include "kgsl_hfi.h" #define MAX_GMUFW_SIZE 0x2000 /* in bytes */ -#define FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0)) #define BWMEM_SIZE (12 + (4 * NUM_BW_LEVELS)) /*in bytes*/ +#define GMU_INT_WDOG_BITE BIT(0) +#define GMU_INT_RSCC_COMP BIT(1) +#define GMU_INT_FENCE_ERR BIT(3) +#define GMU_INT_DBD_WAKEUP BIT(4) +#define GMU_INT_HOST_AHB_BUS_ERR BIT(5) +#define GMU_AO_INT_MASK \ + (GMU_INT_WDOG_BITE | \ + GMU_INT_FENCE_ERR | \ + GMU_INT_HOST_AHB_BUS_ERR) + /* Bitmask 
for GPU low power mode enabling and hysterisis*/ #define SPTP_ENABLE_MASK (BIT(2) | BIT(0)) #define IFPC_ENABLE_MASK (BIT(1) | BIT(0)) @@ -41,9 +51,6 @@ CX_VOTE_ENABLE | \ GFX_VOTE_ENABLE) -/* GMU timeouts */ -#define GMU_IDLE_TIMEOUT 100 /* ms */ - /* Constants for GMU OOBs */ #define OOB_BOOT_OPTION 0 #define OOB_SLUMBER_OPTION 1 @@ -92,25 +99,6 @@ enum gmu_load_mode { INVALID_LOAD }; -enum gmu_pwrctrl_mode { - GMU_FW_START, - GMU_FW_STOP, - GMU_SUSPEND, - GMU_DCVS_NOHFI, - GMU_NOTIFY_SLUMBER, - INVALID_POWER_CTRL -}; - -enum gpu_idle_level { - GPU_HW_ACTIVE = 0x0, - GPU_HW_SPTP_PC = 0x2, - GPU_HW_IFPC = 0x3, - GPU_HW_NAP = 0x4, - GPU_HW_MIN_VOLT = 0x5, - GPU_HW_MIN_DDR = 0x6, - GPU_HW_SLUMBER = 0xF -}; - /** * struct gmu_device - GMU device structure * @ver: GMU FW version, read from GMU diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c index 37a9d3178e8eadda06b5e2ac113cc80fbe69a6bf..3979a93839a245ece40153b2961f674570cc3d88 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.c +++ b/drivers/gpu/msm/kgsl_gmu_core.c @@ -12,15 +12,6 @@ */ #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include "kgsl_device.h" #include "kgsl_gmu_core.h" @@ -202,6 +193,11 @@ void gmu_core_regread(struct kgsl_device *device, unsigned int offsetwords, { struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device); + if (!gmu_core_is_register_offset(device, offsetwords)) { + WARN(1, "Out of bounds register read: 0x%x\n", offsetwords); + return; + } + if (gmu_core_ops && gmu_core_ops->regread) gmu_core_ops->regread(device, offsetwords, value); else @@ -213,6 +209,11 @@ void gmu_core_regwrite(struct kgsl_device *device, unsigned int offsetwords, { struct gmu_core_ops *gmu_core_ops = GMU_CORE_OPS(device); + if (!gmu_core_is_register_offset(device, offsetwords)) { + WARN(1, "Out of bounds register write: 0x%x\n", offsetwords); + return; + } + if (gmu_core_ops && gmu_core_ops->regwrite) gmu_core_ops->regwrite(device, offsetwords, value); } @@ -223,6 +224,11 @@ void gmu_core_regrmw(struct kgsl_device *device, { unsigned int val = 0; + if (!gmu_core_is_register_offset(device, offsetwords)) { + WARN(1, "Out of bounds register rmw: 0x%x\n", offsetwords); + return; + } + gmu_core_regread(device, offsetwords, &val); val &= ~mask; gmu_core_regwrite(device, offsetwords, val | bits); diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h index 9d8b65818af8a2a5131a425d31ba020663bdc137..91510e42f1e0d7e70c959de57bb5ea0c0b349be1 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.h +++ b/drivers/gpu/msm/kgsl_gmu_core.h @@ -13,20 +13,14 @@ #ifndef __KGSL_GMU_CORE_H #define __KGSL_GMU_CORE_H -#define GMU_INT_WDOG_BITE BIT(0) -#define GMU_INT_RSCC_COMP BIT(1) -#define GMU_INT_FENCE_ERR BIT(3) -#define GMU_INT_DBD_WAKEUP BIT(4) -#define GMU_INT_HOST_AHB_BUS_ERR BIT(5) -#define GMU_AO_INT_MASK \ - (GMU_INT_WDOG_BITE | \ - GMU_INT_HOST_AHB_BUS_ERR | \ - GMU_INT_FENCE_ERR) - /* GMU_DEVICE - Given an KGSL device return the GMU specific struct */ #define GMU_DEVICE_OPS(_a) ((_a)->gmu_core.dev_ops) #define GMU_CORE_OPS(_a) ((_a)->gmu_core.core_ops) +#define GMU_DEV_OP_VALID(_devops, _field) \ + (((_devops) != NULL) && \ + ((_devops)->_field != NULL)) + #define NUM_BW_LEVELS 100 #define MAX_GX_LEVELS 16 #define MAX_CX_LEVELS 4 @@ -80,6 +74,25 @@ enum oob_request { oob_dcvs = 7, /* reserved special case */ }; +enum gmu_pwrctrl_mode { + GMU_FW_START, + GMU_FW_STOP, + GMU_SUSPEND, + GMU_DCVS_NOHFI, + GMU_NOTIFY_SLUMBER, + INVALID_POWER_CTRL +}; + +enum 
gpu_idle_level { + GPU_HW_ACTIVE = 0x0, + GPU_HW_SPTP_PC = 0x2, + GPU_HW_IFPC = 0x3, + GPU_HW_NAP = 0x4, + GPU_HW_MIN_VOLT = 0x5, + GPU_HW_MIN_DDR = 0x6, + GPU_HW_SLUMBER = 0xF +}; + /* * Wait time before trying to write the register again. * Hopefully the GMU has finished waking up during this delay. @@ -115,8 +128,6 @@ struct gmu_core_ops { int (*start)(struct kgsl_device *device); void (*stop)(struct kgsl_device *device); void (*snapshot)(struct kgsl_device *device); - int (*get_idle_level)(struct kgsl_device *device); - void (*set_idle_level)(struct kgsl_device *device, unsigned int val); bool (*regulator_isenabled)(struct kgsl_device *device); int (*suspend)(struct kgsl_device *device); }; @@ -127,18 +138,20 @@ struct gmu_dev_ops { enum oob_request req); void (*oob_clear)(struct adreno_device *adreno_dev, enum oob_request req); + void (*irq_enable)(struct kgsl_device *device); + void (*irq_disable)(struct kgsl_device *device); int (*hfi_start_msg)(struct adreno_device *adreno_dev); void (*enable_lm)(struct kgsl_device *device); int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops, unsigned int arg1, unsigned int arg2); int (*wait_for_lowest_idle)(struct adreno_device *); int (*wait_for_gmu_idle)(struct adreno_device *); - int (*sptprac_enable)(struct adreno_device *adreno_dev); - void (*sptprac_disable)(struct adreno_device *adreno_dev); int (*ifpc_store)(struct adreno_device *adreno_dev, unsigned int val); unsigned int (*ifpc_show)(struct adreno_device *adreno_dev); void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); + const unsigned int gmu2host_intr_mask; + const unsigned int gmu_ao_intr_mask; }; /** diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index bec11d672aa2767b95082314d67fabfeef077fd3..354258ca647d0d97015769a09c843f1f89093ea1 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -58,6 +58,9 @@ static int hfi_queue_read(struct gmu_device *gmu, uint32_t queue_idx, uint32_t size; int result = 0; + if (hdr->status == HFI_QUEUE_STATUS_DISABLED) + return -EINVAL; + if (hdr->read_index == hdr->write_index) { hdr->rx_req = 1; return -ENODATA; @@ -113,7 +116,7 @@ static int hfi_queue_write(struct gmu_device *gmu, uint32_t queue_idx, uint32_t size = MSG_HDR_GET_SIZE(*msg); uint32_t id = MSG_HDR_GET_ID(*msg); - if (hdr->enabled == 0) + if (hdr->status == HFI_QUEUE_STATUS_DISABLED) return -EINVAL; if (size > HFI_MAX_MSG_SIZE) { @@ -188,11 +191,12 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, struct { unsigned int idx; unsigned int pri; + unsigned int status; } queue[HFI_QUEUE_MAX] = { - { HFI_CMD_ID, HFI_CMD_PRI }, - { HFI_MSG_ID, HFI_MSG_PRI }, - { HFI_DBG_ID, HFI_DBG_PRI }, - { HFI_DSP_ID_0, HFI_DSP_PRI_0 }, + { HFI_CMD_IDX, HFI_CMD_PRI, HFI_QUEUE_STATUS_ENABLED }, + { HFI_MSG_IDX, HFI_MSG_PRI, HFI_QUEUE_STATUS_ENABLED }, + { HFI_DBG_IDX, HFI_DBG_PRI, HFI_QUEUE_STATUS_ENABLED }, + { HFI_DSP_IDX_0, HFI_DSP_PRI_0, HFI_QUEUE_STATUS_DISABLED }, }; /* Fill Table Header */ @@ -209,7 +213,7 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, hdr = &tbl->qhdr[i]; hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i); hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0, 0); - hdr->enabled = 0x1; + hdr->status = queue[i].status; hdr->queue_size = queue_sz_bytes >> 2; /* convert to dwords */ hdr->msg_size = 0; hdr->drop_cnt = 0; @@ -227,13 +231,15 @@ void hfi_init(struct kgsl_hfi *hfi, struct gmu_memdesc *mem_addr, #define HDR_CMP_SEQNUM(out_hdr, in_hdr) \ 
(MSG_HDR_GET_SEQNUM(out_hdr) == MSG_HDR_GET_SEQNUM(in_hdr)) -static void receive_ack_cmd(struct gmu_device *gmu, void *rcvd) +static void receive_ack_cmd(struct kgsl_device *device, + struct gmu_device *gmu, void *rcvd) { uint32_t *ack = rcvd; uint32_t hdr = ack[0]; uint32_t req_hdr = ack[1]; struct kgsl_hfi *hfi = &gmu->hfi; struct pending_cmd *cmd = NULL; + uint32_t waiters[64], i = 0, j; trace_kgsl_hfi_receive(MSG_HDR_GET_ID(req_hdr), MSG_HDR_GET_SIZE(req_hdr), @@ -247,11 +253,21 @@ static void receive_ack_cmd(struct gmu_device *gmu, void *rcvd) spin_unlock_bh(&hfi->msglock); return; } + if (i < 64) + waiters[i++] = cmd->sent_hdr; } spin_unlock_bh(&hfi->msglock); - dev_err(&gmu->pdev->dev, - "HFI ACK(0x%x) Cannot find sender\n", req_hdr); + dev_err_ratelimited(&gmu->pdev->dev, + "HFI ACK: Cannot find sender for 0x%8.8X\n", req_hdr); + /* Didn't find the sender, list all the waiters */ + for (j = 0; j < i && j < 64; j++) { + dev_err_ratelimited(&gmu->pdev->dev, + "HFI ACK: Waiters: 0x%8.8X\n", waiters[j]); + } + + adreno_set_gpu_fault(ADRENO_DEVICE(device), ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(device); } #define MSG_HDR_SET_SEQNUM(hdr, num) \ @@ -312,7 +328,13 @@ static int hfi_send_generic_req(struct gmu_device *gmu, uint32_t queue, if (rc) return rc; - return ret_cmd.results[2]; + if (ret_cmd.results[2]) + dev_err(&gmu->pdev->dev, + "HFI ACK failure: Req 0x%8.8X Error 0x%X\n", + ret_cmd.results[1], + ret_cmd.results[2]); + + return ret_cmd.results[2] ? -EINVAL : 0; } static int hfi_send_gmu_init(struct gmu_device *gmu, uint32_t boot_state) @@ -364,36 +386,60 @@ static int hfi_send_core_fw_start(struct gmu_device *gmu) return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); } -static struct hfi_feature { - uint32_t feature; - uint32_t enable; - uint32_t data; -} hfi_features[] = { - { HFI_FEATURE_ECP, 0, 0}, +static const char * const hfi_features[] = { + [HFI_FEATURE_ECP] = "ECP", }; -static int hfi_send_feature_ctrls(struct gmu_device *gmu) +static const char *feature_to_string(uint32_t feature) { - struct hfi_feature_ctrl_cmd cmd; - int ret = 0, i; - - for (i = 0; i < ARRAY_SIZE(hfi_features); i++) { - cmd.hdr = CMD_MSG_HDR(H2F_MSG_FEATURE_CTRL, sizeof(cmd)); - cmd.feature = hfi_features[i].feature; - cmd.enable = hfi_features[i].enable; - cmd.data = hfi_features[i].data; - - ret = hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); - if (ret) { - pr_err("KGSL: setfeature fail:%d [%d:%d:0x%x]\n", ret, - hfi_features[i].feature, - hfi_features[i].enable, - hfi_features[i].data); - return ret; - } + if (feature < ARRAY_SIZE(hfi_features) && hfi_features[feature]) + return hfi_features[feature]; + + return "unknown"; +} + +static int hfi_send_feature_ctrl(struct gmu_device *gmu, + uint32_t feature, uint32_t enable, uint32_t data) +{ + struct hfi_feature_ctrl_cmd cmd = { + .hdr = CMD_MSG_HDR(H2F_MSG_FEATURE_CTRL, sizeof(cmd)), + .feature = feature, + .enable = enable, + .data = data, + }; + int ret; + + ret = hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); + if (ret) + dev_err(&gmu->pdev->dev, + "Unable to %s feature %s (%d)\n", + enable ? 
"enable" : "disable", + feature_to_string(feature), + feature); + return ret; +} + +static int hfi_send_dcvstbl_v1(struct gmu_device *gmu) +{ + struct hfi_dcvstable_v1_cmd cmd = { + .hdr = CMD_MSG_HDR(H2F_MSG_PERF_TBL, sizeof(cmd)), + .gpu_level_num = gmu->num_gpupwrlevels, + .gmu_level_num = gmu->num_gmupwrlevels, + }; + int i; + + for (i = 0; i < gmu->num_gpupwrlevels; i++) { + cmd.gx_votes[i].vote = gmu->rpmh_votes.gx_votes[i]; + /* Divide by 1000 to convert to kHz */ + cmd.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; } - return 0; + for (i = 0; i < gmu->num_gmupwrlevels; i++) { + cmd.cx_votes[i].vote = gmu->rpmh_votes.cx_votes[i]; + cmd.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; + } + + return hfi_send_generic_req(gmu, HFI_CMD_IDX, &cmd); } static int hfi_send_dcvstbl(struct gmu_device *gmu) @@ -407,6 +453,11 @@ static int hfi_send_dcvstbl(struct gmu_device *gmu) for (i = 0; i < gmu->num_gpupwrlevels; i++) { cmd.gx_votes[i].vote = gmu->rpmh_votes.gx_votes[i]; + /* + * Set ACD threshold to the maximum value as a default. + * At this level, ACD will never activate. + */ + cmd.gx_votes[i].acd = 0xFFFFFFFF; /* Divide by 1000 to convert to kHz */ cmd.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; } @@ -480,11 +531,12 @@ static void receive_debug_req(struct gmu_device *gmu, void *rcvd) cmd->type, cmd->timestamp, cmd->data); } -static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd) +static void hfi_v1_receiver(struct kgsl_device *device, + struct gmu_device *gmu, uint32_t *rcvd) { /* V1 ACK Handler */ if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_V1_MSG_ACK) { - receive_ack_cmd(gmu, rcvd); + receive_ack_cmd(device, gmu, rcvd); return; } @@ -506,6 +558,7 @@ static void hfi_v1_receiver(struct gmu_device *gmu, uint32_t *rcvd) void hfi_receiver(unsigned long data) { + struct kgsl_device *device; struct gmu_device *gmu; uint32_t rcvd[MAX_RCVD_SIZE]; int read_queue[] = { @@ -517,7 +570,8 @@ void hfi_receiver(unsigned long data) if (!data) return; - gmu = (struct gmu_device *)data; + device = (struct kgsl_device *)data; + gmu = KGSL_GMU_DEVICE(device); /* While we are here, check all of the queues for messages */ for (q = 0; q < ARRAY_SIZE(read_queue); q++) { @@ -525,13 +579,13 @@ void hfi_receiver(unsigned long data) rcvd, sizeof(rcvd)) > 0) { /* Special case if we're v1 */ if (HFI_VER_MAJOR(&gmu->hfi) < 2) { - hfi_v1_receiver(gmu, rcvd); + hfi_v1_receiver(device, gmu, rcvd); continue; } /* V2 ACK Handler */ if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) { - receive_ack_cmd(gmu, rcvd); + receive_ack_cmd(device, gmu, rcvd); continue; } @@ -604,11 +658,28 @@ int hfi_start(struct kgsl_device *device, struct gmu_device *gmu, uint32_t boot_state) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - int result; + struct gmu_memdesc *mem_addr = gmu->hfi_mem; + struct hfi_queue_table *tbl = mem_addr->hostptr; + struct hfi_queue_header *hdr; + int result, i; if (test_bit(GMU_HFI_ON, &gmu->flags)) return 0; + /* Force read_index to the write_index no matter what */ + for (i = 0; i < HFI_QUEUE_MAX; i++) { + hdr = &tbl->qhdr[i]; + if (hdr->status == HFI_QUEUE_STATUS_DISABLED) + continue; + + if (hdr->read_index != hdr->write_index) { + dev_err(&gmu->pdev->dev, + "HFI Q[%d] Index Error: read:0x%X write:0x%X\n", + i, hdr->read_index, hdr->write_index); + hdr->read_index = hdr->write_index; + } + } + if (!adreno_is_a640(adreno_dev) && !adreno_is_a680(adreno_dev)) { result = hfi_send_gmu_init(gmu, boot_state); if (result) @@ -619,7 +690,10 @@ int hfi_start(struct kgsl_device *device, if (result) return 
result; - result = hfi_send_dcvstbl(gmu); + if (HFI_VER_MAJOR(&gmu->hfi) < 2) + result = hfi_send_dcvstbl_v1(gmu); + else + result = hfi_send_dcvstbl(gmu); if (result) return result; @@ -632,9 +706,8 @@ int hfi_start(struct kgsl_device *device, * we are sending no more HFIs until the next boot otherwise * send H2F_MSG_CORE_FW_START and features for A640 devices */ - if (HFI_VER_MAJOR(&gmu->hfi) >= 2) { - result = hfi_send_feature_ctrls(gmu); + result = hfi_send_feature_ctrl(gmu, HFI_FEATURE_ECP, 0, 0); if (result) return result; @@ -666,14 +739,13 @@ void hfi_stop(struct gmu_device *gmu) /* Flush HFI queues */ for (i = 0; i < HFI_QUEUE_MAX; i++) { hdr = &tbl->qhdr[i]; + if (hdr->status == HFI_QUEUE_STATUS_DISABLED) + continue; if (hdr->read_index != hdr->write_index) dev_err(&gmu->pdev->dev, "HFI queue[%d] is not empty before close: rd=%d,wt=%d", i, hdr->read_index, hdr->write_index); - - hdr->read_index = 0x0; - hdr->write_index = 0x0; } clear_bit(GMU_HFI_ON, &gmu->flags); @@ -720,3 +792,33 @@ int hfi_send_req(struct gmu_device *gmu, unsigned int id, void *data) return -EINVAL; } + +/* HFI interrupt handler */ +irqreturn_t hfi_irq_handler(int irq, void *data) +{ + struct kgsl_device *device = data; + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct kgsl_hfi *hfi = &gmu->hfi; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int status = 0; + + adreno_read_gmureg(ADRENO_DEVICE(device), + ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status); + adreno_write_gmureg(ADRENO_DEVICE(device), + ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status); + + if (status & HFI_IRQ_MSGQ_MASK) + tasklet_hi_schedule(&hfi->tasklet); + if (status & HFI_IRQ_CM3_FAULT_MASK) { + dev_err_ratelimited(&gmu->pdev->dev, + "GMU CM3 fault interrupt received\n"); + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); + adreno_dispatcher_schedule(device); + } + if (status & ~HFI_IRQ_MASK) + dev_err_ratelimited(&gmu->pdev->dev, + "Unhandled HFI interrupts 0x%lx\n", + status & ~HFI_IRQ_MASK); + + return IRQ_HANDLED; +} diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h index 372b45a3cf9d369ec5c901677a9513747c51c90e..197e0d8aecb1619a57ef2881419bc89de5ce3c0c 100644 --- a/drivers/gpu/msm/kgsl_hfi.h +++ b/drivers/gpu/msm/kgsl_hfi.h @@ -38,6 +38,9 @@ #define HFI_DSP_IDX_BASE 3 #define HFI_DSP_IDX_0 3 +#define HFI_QUEUE_STATUS_DISABLED 0 +#define HFI_QUEUE_STATUS_ENABLED 1 + /* HTOF queue priority, 1 is highest priority */ #define HFI_CMD_PRI 10 #define HFI_MSG_PRI 10 @@ -128,7 +131,7 @@ struct hfi_queue_table_header { /** * struct hfi_queue_header - HFI queue header structure - * @enabled: active: 1; inactive: 0 + * @status: active: 1; inactive: 0 * @start_addr: starting address of the queue in GMU VA space * @type: queue type encoded the priority, ID and send/recevie types * @queue_size: size of the queue @@ -143,7 +146,7 @@ struct hfi_queue_table_header { * @write_index: write index of the queue */ struct hfi_queue_header { - uint32_t enabled; + uint32_t status; uint32_t start_addr; uint32_t type; uint32_t queue_size; @@ -249,14 +252,19 @@ struct hfi_bwbuf { uint32_t arr[NUM_BW_LEVELS]; }; -struct opp_desc { +struct opp_gx_desc { uint32_t vote; uint32_t acd; uint32_t freq; }; +struct opp_desc { + uint32_t vote; + uint32_t freq; +}; + /* H2F */ -struct hfi_dcvstable_cmd { +struct hfi_dcvstable_v1_cmd { uint32_t hdr; uint32_t gpu_level_num; uint32_t gmu_level_num; @@ -264,6 +272,15 @@ struct hfi_dcvstable_cmd { struct opp_desc cx_votes[MAX_CX_LEVELS]; }; +/* H2F */ +struct hfi_dcvstable_cmd { + 
uint32_t hdr; + uint32_t gpu_level_num; + uint32_t gmu_level_num; + struct opp_gx_desc gx_votes[MAX_GX_LEVELS]; + struct opp_desc cx_votes[MAX_CX_LEVELS]; +}; + /* H2F */ struct hfi_test_cmd { uint32_t hdr; @@ -406,7 +423,7 @@ struct hfi_prep_slumber_cmd { struct hfi_err_cmd { uint32_t hdr; uint32_t error_code; - uint32_t data[2]; + uint32_t data[16]; }; /* F2H */ @@ -615,6 +632,7 @@ struct kgsl_hfi { struct gmu_device; struct gmu_memdesc; +irqreturn_t hfi_irq_handler(int irq, void *data); int hfi_start(struct kgsl_device *device, struct gmu_device *gmu, uint32_t boot_state); void hfi_stop(struct gmu_device *gmu); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 72722618941c96bdb1b7b6cb9a2209741ad7c34e..acdc8421de92a4139283baae65e2717d7a679e1c 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -3361,3 +3361,32 @@ unsigned int kgsl_pwr_limits_get_freq(enum kgsl_deviceid id) return freq; } EXPORT_SYMBOL(kgsl_pwr_limits_get_freq); + +/** + * kgsl_pwrctrl_set_default_gpu_pwrlevel() - Set GPU to default power level + * @device: Pointer to the kgsl_device struct + */ +void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + unsigned int new_level = pwr->default_pwrlevel; + unsigned int old_level = pwr->active_pwrlevel; + + /* + * Update the level according to any thermal, + * max/min, or power constraints. + */ + new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level); + + /* + * If thermal cycling is required and the new level hits the + * thermal limit, kick off the cycling. + */ + kgsl_pwrctrl_set_thermal_cycle(device, new_level); + + pwr->active_pwrlevel = new_level; + pwr->previous_pwrlevel = old_level; + + /* Request adjusted DCVS level */ + kgsl_clk_set_rate(device, pwr->active_pwrlevel); +} diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index d3b9c9ccb001a9ae804344048faa8b59c007ce40..1800d14c389db888796854479ee665203d89d213 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -273,4 +273,5 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device, struct kgsl_pwr_constraint *pwrc, uint32_t id); void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device, unsigned long timeout_us); +void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device); #endif /* __KGSL_PWRCTRL_H */ diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c index 5e41611f7286a9c174df9a75aa95154b33320094..0ed17d859080ad16f9f9e54038b8d51149dc4155 100644 --- a/drivers/gpu/msm/kgsl_snapshot.c +++ b/drivers/gpu/msm/kgsl_snapshot.c @@ -719,8 +719,6 @@ void kgsl_device_snapshot(struct kgsl_device *device, if (device->ftbl->snapshot) device->ftbl->snapshot(device, snapshot, context); - if (device->ftbl->snapshot_gmu) - device->ftbl->snapshot_gmu(device, snapshot); } /* diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 8c7a0ce147a136c6596b1580b25de71c059507c2..eca4c9d97110c2e5a1de2eed8457fd44133515ba 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -436,10 +436,11 @@ config HID_LENOVO select NEW_LEDS select LEDS_CLASS ---help--- - Support for Lenovo devices that are not fully compliant with HID standard. + Support for IBM/Lenovo devices that are not fully compliant with HID standard. 
- Say Y if you want support for the non-compliant features of the Lenovo - Thinkpad standalone keyboards, e.g: + Say Y if you want support for horizontal scrolling of the IBM/Lenovo + Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad + standalone keyboards, e.g: - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint configuration) - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index ff539c0b4637114dcfc35dffe7541df9a20b0416..9e478f03e8456f18ee2df447602ebf7bc42c2ea6 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -532,6 +532,13 @@ #define USB_VENDOR_ID_HUION 0x256c #define USB_DEVICE_ID_HUION_TABLET 0x006e +#define USB_VENDOR_ID_IBM 0x04b3 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO 0x3103 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL 0x3105 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL 0x3108 +#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO 0x3109 + #define USB_VENDOR_ID_IDEACOM 0x1cb6 #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 @@ -664,6 +671,7 @@ #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 +#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 1ac4ff4d57a659fc89c6a2bf36b83ba2d679fdcc..643b6eb54442ed4bc297e182ad1b7c77a25e82c0 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -6,6 +6,17 @@ * * Copyright (c) 2012 Bernhard Seibold * Copyright (c) 2014 Jamie Lentin + * + * Linux IBM/Lenovo Scrollpoint mouse driver: + * - IBM Scrollpoint III + * - IBM Scrollpoint Pro + * - IBM Scrollpoint Optical + * - IBM Scrollpoint Optical 800dpi + * - IBM Scrollpoint Optical 800dpi Pro + * - Lenovo Scrollpoint Optical + * + * Copyright (c) 2012 Peter De Wachter + * Copyright (c) 2018 Peter Ganzhorn */ /* @@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, return 0; } +static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + if (usage->hid == HID_GD_Z) { + hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); + return 1; + } + return 0; +} + static int lenovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev, case USB_DEVICE_ID_LENOVO_CBTKBD: return lenovo_input_mapping_cptkbd(hdev, hi, field, usage, bit, max); + case USB_DEVICE_ID_IBM_SCROLLPOINT_III: + case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO: + case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL: + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL: + case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO: + case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL: + return lenovo_input_mapping_scrollpoint(hdev, hi, field, + usage, bit, max); default: return 0; } @@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) }, { } }; diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 20d824f74f99e6fd6809e68f9811dda8ec14fddf..90d7be08fea0019e8b0cb9d327fa5233c3759e01 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -204,8 +204,7 @@ static void ish_remove(struct pci_dev *pdev) kfree(ishtp_dev); } -#ifdef CONFIG_PM -static struct device *ish_resume_device; +static struct device __maybe_unused *ish_resume_device; /* 50ms to get resume response */ #define WAIT_FOR_RESUME_ACK_MS 50 @@ -219,7 +218,7 @@ static struct device *ish_resume_device; * in that case a simple resume message is enough, others we need * a reset sequence. */ -static void ish_resume_handler(struct work_struct *work) +static void __maybe_unused ish_resume_handler(struct work_struct *work) { struct pci_dev *pdev = to_pci_dev(ish_resume_device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -261,7 +260,7 @@ static void ish_resume_handler(struct work_struct *work) * * Return: 0 to the pm core */ -static int ish_suspend(struct device *device) +static int __maybe_unused ish_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -287,7 +286,7 @@ static int ish_suspend(struct device *device) return 0; } -static DECLARE_WORK(resume_work, ish_resume_handler); +static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler); /** * ish_resume() - ISH resume callback * @device: device pointer @@ -296,7 +295,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler); * * Return: 0 to the pm core */ -static int ish_resume(struct device *device) +static int __maybe_unused ish_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ishtp_device *dev = pci_get_drvdata(pdev); @@ -310,21 +309,14 @@ static int ish_resume(struct device *device) return 0; } -static const struct dev_pm_ops ish_pm_ops = { - .suspend = ish_suspend, - .resume = ish_resume, -}; -#define ISHTP_ISH_PM_OPS (&ish_pm_ops) -#else -#define ISHTP_ISH_PM_OPS NULL -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume); static struct pci_driver ish_driver = { .name = KBUILD_MODNAME, .id_table = ish_pci_tbl, .probe = ish_probe, .remove = ish_remove, - .driver.pm = ISHTP_ISH_PM_OPS, + .driver.pm = &ish_pm_ops, }; module_pci_driver(ish_driver); diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c index f272cdd9bd558311c27a82729c5eb314cade2a1f..2623a567ffba5ae51e90653e47bea42127ea9b02 100644 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c @@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev, list_del(&device->device_link); spin_unlock_irqrestore(&dev->device_list_lock, flags); dev_err(dev->devc, "Failed to register ISHTP client device\n"); - kfree(device); + put_device(&device->dev); return 
NULL; } diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 4c337585479eb403b94832eb57f1bf769ec0a4c4..18d5b99d13f1b94711396c7c88980c745d50bf1f 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -284,6 +284,14 @@ static void wacom_usage_mapping(struct hid_device *hdev, } } + /* 2nd-generation Intuos Pro Large has incorrect Y maximum */ + if (hdev->vendor == USB_VENDOR_ID_WACOM && + hdev->product == 0x0358 && + WACOM_PEN_FIELD(field) && + wacom_equivalent_usage(usage->hid) == HID_GD_Y) { + field->logical_maximum = 43200; + } + switch (usage->hid) { case HID_GD_X: features->x_max = field->logical_maximum; @@ -1102,8 +1110,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom, devres->root = root; error = sysfs_create_group(devres->root, group); - if (error) + if (error) { + devres_free(devres); return error; + } devres_add(&wacom->hdev->dev, devres); diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c index e4101963b4ab86b1300e754efb374682c76b721c..1637ad234110470ed66eb4f4304b0708271689ba 100644 --- a/drivers/hwtracing/coresight/coresight-csr.c +++ b/drivers/hwtracing/coresight/coresight-csr.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "coresight-priv.h" @@ -88,6 +89,8 @@ struct csr_drvdata { }; static LIST_HEAD(csr_list); +static DEFINE_MUTEX(csr_lock); + #define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr) void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr) @@ -236,12 +239,15 @@ EXPORT_SYMBOL(coresight_csr_set_byte_cntr); struct coresight_csr *coresight_csr_get(const char *name) { struct coresight_csr *csr; - + mutex_lock(&csr_lock); list_for_each_entry(csr, &csr_list, link) { - if (!strcmp(csr->name, name)) + if (!strcmp(csr->name, name)) { + mutex_unlock(&csr_lock); return csr; + } } + mutex_unlock(&csr_lock); return ERR_PTR(-EINVAL); } EXPORT_SYMBOL(coresight_csr_get); @@ -391,7 +397,10 @@ static int csr_probe(struct platform_device *pdev) spin_lock_init(&drvdata->spin_lock); drvdata->csr.name = ((struct coresight_platform_data *) (pdev->dev.platform_data))->name; + + mutex_lock(&csr_lock); list_add_tail(&drvdata->csr.link, &csr_list); + mutex_unlock(&csr_lock); dev_info(dev, "CSR initialized: %s\n", drvdata->csr.name); return 0; @@ -399,12 +408,13 @@ static int csr_probe(struct platform_device *pdev) static int csr_remove(struct platform_device *pdev) { - unsigned long flags; struct csr_drvdata *drvdata = platform_get_drvdata(pdev); - spin_lock_irqsave(&drvdata->spin_lock, flags); + mutex_lock(&csr_lock); + list_del(&drvdata->csr.link); + mutex_unlock(&csr_lock); + coresight_unregister(drvdata->csdev); - spin_unlock_irqrestore(&drvdata->spin_lock, flags); return 0; } diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 8fb2d1d2203030b12f454ac65e4eca144643872f..9e12d3df16d249650befbd6240e9f28ca14d8d8f 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -184,7 +184,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) kfree(buf); if (!ret) { - coresight_cti_map_trigin(drvdata->cti_reset, 2, 0); + coresight_cti_map_trigin(drvdata->cti_reset, 0, 0); coresight_cti_map_trigout(drvdata->cti_flush, 1, 0); dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); } @@ -266,7 +266,7 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev) spin_unlock_irqrestore(&drvdata->spinlock, flags); - 
coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0); + coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0); coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0); dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 74b31597ec5ad09a31cb5836e233f6f0c467e965..b857b9c59fb42f693e4eef62c350d3113e21287e 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -719,12 +719,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) dev_err(dev, "invalid cti data\n"); } else if (ctidata && ctidata->nr_ctis == 2) { drvdata->cti_flush = coresight_cti_get(ctidata->names[0]); - if (IS_ERR(drvdata->cti_flush)) - dev_err(dev, "failed to get flush cti\n"); + if (IS_ERR(drvdata->cti_flush)) { + dev_err(dev, "failed to get flush cti, defer probe\n"); + tmc_iommu_deinit(drvdata); + return -EPROBE_DEFER; + } drvdata->cti_reset = coresight_cti_get(ctidata->names[1]); - if (IS_ERR(drvdata->cti_reset)) - dev_err(dev, "failed to get reset cti\n"); + if (IS_ERR(drvdata->cti_reset)) { + dev_err(dev, "failed to get reset cti, defer probe\n"); + tmc_iommu_deinit(drvdata); + return -EPROBE_DEFER; + } } ret = of_get_coresight_csr_name(adev->dev.of_node, &drvdata->csr_name); diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 591f97040e406dbd98733dd9f679e36061483900..ef79c9efd8409adc432af692922a154c1cc81f8e 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -99,6 +99,23 @@ static int coresight_source_is_unique(struct coresight_device *csdev) csdev, coresight_id_match); } +static int coresight_reset_sink(struct device *dev, void *data) +{ + struct coresight_device *csdev = to_coresight_device(dev); + + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && + csdev->activated) + csdev->activated = false; + + return 0; +} + +static void coresight_reset_all_sink(void) +{ + bus_for_each_dev(&coresight_bustype, NULL, NULL, coresight_reset_sink); +} + static int coresight_find_link_inport(struct coresight_device *csdev, struct coresight_device *parent) { @@ -1018,6 +1035,9 @@ static ssize_t reset_source_sink_store(struct bus_type *bus, __coresight_disable(csdev); } + /* Reset all activated sinks */ + coresight_reset_all_sink(); + mutex_unlock(&coresight_mutex); return size; } diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index fedbaa9733cb1ad92c23a5665403bc433e3ae794..24be5a8454acfdf45583ca8a95510cd948617c0c 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -175,8 +175,9 @@ static int stp_master_alloc(struct stm_device *stm, unsigned int idx) { struct stp_master *master; size_t size; + unsigned long align = sizeof(unsigned long); - size = ALIGN(stm->data->sw_nchannels, 8) / 8; + size = ALIGN(stm->data->sw_nchannels, align) / align; size += sizeof(struct stp_master); master = kzalloc(size, GFP_ATOMIC); if (!master) diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 2aa0e83174c52895a0fb1416e8a17a00d31b24c6..dae8ac618a5221fdd886afab417f88945af7f143 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap, * TODO: We could potentially loop and retry in the case * of MSP_TWI_XFER_TIMEOUT. 
*/ - return -1; + return -EIO; } - return 0; + return num; } static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter) diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 25fcc3c1e32bf3d9a41fa345982039fb234dbcbd..4053259bccb8d704d9d386287086841690174844 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c @@ -86,6 +86,7 @@ struct sprd_i2c { u32 count; int irq; int err; + bool is_suspended; }; static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count) @@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap, struct sprd_i2c *i2c_dev = i2c_adap->algo_data; int im, ret; + if (i2c_dev->is_suspended) + return -EBUSY; + ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) return ret; @@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id) struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we got one ACK from slave when writing data, and we did not @@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id) { struct sprd_i2c *i2c_dev = dev_id; struct i2c_msg *msg = i2c_dev->msg; - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT); bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK); u32 i2c_tran; if (msg->flags & I2C_M_RD) i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD; else - i2c_tran = i2c_count; + i2c_tran = i2c_dev->count; /* * If we did not get one ACK from slave when writing data, then we @@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev) static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = true; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_suspend(pdev); } static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev) { + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adap); + i2c_dev->is_suspended = false; + i2c_unlock_adapter(&i2c_dev->adap); + return pm_runtime_force_resume(pdev); } diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index e4be86b3de9a28b201bcd197470ab3a3a78ee683..7235c7302bb7cd000db814ec12de03193a22ef01 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c @@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, } mutex_unlock(&vb->lock); } - return 0; + return num; error: mutex_unlock(&vb->lock); return error; diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c index 39ab210c44f69b5e273a202cb6d61834f64bf96d..565f7d8d3304a3eba3b13d69ad568cc7f916b9bd 100644 --- a/drivers/iio/accel/sca3000.c +++ b/drivers/iio/accel/sca3000.c @@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev) { struct iio_buffer *buffer; - buffer = iio_kfifo_allocate(); + buffer = devm_iio_kfifo_allocate(&indio_dev->dev); if (!buffer) return -ENOMEM; @@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev) return 0; } -static void sca3000_unconfigure_ring(struct iio_dev *indio_dev) -{ - iio_kfifo_free(indio_dev->buffer); -} - static inline int __sca3000_hw_ring_state_set(struct 
iio_dev *indio_dev, bool state) { @@ -1547,8 +1542,6 @@ static int sca3000_remove(struct spi_device *spi) if (spi->irq) free_irq(spi->irq, indio_dev); - sca3000_unconfigure_ring(indio_dev); - return 0; } diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c index 34e353c43ac8bbfbb2d1b38299b4964f94e5b115..677f812f372a264753a16b553c8e30e5cb2ed7db 100644 --- a/drivers/iio/adc/ad7791.c +++ b/drivers/iio/adc/ad7791.c @@ -244,58 +244,9 @@ static int ad7791_read_raw(struct iio_dev *indio_dev, return -EINVAL; } -static const char * const ad7791_sample_freq_avail[] = { - [AD7791_FILTER_RATE_120] = "120", - [AD7791_FILTER_RATE_100] = "100", - [AD7791_FILTER_RATE_33_3] = "33.3", - [AD7791_FILTER_RATE_20] = "20", - [AD7791_FILTER_RATE_16_6] = "16.6", - [AD7791_FILTER_RATE_16_7] = "16.7", - [AD7791_FILTER_RATE_13_3] = "13.3", - [AD7791_FILTER_RATE_9_5] = "9.5", -}; - -static ssize_t ad7791_read_frequency(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7791_state *st = iio_priv(indio_dev); - unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK; - - return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]); -} - -static ssize_t ad7791_write_frequency(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct ad7791_state *st = iio_priv(indio_dev); - int i, ret; - - i = sysfs_match_string(ad7791_sample_freq_avail, buf); - if (i < 0) - return i; - - ret = iio_device_claim_direct_mode(indio_dev); - if (ret) - return ret; - st->filter &= ~AD7791_FILTER_RATE_MASK; - st->filter |= i; - ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter), - st->filter); - iio_device_release_direct_mode(indio_dev); - - return len; -} - -static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, - ad7791_read_frequency, - ad7791_write_frequency); - static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5"); static struct attribute *ad7791_attributes[] = { - &iio_dev_attr_sampling_frequency.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, NULL }; diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 3726205c8704d846e0b2bbd927efd20ecae3c51f..7507cc641de34e814beed6e9e73782fd2cfd26b2 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -60,9 +60,12 @@ config INFINIBAND_ON_DEMAND_PAGING pages on demand instead. config INFINIBAND_ADDR_TRANS - bool + bool "RDMA/CM" depends on INFINIBAND default y + ---help--- + Support for RDMA communication manager (CM). + This allows for a generic connection abstraction over RDMA. 
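Reviewer note (illustration only, not part of the patch): now that INFINIBAND_ADDR_TRANS carries a prompt, other options can gate themselves on the RDMA connection manager explicitly instead of assuming it is always built. A minimal, hypothetical Kconfig sketch of such a consumer is shown below; the symbol EXAMPLE_RDMA_CLIENT is invented purely for illustration and does not exist in the tree.

    config EXAMPLE_RDMA_CLIENT
            tristate "Example RDMA CM consumer (illustrative only)"
            depends on INFINIBAND_ADDR_TRANS
            ---help---
              Hypothetical client that sets up connections through the
              RDMA connection manager (rdma_create_id()/rdma_connect()).
              It only becomes selectable when the CM is enabled.

With the prompt added above, disabling RDMA/CM in menuconfig would also hide any option that depends on it, which is the practical effect of turning the symbol from a silent bool into a user-visible one.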
config INFINIBAND_ADDR_TRANS_CONFIGFS bool diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6c725c435f5dd745df6f1805e626c16469839640..79843a3ca9dcd53d8cc4362bb72b9124ed33bd65 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -420,6 +420,8 @@ struct cma_hdr { #define CMA_VERSION 0x00 struct cma_req_info { + struct sockaddr_storage listen_addr_storage; + struct sockaddr_storage src_addr_storage; struct ib_device *device; int port; union ib_gid local_gid; @@ -898,7 +900,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; - union ib_gid sgid; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { @@ -921,12 +922,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, if (ret) goto out; - ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, - rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index, - &sgid, NULL); - if (ret) - goto out; - BUG_ON(id_priv->cma_dev->device != id_priv->id.device); if (conn_param) @@ -1372,11 +1367,11 @@ static bool validate_net_dev(struct net_device *net_dev, } static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, - const struct cma_req_info *req) + struct cma_req_info *req) { - struct sockaddr_storage listen_addr_storage, src_addr_storage; - struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage, - *src_addr = (struct sockaddr *)&src_addr_storage; + struct sockaddr *listen_addr = + (struct sockaddr *)&req->listen_addr_storage; + struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; struct net_device *net_dev; const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; int err; @@ -1391,11 +1386,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event, if (!net_dev) return ERR_PTR(-ENODEV); - if (!validate_net_dev(net_dev, listen_addr, src_addr)) { - dev_put(net_dev); - return ERR_PTR(-EHOSTUNREACH); - } - return net_dev; } @@ -1531,15 +1521,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, } } + /* + * Net namespace might be getting deleted while route lookup, + * cm_id lookup is in progress. Therefore, perform netdevice + * validation, cm_id lookup under rcu lock. + * RCU lock along with netdevice state check, synchronizes with + * netdevice migrating to different net namespace and also avoids + * case where net namespace doesn't get deleted while lookup is in + * progress. + * If the device state is not IFF_UP, its properties such as ifindex + * and nd_net cannot be trusted to remain valid without rcu lock. + * net/core/dev.c change_net_namespace() ensures to synchronize with + * ongoing operations on net device after device is closed using + * synchronize_net(). + */ + rcu_read_lock(); + if (*net_dev) { + /* + * If netdevice is down, it is likely that it is administratively + * down or it might be migrating to different namespace. + * In that case avoid further processing, as the net namespace + * or ifindex may change. + */ + if (((*net_dev)->flags & IFF_UP) == 0) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + + if (!validate_net_dev(*net_dev, + (struct sockaddr *)&req.listen_addr_storage, + (struct sockaddr *)&req.src_addr_storage)) { + id_priv = ERR_PTR(-EHOSTUNREACH); + goto err; + } + } + bind_list = cma_ps_find(*net_dev ? 
dev_net(*net_dev) : &init_net, rdma_ps_from_service_id(req.service_id), cma_port_from_service_id(req.service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); +err: + rcu_read_unlock(); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } - return id_priv; } diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 81528f64061a49df3715a8401ad4dc73f24528b9..cb0fecc958b5570326999f61cb08ba8c435e1937 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, struct sockaddr_storage *mapped_sockaddr, u8 nl_client) { - struct hlist_head *hash_bucket_head; + struct hlist_head *hash_bucket_head = NULL; struct iwpm_mapping_info *map_info; unsigned long flags; int ret = -EINVAL; @@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, } } spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); + + if (!hash_bucket_head) + kfree(map_info); return ret; } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index cb91245e91633282d3421f8d2fc5f35323674079..d8efdc191c27f0cda1a5458d523b402811c87805 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -60,7 +60,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); static struct list_head ib_mad_port_list; -static u32 ib_mad_client_id = 0; +static atomic_t ib_mad_client_id = ATOMIC_INIT(0); /* Port list lock */ static DEFINE_SPINLOCK(ib_mad_port_list_lock); @@ -378,7 +378,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, } spin_lock_irqsave(&port_priv->reg_lock, flags); - mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; + mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id); /* * Make sure MAD registration (if supplied) diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 2b6c9b5160705a95d779b22aec4292904ec3b040..d76455edd2923bc36dbbc1c459cfaa3a85c38565 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -119,16 +119,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, umem->length = size; umem->address = addr; umem->page_shift = PAGE_SHIFT; - /* - * We ask for writable memory if any of the following - * access flags are set. "Local write" and "remote write" - * obviously require write access. "Remote atomic" can do - * things like fetch and add, which will modify memory, and - * "MW bind" can change permissions by binding a window. 
- */ - umem->writable = !!(access & - (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); + umem->writable = ib_access_writable(access); if (access & IB_ACCESS_ON_DEMAND) { ret = ib_umem_odp_get(context, umem, access); diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index 5e9f72ea45790a96dee57d7eb2005f8717b3da45..5feb8bbeff18c86691a5b90e33804dc392816a70 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -191,6 +191,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met return -EINVAL; } + for (; i < method_spec->num_buckets; i++) { + struct uverbs_attr_spec_hash *attr_spec_bucket = + method_spec->attr_buckets[i]; + + if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask, + attr_spec_bucket->num_attrs)) + return -EINVAL; + } + return 0; } diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a97055dd4fbdeeefcd9be4b39deebb5939e958eb..b5fab55cc275068166369c184a759ab9a925849d 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix) static int get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) { - int ret; cpumask_var_t diff; struct hfi1_affinity_node *entry; struct cpu_mask_set *set = NULL; @@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, extra[0] = '\0'; cpumask_clear(&msix->mask); - ret = zalloc_cpumask_var(&diff, GFP_KERNEL); - if (!ret) - return -ENOMEM; - entry = node_affinity_lookup(dd->node); switch (msix->type) { @@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd, * finds its CPU here. */ if (cpu == -1 && set) { + if (!zalloc_cpumask_var(&diff, GFP_KERNEL)) + return -ENOMEM; + if (cpumask_equal(&set->mask, &set->used)) { /* * We've used up all the CPUs, bump up the generation @@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd, cpumask_andnot(diff, &set->mask, &set->used); cpu = cpumask_first(diff); cpumask_set_cpu(cpu, &set->used); + + free_cpumask_var(diff); } cpumask_set_cpu(cpu, &msix->mask); @@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd, hfi1_setup_sdma_notifier(msix); } - free_cpumask_var(diff); return 0; } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 2595622826688d831f52c1ff3ce858619e3bd6b1..33cf1734c4e5d492766aa2a4e5e211a63766c11d 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -6829,7 +6829,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) } rcvmask = HFI1_RCVCTRL_CTXT_ENB; /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ - rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ? + rcvmask |= rcd->rcvhdrtail_kvaddr ? 
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(dd, rcvmask, rcd); hfi1_rcd_put(rcd); @@ -8341,7 +8341,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) u32 tail; int present; - if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) + if (!rcd->rcvhdrtail_kvaddr) present = (rcd->seq_cnt == rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); else /* is RDMA rtail */ @@ -11813,7 +11813,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, /* reset the tail and hdr addresses, and sequence count */ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, rcd->rcvhdrq_dma); - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) + if (rcd->rcvhdrtail_kvaddr) write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, rcd->rcvhdrqtailaddr_dma); rcd->seq_cnt = 1; @@ -11893,7 +11893,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; - if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma) + if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; if (op & HFI1_RCVCTRL_TAILUPD_DIS) { /* See comment on RcvCtxtCtrl.TailUpd above */ diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index 36ae1fd86502d0bc33f46e4e8ea600675a3e4ef3..f661b387e916ccb09e83cd29fc438ff9ac7756ec 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -1179,7 +1179,8 @@ DEBUGFS_FILE_OPS(fault_stats); static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd) { - debugfs_remove_recursive(ibd->fault_opcode->dir); + if (ibd->fault_opcode) + debugfs_remove_recursive(ibd->fault_opcode->dir); kfree(ibd->fault_opcode); ibd->fault_opcode = NULL; } @@ -1207,6 +1208,7 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd) &ibd->fault_opcode->attr); if (IS_ERR(ibd->fault_opcode->dir)) { kfree(ibd->fault_opcode); + ibd->fault_opcode = NULL; return -ENOENT; } @@ -1230,7 +1232,8 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd) static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd) { - debugfs_remove_recursive(ibd->fault_packet->dir); + if (ibd->fault_packet) + debugfs_remove_recursive(ibd->fault_packet->dir); kfree(ibd->fault_packet); ibd->fault_packet = NULL; } @@ -1256,6 +1259,7 @@ static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd) &ibd->fault_opcode->attr); if (IS_ERR(ibd->fault_packet->dir)) { kfree(ibd->fault_packet); + ibd->fault_packet = NULL; return -ENOENT; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index ee2253d069844c437e58225888744d29914e3954..9abc5a9c47a0d44790764d82bd8cea89073708dd 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -622,7 +622,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) ret = -EINVAL; goto done; } - if (flags & VM_WRITE) { + if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) { ret = -EPERM; goto done; } @@ -807,8 +807,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) * checks to default and disable the send context. 
*/ if (uctxt->sc) { - set_pio_integrity(uctxt->sc); sc_disable(uctxt->sc); + set_pio_integrity(uctxt->sc); } hfi1_free_ctxt_rcv_groups(uctxt); diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index ccc7b9b8637ed4eaa1c27c706dafceb3a1307b28..13a7bcaa58e692e18c66b71cf3f9496c49b94753 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1851,6 +1851,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd) #define HFI1_HAS_SDMA_TIMEOUT 0x8 #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */ #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */ +#define HFI1_SHUTDOWN 0x100 /* device is shutting down */ /* IB dword length mask in PBC (lower 11 bits); same for all chips */ #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1) diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index d5c6ff843fc64b96b2d823f8de915d48797d2c9c..ee5cbdfeb3ab62396674e91d87f3ef8cdc006780 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -88,9 +88,9 @@ * pio buffers per ctxt, etc.) Zero means use one user context per CPU. */ int num_user_contexts = -1; -module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO); +module_param_named(num_user_contexts, num_user_contexts, int, 0444); MODULE_PARM_DESC( - num_user_contexts, "Set max number of user contexts to use"); + num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)"); uint krcvqs[RXE_NUM_DATA_VL]; int krcvqsset; @@ -1029,6 +1029,10 @@ static void shutdown_device(struct hfi1_devdata *dd) unsigned pidx; int i; + if (dd->flags & HFI1_SHUTDOWN) + return; + dd->flags |= HFI1_SHUTDOWN; + for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; @@ -1353,6 +1357,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd) static void remove_one(struct pci_dev *); static int init_one(struct pci_dev *, const struct pci_device_id *); +static void shutdown_one(struct pci_dev *); #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " #define PFX DRIVER_NAME ": " @@ -1369,6 +1374,7 @@ static struct pci_driver hfi1_pci_driver = { .name = DRIVER_NAME, .probe = init_one, .remove = remove_one, + .shutdown = shutdown_one, .id_table = hfi1_pci_tbl, .err_handler = &hfi1_pci_err_handler, }; @@ -1780,6 +1786,13 @@ static void remove_one(struct pci_dev *pdev) postinit_cleanup(dd); } +static void shutdown_one(struct pci_dev *pdev) +{ + struct hfi1_devdata *dd = pci_get_drvdata(pdev); + + shutdown_device(dd); +} + /** * hfi1_create_rcvhdrq - create a receive header queue * @dd: the hfi1_ib device @@ -1795,7 +1808,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) u64 reg; if (!rcd->rcvhdrq) { - dma_addr_t dma_hdrqtail; gfp_t gfp_flags; /* @@ -1821,13 +1833,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) goto bail; } - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { + if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || + HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( - &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail, - gfp_flags); + &dd->pcidev->dev, PAGE_SIZE, + &rcd->rcvhdrqtailaddr_dma, gfp_flags); if (!rcd->rcvhdrtail_kvaddr) goto bail_free; - rcd->rcvhdrqtailaddr_dma = dma_hdrqtail; } rcd->rcvhdrq_size = amt; diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 
7108a4b5e94cdef45f27722847065388a36a6f85..a95ac62465592080ea60a2db31416e2b7b335288 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -50,8 +50,6 @@ #include "qp.h" #include "trace.h" -#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */ - #define SC(name) SEND_CTXT_##name /* * Send Context functions @@ -977,15 +975,40 @@ void sc_disable(struct send_context *sc) } /* return SendEgressCtxtStatus.PacketOccupancy */ -#define packet_occupancy(r) \ - (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\ - >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT) +static u64 packet_occupancy(u64 reg) +{ + return (reg & + SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK) + >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT; +} /* is egress halted on the context? */ -#define egress_halted(r) \ - ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK) +static bool egress_halted(u64 reg) +{ + return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK); +} -/* wait for packet egress, optionally pause for credit return */ +/* is the send context halted? */ +static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context) +{ + return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & + SC(STATUS_CTXT_HALTED_SMASK)); +} + +/** + * sc_wait_for_packet_egress + * @sc: valid send context + * @pause: wait for credit return + * + * Wait for packet egress, optionally pause for credit return + * + * Egress halt and Context halt are not necessarily the same thing, so + * check for both. + * + * NOTE: The context halt bit may not be set immediately. Because of this, + * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW + * context bit to determine if the context is halted. + */ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) { struct hfi1_devdata *dd = sc->dd; @@ -997,8 +1020,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) reg_prev = reg; reg = read_csr(dd, sc->hw_context * 8 + SEND_EGRESS_CTXT_STATUS); - /* done if egress is stopped */ - if (egress_halted(reg)) + /* done if any halt bits, SW or HW are set */ + if (sc->flags & SCF_HALTED || + is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) break; reg = packet_occupancy(reg); if (reg == 0) diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 0793a21d76f4c5bf82526e5cbc4ec5e5606f90b5..d604b3d5aa3e4a7d21fc22a9a669b6fd299ec8f7 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) "buf:%lld\n", wc.wr_id); break; default: - BUG_ON(1); break; } } else { diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index e80a7f764a7401d8cc3983d3f9974a98ad185d5c..1587cedee13e2df7a1515700d6eaabdce862bccc 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -131,6 +131,40 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, return err; } +static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start, + u64 length, u64 virt_addr, + int access_flags) +{ + /* + * Force registering the memory as writable if the underlying pages + * are writable. 
This is so rereg can change the access permissions + * from readable to writable without having to run through ib_umem_get + * again + */ + if (!ib_access_writable(access_flags)) { + struct vm_area_struct *vma; + + down_read(¤t->mm->mmap_sem); + /* + * FIXME: Ideally this would iterate over all the vmas that + * cover the memory, but for now it requires a single vma to + * entirely cover the MR to support RO mappings. + */ + vma = find_vma(current->mm, start); + if (vma && vma->vm_end >= start + length && + vma->vm_start <= start) { + if (vma->vm_flags & VM_WRITE) + access_flags |= IB_ACCESS_LOCAL_WRITE; + } else { + access_flags |= IB_ACCESS_LOCAL_WRITE; + } + + up_read(¤t->mm->mmap_sem); + } + + return ib_umem_get(context, start, length, access_flags, 0); +} + struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) @@ -145,10 +179,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (!mr) return ERR_PTR(-ENOMEM); - /* Force registering the memory as writable. */ - /* Used for memory re-registeration. HCA protects the access */ - mr->umem = ib_umem_get(pd->uobject->context, start, length, - access_flags | IB_ACCESS_LOCAL_WRITE, 0); + mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length, + virt_addr, access_flags); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err_free; @@ -215,6 +247,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, } if (flags & IB_MR_REREG_ACCESS) { + if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) + return -EPERM; + err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, convert_access(mr_access_flags)); @@ -228,10 +263,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr); ib_umem_release(mmr->umem); - mmr->umem = ib_umem_get(mr->uobject->context, start, length, - mr_access_flags | - IB_ACCESS_LOCAL_WRITE, - 0); + mmr->umem = + mlx4_get_umem_mr(mr->uobject->context, start, length, + virt_addr, mr_access_flags); if (IS_ERR(mmr->umem)) { err = PTR_ERR(mmr->umem); /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */ diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index d804880d637a418ced9c798d025708cebd8ad7b5..be6612fc33acd813704d8fa183460f0416202c3a 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -646,7 +646,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, } static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, - struct ib_wc *wc) + struct ib_wc *wc, bool is_fatal_err) { struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); struct mlx5_ib_wc *soft_wc, *next; @@ -659,6 +659,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n", cq->mcq.cqn); + if (unlikely(is_fatal_err)) { + soft_wc->wc.status = IB_WC_WR_FLUSH_ERR; + soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR; + } wc[npolled++] = soft_wc->wc; list_del(&soft_wc->list); kfree(soft_wc); @@ -679,12 +683,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) spin_lock_irqsave(&cq->lock, flags); if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled); + /* make sure no soft wqe's are waiting */ + if (unlikely(!list_empty(&cq->wc_list))) + soft_polled = poll_soft_wc(cq, num_entries, wc, true); + + mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled, + wc + soft_polled, 
&npolled); goto out; } if (unlikely(!list_empty(&cq->wc_list))) - soft_polled = poll_soft_wc(cq, num_entries, wc); + soft_polled = poll_soft_wc(cq, num_entries, wc, false); for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index f9e1c69603a5626c5f091bd687c0947e9e67d9ac..1dda4a2623c9a9126a5ca951286b83274382be77 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1250,6 +1250,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port) #define QIB_BADINTR 0x8000 /* severe interrupt problems */ #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */ #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */ +#define QIB_SHUTDOWN 0x40000 /* device is shutting down */ /* * values for ppd->lflags (_ib_port_ related flags) @@ -1448,8 +1449,7 @@ u64 qib_sps_ints(void); /* * dma_addr wrappers - all 0's invalid for hw */ -dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long, - size_t, int); +int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr); const char *qib_get_unit_name(int unit); const char *qib_get_card_name(struct rvt_dev_info *rdi); struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi); diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 9396c1807cc3ec5d023f1bb1ce60a2d359da0a2f..40efc9151ec4947acf24876f9eac1ad2a81a80e1 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, goto done; } for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { + dma_addr_t daddr; + for (; ntids--; tid++) { if (tid == tidcnt) tid = 0; @@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, ret = -ENOMEM; break; } + ret = qib_map_page(dd->pcidev, pagep[i], &daddr); + if (ret) + break; + tidlist[i] = tid + tidoff; /* we "know" system pages and TID pages are same size */ dd->pageshadow[ctxttid + tid] = pagep[i]; - dd->physshadow[ctxttid + tid] = - qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + dd->physshadow[ctxttid + tid] = daddr; /* * don't need atomic or it's overhead */ diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index c5a4c65636d635701d7c3df99c70679eedb72796..7ba7d2122f3b30e31135ba5d4d9361c92190b3c6 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -850,6 +850,10 @@ static void qib_shutdown_device(struct qib_devdata *dd) struct qib_pportdata *ppd; unsigned pidx; + if (dd->flags & QIB_SHUTDOWN) + return; + dd->flags |= QIB_SHUTDOWN; + for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; @@ -1189,6 +1193,7 @@ void qib_disable_after_error(struct qib_devdata *dd) static void qib_remove_one(struct pci_dev *); static int qib_init_one(struct pci_dev *, const struct pci_device_id *); +static void qib_shutdown_one(struct pci_dev *); #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " #define PFX QIB_DRV_NAME ": " @@ -1206,6 +1211,7 @@ static struct pci_driver qib_driver = { .name = QIB_DRV_NAME, .probe = qib_init_one, .remove = qib_remove_one, + .shutdown = qib_shutdown_one, .id_table = qib_pci_tbl, .err_handler = &qib_pci_err_handler, }; @@ -1556,6 +1562,13 @@ static void qib_remove_one(struct pci_dev 
*pdev) qib_postinit_cleanup(dd); } +static void qib_shutdown_one(struct pci_dev *pdev) +{ + struct qib_devdata *dd = pci_get_drvdata(pdev); + + qib_shutdown_device(dd); +} + /** * qib_create_rcvhdrq - create a receive header queue * @dd: the qlogic_ib device diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index ce83ba9a12eff6d44749dcde94dd85449d85d27b..16543d5e80c3ab8f53222c6d9d2765df400fbb07 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -99,23 +99,27 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, * * I'm sure we won't be so lucky with other iommu's, so FIXME. */ -dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) +int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr) { dma_addr_t phys; - phys = pci_map_page(hwdev, page, offset, size, direction); + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(hwdev, phys)) + return -ENOMEM; - if (phys == 0) { - pci_unmap_page(hwdev, phys, size, direction); - phys = pci_map_page(hwdev, page, offset, size, direction); + if (!phys) { + pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE); + phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(hwdev, phys)) + return -ENOMEM; /* * FIXME: If we get 0 again, we should keep this page, * map another, then free the 0 page. */ } - - return phys; + *daddr = phys; + return 0; } /** diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c index 88fa4d44ab5fbe8e504301b6990625bf4990edb7..76a86f805233475671e8f0c01717d1319334db5f 100644 --- a/drivers/infiniband/sw/rdmavt/cq.c +++ b/drivers/infiniband/sw/rdmavt/cq.c @@ -121,17 +121,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) if (cq->notify == IB_CQ_NEXT_COMP || (cq->notify == IB_CQ_SOLICITED && (solicited || entry->status != IB_WC_SUCCESS))) { + struct kthread_worker *worker; + /* * This will cause send_complete() to be called in * another thread. 
*/ - spin_lock(&cq->rdi->n_cqs_lock); - if (likely(cq->rdi->worker)) { + rcu_read_lock(); + worker = rcu_dereference(cq->rdi->worker); + if (likely(worker)) { cq->notify = RVT_CQ_NONE; cq->triggered++; - kthread_queue_work(cq->rdi->worker, &cq->comptask); + kthread_queue_work(worker, &cq->comptask); } - spin_unlock(&cq->rdi->n_cqs_lock); + rcu_read_unlock(); } spin_unlock_irqrestore(&cq->lock, flags); @@ -513,7 +516,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) int cpu; struct kthread_worker *worker; - if (rdi->worker) + if (rcu_access_pointer(rdi->worker)) return 0; spin_lock_init(&rdi->n_cqs_lock); @@ -525,7 +528,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi) return PTR_ERR(worker); set_user_nice(worker->task, MIN_NICE); - rdi->worker = worker; + RCU_INIT_POINTER(rdi->worker, worker); return 0; } @@ -537,15 +540,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi) { struct kthread_worker *worker; - /* block future queuing from send_complete() */ - spin_lock_irq(&rdi->n_cqs_lock); - worker = rdi->worker; + if (!rcu_access_pointer(rdi->worker)) + return; + + spin_lock(&rdi->n_cqs_lock); + worker = rcu_dereference_protected(rdi->worker, + lockdep_is_held(&rdi->n_cqs_lock)); if (!worker) { - spin_unlock_irq(&rdi->n_cqs_lock); + spin_unlock(&rdi->n_cqs_lock); return; } - rdi->worker = NULL; - spin_unlock_irq(&rdi->n_cqs_lock); + RCU_INIT_POINTER(rdi->worker, NULL); + spin_unlock(&rdi->n_cqs_lock); + synchronize_rcu(); kthread_destroy_worker(worker); } diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 61927c165b598af92e572bbc9ce9bbad2b6ecb32..4cf11063e0b597bee02532a52ea648d0a26c2eac 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK, + | RXE_END_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 44b838ec9420dc6aabc0a52a9277bdd1840c41a1..54cc9cb1e3b765b848782ad9779225a1dde7e1b6 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -728,7 +728,6 @@ int rxe_requester(void *arg) rollback_state(wqe, qp, &rollback_wqe, rollback_psn); if (ret == -EAGAIN) { - kfree_skb(skb); rxe_run_task(&qp->req.task, 1); goto exit; } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 01f926fd90296e7bbbfcf9da191299bcf39e5146..bd43c1c7a42fdb3938d04b8bfefa7fcd63550942 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp, err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); if (err) { pr_err("Failed sending RDMA reply.\n"); - kfree_skb(skb); return RESPST_ERR_RNR; } @@ -955,10 +954,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, } err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb); - if (err) { + if (err) pr_err_ratelimited("Failed sending ack\n"); - kfree_skb(skb); - } err1: return err; @@ -1151,7 +1148,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp, if (rc) { pr_err("Failed resending result. 
This flow is not handled - skb ignored\n"); rxe_drop_ref(qp); - kfree_skb(skb_copy); rc = RESPST_CLEANUP; goto out; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index e770c17cbca9201cc66d4dc2695e5836cb1290f2..ee3f630c92179dac8d4a7351bd43e459d9d72fc2 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -885,15 +885,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des } static void -isert_create_send_desc(struct isert_conn *isert_conn, - struct isert_cmd *isert_cmd, - struct iser_tx_desc *tx_desc) +__isert_create_send_desc(struct isert_device *device, + struct iser_tx_desc *tx_desc) { - struct isert_device *device = isert_conn->device; - struct ib_device *ib_dev = device->ib_device; - - ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, - ISER_HEADERS_LEN, DMA_TO_DEVICE); memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl)); tx_desc->iser_header.flags = ISCSI_CTRL; @@ -906,6 +900,20 @@ isert_create_send_desc(struct isert_conn *isert_conn, } } +static void +isert_create_send_desc(struct isert_conn *isert_conn, + struct isert_cmd *isert_cmd, + struct iser_tx_desc *tx_desc) +{ + struct isert_device *device = isert_conn->device; + struct ib_device *ib_dev = device->ib_device; + + ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + + __isert_create_send_desc(device, tx_desc); +} + static int isert_init_tx_hdrs(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc) @@ -993,7 +1001,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc; int ret; - isert_create_send_desc(isert_conn, NULL, tx_desc); + __isert_create_send_desc(device, tx_desc); memcpy(&tx_desc->iscsi_header, &login->rsp[0], sizeof(struct iscsi_hdr)); @@ -2108,7 +2116,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs) sig_attrs->check_mask = (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) | - (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) | + (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) | (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0); return 0; } diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig index c74ee9633041d7a566dc1c2d9febf676c5bd744d..99db8fe5173af0e96551082031889fa9a7419933 100644 --- a/drivers/infiniband/ulp/srp/Kconfig +++ b/drivers/infiniband/ulp/srp/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRP tristate "InfiniBand SCSI RDMA Protocol" - depends on SCSI + depends on SCSI && INFINIBAND_ADDR_TRANS select SCSI_SRP_ATTRS ---help--- Support for the SCSI RDMA Protocol over InfiniBand. This diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig index 31ee83d528d9b6d0bf5716b8d940f1c1b63cbf3a..fb8b7182f05ebd7413058d54e8be7fca974dcc44 100644 --- a/drivers/infiniband/ulp/srpt/Kconfig +++ b/drivers/infiniband/ulp/srpt/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_SRPT tristate "InfiniBand SCSI RDMA Protocol target support" - depends on INFINIBAND && TARGET_CORE + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE ---help--- Support for the SCSI RDMA Protocol (SRP) Target driver. 
The diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d88d3e0f59fb832cd569330a2899cf9647a08945..53f775c41cd1be848254e30b38f694468d88dd37 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -126,6 +126,7 @@ static const struct xpad_device { u8 mapping; u8 xtype; } xpad_device[] = { + { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, @@ -411,6 +412,7 @@ static const signed short xpad_abs_triggers[] = { static const struct usb_device_id xpad_table[] = { { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ + XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ diff --git a/drivers/input/misc/qti-haptics.c b/drivers/input/misc/qti-haptics.c index c5be6b1a68f621d35c101f85bc6ae538038f9762..d63ced7937191cd96477753a4f00b5aeddb5e8c5 100644 --- a/drivers/input/misc/qti-haptics.c +++ b/drivers/input/misc/qti-haptics.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -176,6 +177,10 @@ struct qti_hap_effect { u16 play_rate_us; u8 wf_repeat_n; u8 wf_s_repeat_n; + u8 brake[HAP_BRAKE_PATTERN_MAX]; + int brake_pattern_length; + bool brake_en; + bool lra_auto_res_disable; }; struct qti_hap_play_info { @@ -194,9 +199,7 @@ struct qti_hap_config { u16 vmax_mv; u16 ilim_ma; u16 play_rate_us; - u8 brake[HAP_BRAKE_PATTERN_MAX]; - bool brake_en; - bool lra_auto_res_en; + bool lra_allow_variable_play_rate; bool use_ext_wf_src; }; @@ -211,6 +214,7 @@ struct qti_hap_chip { struct qti_hap_effect *predefined; struct qti_hap_effect constant; struct regulator *vdd_supply; + struct hrtimer stop_timer; spinlock_t bus_lock; ktime_t last_sc_time; int play_irq; @@ -550,6 +554,21 @@ static int qti_haptics_config_brake(struct qti_hap_chip *chip, u8 *brake) return rc; } +static int qti_haptics_lra_auto_res_enable(struct qti_hap_chip *chip, bool en) +{ + int rc; + u8 addr, val, mask; + + addr = REG_HAP_AUTO_RES_CTRL; + mask = HAP_AUTO_RES_EN_BIT; + val = en ? 
HAP_AUTO_RES_EN_BIT : 0; + rc = qti_haptics_masked_write(chip, addr, mask, val); + if (rc < 0) + dev_err(chip->dev, "set AUTO_RES_CTRL failed, rc=%d\n", rc); + + return rc; +} + static int qti_haptics_load_constant_waveform(struct qti_hap_chip *chip) { struct qti_hap_play_info *play = &chip->play; @@ -571,6 +590,13 @@ static int qti_haptics_load_constant_waveform(struct qti_hap_chip *chip) if (rc < 0) return rc; + /* Enable Auto-Resonance when VMAX wf-src is selected */ + if (config->act_type == ACT_LRA) { + rc = qti_haptics_lra_auto_res_enable(chip, true); + if (rc < 0) + return rc; + } + /* Set WF_SOURCE to VMAX */ rc = qti_haptics_config_wf_src(chip, INT_WF_VMAX); if (rc < 0) @@ -626,14 +652,22 @@ static int qti_haptics_load_predefined_effect(struct qti_hap_chip *chip, if (rc < 0) return rc; - /* override play-rate for ERM here, no need for LRA */ - if (config->act_type == ACT_ERM) { - rc = qti_haptics_config_play_rate_us(chip, - play->effect->play_rate_us); + rc = qti_haptics_config_play_rate_us(chip, play->effect->play_rate_us); + if (rc < 0) + return rc; + + if (config->act_type == ACT_LRA) { + rc = qti_haptics_lra_auto_res_enable(chip, + !play->effect->lra_auto_res_disable); if (rc < 0) return rc; } + /* Set brake pattern in the effect */ + rc = qti_haptics_config_brake(chip, play->effect->brake); + if (rc < 0) + return rc; + rc = qti_haptics_config_wf_buffer(chip); if (rc < 0) return rc; @@ -736,16 +770,14 @@ static irqreturn_t qti_haptics_sc_irq_handler(int irq, void *data) static inline void get_play_length(struct qti_hap_play_info *play, int *length_us) { - struct qti_hap_chip *chip = container_of(play, - struct qti_hap_chip, play); struct qti_hap_effect *effect = play->effect; int tmp; tmp = effect->pattern_length * effect->play_rate_us; tmp *= wf_s_repeat[effect->wf_s_repeat_n]; tmp *= wf_repeat[effect->wf_repeat_n]; - if (chip->config.brake_en) - tmp += effect->play_rate_us * HAP_BRAKE_PATTERN_MAX; + if (effect->brake_en) + tmp += effect->play_rate_us * effect->brake_pattern_length; *length_us = tmp; } @@ -871,6 +903,8 @@ static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) { struct qti_hap_chip *chip = input_get_drvdata(dev); struct qti_hap_play_info *play = &chip->play; + s64 secs; + unsigned long nsecs; int rc = 0; dev_dbg(chip->dev, "playback, val = %d\n", val); @@ -889,6 +923,11 @@ static int qti_haptics_playback(struct input_dev *dev, int effect_id, int val) disable_irq_nosync(chip->play_irq); chip->play_irq_en = false; } + secs = play->length_us / USEC_PER_SEC; + nsecs = (play->length_us % USEC_PER_SEC) * + NSEC_PER_USEC; + hrtimer_start(&chip->stop_timer, ktime_set(secs, nsecs), + HRTIMER_MODE_REL); } } else { play->length_us = 0; @@ -954,9 +993,8 @@ static int qti_haptics_hw_init(struct qti_hap_chip *chip) /* Set HAP_EN_CTL3 */ addr = REG_HAP_EN_CTL3; val = HAP_HBRIDGE_EN_BIT | HAP_PWM_SIGNAL_EN_BIT | HAP_ILIM_EN_BIT | - HAP_ILIM_CC_EN_BIT | HAP_DAC_EN_BIT | HAP_PWM_CTL_EN_BIT; - if (config->act_type == ACT_LRA && config->lra_auto_res_en) - val |= HAP_AUTO_RES_RBIAS_EN_BIT; + HAP_ILIM_CC_EN_BIT | HAP_AUTO_RES_RBIAS_EN_BIT | + HAP_DAC_EN_BIT | HAP_PWM_CTL_EN_BIT; rc = qti_haptics_write(chip, addr, &val, 1); if (rc < 0) { dev_err(chip->dev, "set EN_CTL3 failed, rc=%d\n", rc); @@ -981,11 +1019,6 @@ static int qti_haptics_hw_init(struct qti_hap_chip *chip) if (rc < 0) return rc; - /* Set default brake pattern */ - rc = qti_haptics_config_brake(chip, config->brake); - if (rc < 0) - return rc; - /* Set external waveform source if it's used */ if 
(config->use_ext_wf_src) { rc = qti_haptics_config_wf_src(chip, config->ext_src); @@ -1008,10 +1041,6 @@ static int qti_haptics_hw_init(struct qti_hap_chip *chip) return rc; } - /* Skip configurations below if auto-res-en is not set */ - if (!config->lra_auto_res_en) - return 0; - addr = REG_HAP_AUTO_RES_CFG; mask = HAP_AUTO_RES_MODE_BIT | HAP_CAL_EOP_EN_BIT | HAP_CAL_PERIOD_MASK; val = config->lra_auto_res_mode << HAP_AUTO_RES_MODE_SHIFT; @@ -1023,8 +1052,9 @@ static int qti_haptics_hw_init(struct qti_hap_chip *chip) } addr = REG_HAP_AUTO_RES_CTRL; - val = HAP_AUTO_RES_EN_BIT | HAP_SEL_AUTO_RES_PERIOD | AUTO_RES_EN_DLY(4) - | AUTO_RES_CNT_ERR_DELTA(2) | HAP_AUTO_RES_ERR_RECOVERY_BIT; + val = HAP_AUTO_RES_EN_BIT | HAP_SEL_AUTO_RES_PERIOD | + AUTO_RES_CNT_ERR_DELTA(2) | HAP_AUTO_RES_ERR_RECOVERY_BIT | + AUTO_RES_EN_DLY(4); rc = qti_haptics_write(chip, addr, &val, 1); if (rc < 0) { dev_err(chip->dev, "set AUTO_RES_CTRL failed, rc=%d\n", @@ -1035,6 +1065,26 @@ static int qti_haptics_hw_init(struct qti_hap_chip *chip) return 0; } +static enum hrtimer_restart qti_hap_stop_timer(struct hrtimer *timer) +{ + struct qti_hap_chip *chip = container_of(timer, struct qti_hap_chip, + stop_timer); + int rc; + + chip->play.length_us = 0; + rc = qti_haptics_play(chip, false); + if (rc < 0) { + dev_err(chip->dev, "Stop playing failed, rc=%d\n", rc); + goto err_out; + } + + rc = qti_haptics_module_en(chip, false); + if (rc < 0) + dev_err(chip->dev, "Disable module failed, rc=%d\n", rc); +err_out: + return HRTIMER_NORESTART; +} + static int qti_haptics_parse_dt(struct qti_hap_chip *chip) { struct qti_hap_config *config = &chip->config; @@ -1096,30 +1146,6 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) config->play_rate_us = (tmp >= HAP_PLAY_RATE_US_MAX) ? 
HAP_PLAY_RATE_US_MAX : tmp; - tmp = of_property_count_elems_of_size(node, "qcom,brake-pattern", - sizeof(u8)); - if (tmp > 0) { - if (tmp != HAP_BRAKE_PATTERN_MAX) { - dev_err(chip->dev, "brake-pattern should be %d bytes\n", - HAP_BRAKE_PATTERN_MAX); - return -EINVAL; - } - - rc = of_property_read_u8_array(node, "qcom,brake-pattern", - config->brake, HAP_BRAKE_PATTERN_MAX); - if (rc < 0) { - dev_err(chip->dev, "Failed to get brake-pattern, rc=%d\n", - rc); - return rc; - } - - for (val = 0, j = 0; j < HAP_BRAKE_PATTERN_MAX; j++) - val |= (config->brake[j] & HAP_BRAKE_PATTERN_MASK) << - j * HAP_BRAKE_PATTERN_SHIFT; - - config->brake_en = (val != 0); - } - if (of_find_property(node, "qcom,external-waveform-source", NULL)) { if (!of_property_read_string(node, "qcom,external-waveform-source", &str)) { @@ -1162,8 +1188,8 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) } } - config->lra_auto_res_en = of_property_read_bool(node, - "qcom,lra-auto-resonance-en"); + config->lra_allow_variable_play_rate = of_property_read_bool( + node, "qcom,lra-allow-variable-play-rate"); config->lra_auto_res_mode = AUTO_RES_MODE_ZXD; rc = of_property_read_string(node, @@ -1233,10 +1259,6 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) return rc; } - for (j = 0; j < effect->pattern_length; j++) - effect->pattern[j] = effect->pattern[j] << - HAP_WF_AMP_SHIFT; - effect->play_rate_us = config->play_rate_us; rc = of_property_read_u32(child_node, "qcom,wf-play-rate-us", &tmp); @@ -1247,6 +1269,7 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) effect->play_rate_us = tmp; if (config->act_type == ACT_LRA && + !config->lra_allow_variable_play_rate && config->play_rate_us != effect->play_rate_us) { dev_warn(chip->dev, "play rate should match with LRA resonance frequency\n"); effect->play_rate_us = config->play_rate_us; @@ -1277,6 +1300,41 @@ static int qti_haptics_parse_dt(struct qti_hap_chip *chip) effect->wf_s_repeat_n = j; } + + effect->lra_auto_res_disable = of_property_read_bool(node, + "qcom,lra-auto-resonance-disable"); + + tmp = of_property_count_elems_of_size(child_node, + "qcom,wf-brake-pattern", sizeof(u8)); + if (tmp <= 0) + continue; + + if (tmp > HAP_BRAKE_PATTERN_MAX) { + dev_err(chip->dev, "wf-brake-pattern shouldn't be more than %d bytes\n", + HAP_BRAKE_PATTERN_MAX); + return -EINVAL; + } + + rc = of_property_read_u8_array(child_node, + "qcom,wf-brake-pattern", effect->brake, tmp); + if (rc < 0) { + dev_err(chip->dev, "Failed to get wf-brake-pattern, rc=%d\n", + rc); + return rc; + } + + effect->brake_pattern_length = tmp; + for (j = tmp - 1; j >= 0; j--) { + if (effect->brake[j] != 0) + break; + effect->brake_pattern_length--; + } + + for (val = 0, j = 0; j < effect->brake_pattern_length; j++) + val |= (effect->brake[j] & HAP_BRAKE_PATTERN_MASK) + << j * HAP_BRAKE_PATTERN_SHIFT; + + effect->brake_en = (val != 0); } return 0; @@ -1338,6 +1396,9 @@ static int qti_haptics_probe(struct platform_device *pdev) return rc; } + hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + chip->stop_timer.function = qti_hap_stop_timer; + input_dev->name = "vibrator"; input_set_drvdata(input_dev, chip); chip->input_dev = input_dev; diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 599544c1a91cd365261b6ca2ec4e4f3149b0a63d..243e0fa6e3e3cb44ce22adc6e76421fda79f4ff2 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -27,6 +27,8 @@ #define ETP_DISABLE_POWER 0x0001 #define ETP_PRESSURE_OFFSET 25 +#define 
ETP_CALIBRATE_MAX_LEN 3 + /* IAP Firmware handling */ #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0" #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin" diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index d6135900da649680e9e23cd924541f63a3423fa7..7b5fa501bbcf6651886cb16a2bcdbede026f382e 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -610,7 +610,7 @@ static ssize_t calibrate_store(struct device *dev, int tries = 20; int retval; int error; - u8 val[3]; + u8 val[ETP_CALIBRATE_MAX_LEN]; retval = mutex_lock_interruptible(&data->sysfs_mutex); if (retval) @@ -1260,6 +1260,8 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN060B", 0 }, { "ELAN060C", 0 }, { "ELAN0611", 0 }, + { "ELAN0612", 0 }, + { "ELAN0618", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index cfcb32559925baf1acf070f908f3b91b1fc1b905..c060d270bc4d862ad7366bd87529dbdc032672b6 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -56,7 +56,7 @@ static int elan_smbus_initialize(struct i2c_client *client) { u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 }; - u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 }; + u8 values[I2C_SMBUS_BLOCK_MAX] = {0}; int len, error; /* Get hello packet */ @@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client) static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val) { int error; + u8 buf[I2C_SMBUS_BLOCK_MAX] = {0}; + + BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf)); error = i2c_smbus_read_block_data(client, - ETP_SMBUS_CALIBRATE_QUERY, val); + ETP_SMBUS_CALIBRATE_QUERY, buf); if (error < 0) return error; + memcpy(val, buf, ETP_CALIBRATE_MAX_LEN); return 0; } @@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report) { int len; + BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN); + len = i2c_smbus_read_block_data(client, ETP_SMBUS_PACKET_QUERY, &report[ETP_SMBUS_REPORT_OFFSET]); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index a4aaa748e987f7e4db918e290d20d356f9eec21b..a250f433eb968b236ae2ddf99cf45f69d7f17fef 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) else if (ic_version == 7 && etd->samples[1] == 0x2A) sanity_check = ((packet[3] & 0x1c) == 0x10); else - sanity_check = ((packet[0] & 0x0c) == 0x04 && + sanity_check = ((packet[0] & 0x08) == 0x00 && (packet[3] & 0x1c) == 0x10); if (!sanity_check) @@ -1177,6 +1177,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { { } }; +static const char * const middle_button_pnp_ids[] = { + "LEN2131", /* ThinkPad P52 w/ NFC */ + "LEN2132", /* ThinkPad P52 */ + NULL +}; + /* * Set the appropriate event bits for the input subsystem */ @@ -1196,7 +1202,8 @@ static int elantech_set_input_params(struct psmouse *psmouse) __clear_bit(EV_REL, dev->evbit); __set_bit(BTN_LEFT, dev->keybit); - if (dmi_check_system(elantech_dmi_has_middle_button)) + if (dmi_check_system(elantech_dmi_has_middle_button) || + psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids)) __set_bit(BTN_MIDDLE, dev->keybit); __set_bit(BTN_RIGHT, dev->keybit); diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c index 
d97a85907ed6651bdfa6de359e75c8bd48a3ab3f..d0c3d275bf9f4f79d0249a9f27f7380943b45706 100644 --- a/drivers/input/rmi4/rmi_spi.c +++ b/drivers/input/rmi4/rmi_spi.c @@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi, if (len > RMI_SPI_XFER_SIZE_LIMIT) return -EINVAL; - if (rmi_spi->xfer_buf_size < len) - rmi_spi_manage_pools(rmi_spi, len); + if (rmi_spi->xfer_buf_size < len) { + ret = rmi_spi_manage_pools(rmi_spi, len); + if (ret < 0) + return ret; + } if (addr == 0) /* diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 64f25cf45382978272c983dd755b6458dd6e5ac9..721dd2da49678c894afed7e8420052baf7f109a7 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -1256,4 +1256,16 @@ config TOUCHSCREEN_ST source "drivers/input/touchscreen/st/Kconfig" +config TOUCHSCREEN_HIMAX_CHIPSET + bool "Himax touchpanel CHIPSET" + depends on I2C + help + Say Y here if you have a Himax CHIPSET touchscreen. + HIMAX controllers are multi touch controllers which can + report 10 touches at a time. + + If unsure, say N. + +source "drivers/input/touchscreen/hxchipset/Kconfig" + endif diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 2809279cbcbfff484a000dc3b711ccf0935277ac..b4bd108ff616d7911eaba0ef4efe6930062e3f04 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -105,3 +105,4 @@ obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o obj-$(CONFIG_TOUCHSCREEN_ST) += st/ +obj-$(CONFIG_TOUCHSCREEN_HIMAX_CHIPSET)» += hxchipset/ \ No newline at end of file diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 429b694405c7d4b5850ff91c5b8bef3ecbf817b7..fc149ea64be795f92616a920dd9ee492291bab74 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -275,7 +275,8 @@ struct mxt_data { char phys[64]; /* device physical location */ const struct mxt_platform_data *pdata; struct mxt_object *object_table; - struct mxt_info info; + struct mxt_info *info; + void *raw_info_block; unsigned int irq; unsigned int max_x; unsigned int max_y; @@ -450,12 +451,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry) { u8 appmode = data->client->addr; u8 bootloader; + u8 family_id = data->info ? 
data->info->family_id : 0; switch (appmode) { case 0x4a: case 0x4b: /* Chips after 1664S use different scheme */ - if (retry || data->info.family_id >= 0xa2) { + if (retry || family_id >= 0xa2) { bootloader = appmode - 0x24; break; } @@ -682,7 +684,7 @@ mxt_get_object(struct mxt_data *data, u8 type) struct mxt_object *object; int i; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (object->type == type) return object; @@ -1453,12 +1455,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) data_pos += offset; } - if (cfg_info.family_id != data->info.family_id) { + if (cfg_info.family_id != data->info->family_id) { dev_err(dev, "Family ID mismatch!\n"); return -EINVAL; } - if (cfg_info.variant_id != data->info.variant_id) { + if (cfg_info.variant_id != data->info->variant_id) { dev_err(dev, "Variant ID mismatch!\n"); return -EINVAL; } @@ -1503,7 +1505,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) /* Malloc memory to store configuration */ cfg_start_ofs = MXT_OBJECT_START + - data->info.object_num * sizeof(struct mxt_object) + + data->info->object_num * sizeof(struct mxt_object) + MXT_INFO_CHECKSUM_SIZE; config_mem_size = data->mem_size - cfg_start_ofs; config_mem = kzalloc(config_mem_size, GFP_KERNEL); @@ -1554,20 +1556,6 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) return ret; } -static int mxt_get_info(struct mxt_data *data) -{ - struct i2c_client *client = data->client; - struct mxt_info *info = &data->info; - int error; - - /* Read 7-byte info block starting at address 0 */ - error = __mxt_read_reg(client, 0, sizeof(*info), info); - if (error) - return error; - - return 0; -} - static void mxt_free_input_device(struct mxt_data *data) { if (data->input_dev) { @@ -1582,9 +1570,10 @@ static void mxt_free_object_table(struct mxt_data *data) video_unregister_device(&data->dbg.vdev); v4l2_device_unregister(&data->dbg.v4l2); #endif - - kfree(data->object_table); data->object_table = NULL; + data->info = NULL; + kfree(data->raw_info_block); + data->raw_info_block = NULL; kfree(data->msg_buf); data->msg_buf = NULL; data->T5_address = 0; @@ -1600,34 +1589,18 @@ static void mxt_free_object_table(struct mxt_data *data) data->max_reportid = 0; } -static int mxt_get_object_table(struct mxt_data *data) +static int mxt_parse_object_table(struct mxt_data *data, + struct mxt_object *object_table) { struct i2c_client *client = data->client; - size_t table_size; - struct mxt_object *object_table; - int error; int i; u8 reportid; u16 end_address; - table_size = data->info.object_num * sizeof(struct mxt_object); - object_table = kzalloc(table_size, GFP_KERNEL); - if (!object_table) { - dev_err(&data->client->dev, "Failed to allocate memory\n"); - return -ENOMEM; - } - - error = __mxt_read_reg(client, MXT_OBJECT_START, table_size, - object_table); - if (error) { - kfree(object_table); - return error; - } - /* Valid Report IDs start counting from 1 */ reportid = 1; data->mem_size = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { struct mxt_object *object = object_table + i; u8 min_id, max_id; @@ -1651,8 +1624,8 @@ static int mxt_get_object_table(struct mxt_data *data) switch (object->type) { case MXT_GEN_MESSAGE_T5: - if (data->info.family_id == 0x80 && - data->info.version < 0x20) { + if (data->info->family_id == 0x80 && + data->info->version < 0x20) { /* * On mXT224 firmware 
versions prior to V2.0 * read and discard unused CRC byte otherwise @@ -1707,24 +1680,102 @@ static int mxt_get_object_table(struct mxt_data *data) /* If T44 exists, T5 position has to be directly after */ if (data->T44_address && (data->T5_address != data->T44_address + 1)) { dev_err(&client->dev, "Invalid T44 position\n"); - error = -EINVAL; - goto free_object_table; + return -EINVAL; } data->msg_buf = kcalloc(data->max_reportid, data->T5_msg_size, GFP_KERNEL); - if (!data->msg_buf) { - dev_err(&client->dev, "Failed to allocate message buffer\n"); + if (!data->msg_buf) + return -ENOMEM; + + return 0; +} + +static int mxt_read_info_block(struct mxt_data *data) +{ + struct i2c_client *client = data->client; + int error; + size_t size; + void *id_buf, *buf; + uint8_t num_objects; + u32 calculated_crc; + u8 *crc_ptr; + + /* If info block already allocated, free it */ + if (data->raw_info_block) + mxt_free_object_table(data); + + /* Read 7-byte ID information block starting at address 0 */ + size = sizeof(struct mxt_info); + id_buf = kzalloc(size, GFP_KERNEL); + if (!id_buf) + return -ENOMEM; + + error = __mxt_read_reg(client, 0, size, id_buf); + if (error) + goto err_free_mem; + + /* Resize buffer to give space for rest of info block */ + num_objects = ((struct mxt_info *)id_buf)->object_num; + size += (num_objects * sizeof(struct mxt_object)) + + MXT_INFO_CHECKSUM_SIZE; + + buf = krealloc(id_buf, size, GFP_KERNEL); + if (!buf) { error = -ENOMEM; - goto free_object_table; + goto err_free_mem; + } + id_buf = buf; + + /* Read rest of info block */ + error = __mxt_read_reg(client, MXT_OBJECT_START, + size - MXT_OBJECT_START, + id_buf + MXT_OBJECT_START); + if (error) + goto err_free_mem; + + /* Extract & calculate checksum */ + crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE; + data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16); + + calculated_crc = mxt_calculate_crc(id_buf, 0, + size - MXT_INFO_CHECKSUM_SIZE); + + /* + * CRC mismatch can be caused by data corruption due to I2C comms + * issue or else device is not using Object Based Protocol (eg i2c-hid) + */ + if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) { + dev_err(&client->dev, + "Info Block CRC error calculated=0x%06X read=0x%06X\n", + calculated_crc, data->info_crc); + error = -EIO; + goto err_free_mem; + } + + data->raw_info_block = id_buf; + data->info = (struct mxt_info *)id_buf; + + dev_info(&client->dev, + "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", + data->info->family_id, data->info->variant_id, + data->info->version >> 4, data->info->version & 0xf, + data->info->build, data->info->object_num); + + /* Parse object table information */ + error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START); + if (error) { + dev_err(&client->dev, "Error %d parsing object table\n", error); + mxt_free_object_table(data); + goto err_free_mem; } - data->object_table = object_table; + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START); return 0; -free_object_table: - mxt_free_object_table(data); +err_free_mem: + kfree(id_buf); return error; } @@ -2039,7 +2090,7 @@ static int mxt_initialize(struct mxt_data *data) int error; while (1) { - error = mxt_get_info(data); + error = mxt_read_info_block(data); if (!error) break; @@ -2070,16 +2121,9 @@ static int mxt_initialize(struct mxt_data *data) msleep(MXT_FW_RESET_TIME); } - /* Get object table information */ - error = mxt_get_object_table(data); - if (error) { - dev_err(&client->dev, "Error %d reading object table\n", 
error); - return error; - } - error = mxt_acquire_irq(data); if (error) - goto err_free_object_table; + return error; error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, &client->dev, GFP_KERNEL, data, @@ -2087,14 +2131,10 @@ static int mxt_initialize(struct mxt_data *data) if (error) { dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", error); - goto err_free_object_table; + return error; } return 0; - -err_free_object_table: - mxt_free_object_table(data); - return error; } static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) @@ -2155,7 +2195,7 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data) static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x, unsigned int y) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; unsigned int ofs, page; unsigned int col = 0; @@ -2483,7 +2523,7 @@ static const struct video_device mxt_video_device = { static void mxt_debug_init(struct mxt_data *data) { - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; struct mxt_dbg *dbg = &data->dbg; struct mxt_object *object; int error; @@ -2569,7 +2609,6 @@ static int mxt_configure_objects(struct mxt_data *data, const struct firmware *cfg) { struct device *dev = &data->client->dev; - struct mxt_info *info = &data->info; int error; error = mxt_init_t7_power_cfg(data); @@ -2594,11 +2633,6 @@ static int mxt_configure_objects(struct mxt_data *data, mxt_debug_init(data); - dev_info(dev, - "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", - info->family_id, info->variant_id, info->version >> 4, - info->version & 0xf, info->build, info->object_num); - return 0; } @@ -2607,7 +2641,7 @@ static ssize_t mxt_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n", info->version >> 4, info->version & 0xf, info->build); } @@ -2617,7 +2651,7 @@ static ssize_t mxt_hw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mxt_data *data = dev_get_drvdata(dev); - struct mxt_info *info = &data->info; + struct mxt_info *info = data->info; return scnprintf(buf, PAGE_SIZE, "%u.%u\n", info->family_id, info->variant_id); } @@ -2656,7 +2690,7 @@ static ssize_t mxt_object_show(struct device *dev, return -ENOMEM; error = 0; - for (i = 0; i < data->info.object_num; i++) { + for (i = 0; i < data->info->object_num; i++) { object = data->object_table + i; if (!mxt_object_readable(object->type)) diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 5dafafad6351a09362484ba011991dea4eb9b174..2bfa89ec552c0b1c32c3d3befdd09b5d70ece75f 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -888,6 +888,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id); #ifdef CONFIG_ACPI static const struct acpi_device_id goodix_acpi_match[] = { { "GDIX1001", 0 }, + { "GDIX1002", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, goodix_acpi_match); diff --git a/drivers/input/touchscreen/hxchipset/Kconfig b/drivers/input/touchscreen/hxchipset/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..ead31d1aa4f5c8ceaaf9aedf21a5d33909f5d808 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/Kconfig @@ -0,0 +1,86 @@ +# +# Himax Touchscreen driver configuration +# + +config TOUCHSCREEN_HIMAX_I2C + tristate "HIMAX chipset 
i2c touchscreen" + depends on TOUCHSCREEN_HIMAX_CHIPSET + help + This enables support for HIMAX CHIPSET over I2C based touchscreens. + + +# ***************** On-cell Start ***************** +config TOUCHSCREEN_HIMAX_ONCELL + tristate "HIMAX chipset on-cell function" + depends on TOUCHSCREEN_HIMAX_I2C + help + This enables support for HIMAX CHIPSET of on-cell function. + +config TOUCHSCREEN_HIMAX_IC_HX852xH + tristate "HIMAX chipset HX852xH function" + depends on TOUCHSCREEN_HIMAX_ONCELL + help + This enables support for HIMAX CHIPSET of HX852xH. + +config TOUCHSCREEN_HIMAX_IC_HX852xG + tristate "HIMAX chipset HX852xG function" + depends on TOUCHSCREEN_HIMAX_ONCELL + help + This enables support for HIMAX CHIPSET of HX852xG. +# ***************** On-cell End ******************* + +# ***************** In-cell Start ***************** +config TOUCHSCREEN_HIMAX_INCELL + tristate "HIMAX chipset in-cell function" + depends on TOUCHSCREEN_HIMAX_I2C + help + This enables support for HIMAX CHIPSET of in-cell function. + +config TOUCHSCREEN_HIMAX_IC_HX83191 + tristate "HIMAX chipset HX83191 function" + depends on TOUCHSCREEN_HIMAX_INCELL + help + This enables support for HIMAX CHIPSET of HX83191. + +config TOUCHSCREEN_HIMAX_IC_HX83112 + tristate "HIMAX chipset HX83112 function" + depends on TOUCHSCREEN_HIMAX_INCELL + help + This enables support for HIMAX CHIPSET of HX83112. + +config TOUCHSCREEN_HIMAX_IC_HX83111 + tristate "HIMAX chipset HX83111 function" + depends on TOUCHSCREEN_HIMAX_INCELL + help + This enables support for HIMAX CHIPSET of HX83111. + +config TOUCHSCREEN_HIMAX_IC_HX83103 + tristate "HIMAX chipset HX83103 function" + depends on TOUCHSCREEN_HIMAX_INCELL + help + This enables support for HIMAX CHIPSET of HX83103. + +config TOUCHSCREEN_HIMAX_IC_HX83102 + tristate "HIMAX chipset HX83102 function" + depends on TOUCHSCREEN_HIMAX_INCELL + help + This enables support for HIMAX CHIPSET of HX83102. +# ***************** In-cell End ******************* + +config TOUCHSCREEN_HIMAX_DEBUG + tristate "HIMAX debug function" + depends on TOUCHSCREEN_HIMAX_I2C + help + This enables support for HIMAX debug function. + +config TOUCHSCREEN_HIMAX_INSPECT + tristate "HIMAX inspect function" + depends on TOUCHSCREEN_HIMAX_I2C + help + This enables support for HIMAX debug function. + +config HMX_DB + tristate "HIMAX driver test over Dragon Board" + depends on TOUCHSCREEN_HIMAX_I2C + help + This enables support for HIMAX driver test over Dragon Board. diff --git a/drivers/input/touchscreen/hxchipset/Makefile b/drivers/input/touchscreen/hxchipset/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c44932bf73524258ba1e68b40f1f6bd3a7aaa620 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/Makefile @@ -0,0 +1,13 @@ +# Makefile for the Himax touchscreen drivers. 
+obj-$(CONFIG_TOUCHSCREEN_HIMAX_ONCELL) += himax_ic_oncell_core.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX852xH) += himax_ic_HX852xH.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX852xG) += himax_ic_HX852xG.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_INCELL) += himax_ic_incell_core.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX83191) += himax_ic_HX83191.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX83112) += himax_ic_HX83112.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX83111) += himax_ic_HX83111.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX83103) += himax_ic_HX83103.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_IC_HX83102) += himax_ic_HX83102.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_I2C) += himax_common.o himax_platform.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_DEBUG) += himax_debug.o +obj-$(CONFIG_TOUCHSCREEN_HIMAX_INSPECT) += himax_inspection.o \ No newline at end of file diff --git a/drivers/input/touchscreen/hxchipset/himax_common.c b/drivers/input/touchscreen/hxchipset/himax_common.c new file mode 100644 index 0000000000000000000000000000000000000000..43ea1829c4afe96fde0ab7189ad1f27d67508239 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_common.c @@ -0,0 +1,2332 @@ +/* + * Himax Android Driver Sample Code for common functions + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "himax_common.h" +#include "himax_ic_core.h" +#include "himax_platform.h" + +#ifdef CONFIG_TOUCHSCREEN_HIMAX_DEBUG +#include "himax_debug.h" +#endif + +#define SUPPORT_FINGER_DATA_CHECKSUM 0x0F +#define TS_WAKE_LOCK_TIMEOUT (2 * HZ) +#define FRAME_COUNT 5 +#define COMMON_BUF_SZ 80 + +#if defined(HX_AUTO_UPDATE_FW) +char *i_CTPM_firmware_name = "Himax_firmware.bin"; +const struct firmware *i_CTPM_FW; +#endif + +struct himax_ts_data *private_ts; +struct himax_ic_data *ic_data; +struct himax_report_data *hx_touch_data; +struct himax_core_fp g_core_fp; +struct himax_debug *debug_data; + +struct proc_dir_entry *himax_touch_proc_dir; +#define HIMAX_PROC_TOUCH_FOLDER "android_touch" + +/* ts_work about start */ +struct himax_target_report_data *g_target_report_data; +/* ts_work about end */ + +static int HX_TOUCH_INFO_POINT_CNT; + +unsigned long FW_VER_MAJ_FLASH_ADDR; +unsigned long FW_VER_MIN_FLASH_ADDR; +unsigned long CFG_VER_MAJ_FLASH_ADDR; +unsigned long CFG_VER_MIN_FLASH_ADDR; +unsigned long CID_VER_MAJ_FLASH_ADDR; +unsigned long CID_VER_MIN_FLASH_ADDR; + +unsigned long FW_VER_MAJ_FLASH_LENG; +unsigned long FW_VER_MIN_FLASH_LENG; +unsigned long CFG_VER_MAJ_FLASH_LENG; +unsigned long CFG_VER_MIN_FLASH_LENG; +unsigned long CID_VER_MAJ_FLASH_LENG; +unsigned long CID_VER_MIN_FLASH_LENG; + +unsigned long FW_CFG_VER_FLASH_ADDR; + +#ifdef HX_AUTO_UPDATE_FW + int g_i_FW_VER = 0; + int g_i_CFG_VER = 0; + int g_i_CID_MAJ = 0; /* GUEST ID */ + int g_i_CID_MIN = 0; /* VER for GUEST */ +#endif + +unsigned char IC_CHECKSUM; + +#ifdef HX_ESD_RECOVERY + u8 HX_ESD_RESET_ACTIVATE = 0; + int hx_EB_event_flag = 0; + int hx_EC_event_flag = 0; + int hx_ED_event_flag = 0; + int g_zero_event_count = 0; +#endif +u8 HX_HW_RESET_ACTIVATE; + +static uint8_t AA_press; +static uint8_t EN_NoiseFilter; +static uint8_t 
Last_EN_NoiseFilter; + +static int p_point_num = 0xFFFF; +#if defined(HX_EN_SEL_BUTTON) || defined(HX_EN_MUT_BUTTON) +static uint8_t vk_press; +static int tpd_key; +static int tpd_key_old; +#endif +static int probe_fail_flag; +#ifdef HX_USB_DETECT_GLOBAL + bool USB_detect_flag; +#endif + + +#if defined(CONFIG_DRM) +int drm_notifier_callback(struct notifier_block *self, unsigned long event, void *data); +#elif defined(CONFIG_FB) +int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); +#elif defined(CONFIG_HAS_EARLYSUSPEND) +static void himax_ts_early_suspend(struct early_suspend *h); +static void himax_ts_late_resume(struct early_suspend *h); +#endif + +#if defined(HX_PALM_REPORT) + int himax_palm_detect(uint8_t *buf); +#endif + +#ifdef HX_GESTURE_TRACK + static int gest_pt_cnt; + static int gest_pt_x[GEST_PT_MAX_NUM]; + static int gest_pt_y[GEST_PT_MAX_NUM]; + static int gest_start_x, gest_start_y, gest_end_x, gest_end_y; + static int gest_width, gest_height, gest_mid_x, gest_mid_y; + static int gn_gesture_coor[16]; +#endif + +int himax_report_data_init(void); + +int g_ts_dbg; + +/* File node for SMWP and HSEN - Start*/ +uint8_t HX_PROC_SEND_FLAG; +#ifdef HX_SMART_WAKEUP + #define HIMAX_PROC_SMWP_FILE "SMWP" + struct proc_dir_entry *himax_proc_SMWP_file = NULL; + #define HIMAX_PROC_GESTURE_FILE "GESTURE" + struct proc_dir_entry *himax_proc_GESTURE_file = NULL; + uint8_t HX_SMWP_EN = 0; + bool FAKE_POWER_KEY_SEND; +#endif + +#ifdef HX_HIGH_SENSE + #define HIMAX_PROC_HSEN_FILE "HSEN" + struct proc_dir_entry *himax_proc_HSEN_file = NULL; +#endif + +#ifdef HX_HIGH_SENSE +static ssize_t himax_HSEN_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + size_t count = 0; + char *temp_buf; + + if (!HX_PROC_SEND_FLAG) { + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + + count = snprintf(temp_buf, len, "%d\n", ts->HSEN_enable); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + } else + HX_PROC_SEND_FLAG = 0; + + return count; +} + +static ssize_t himax_HSEN_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + if (buf[0] == '0') + ts->HSEN_enable = 0; + else if (buf[0] == '1') + ts->HSEN_enable = 1; + else + return -EINVAL; + + g_core_fp.fp_set_HSEN_enable(ts->HSEN_enable, ts->suspended); + I("%s: HSEN_enable = %d.\n", __func__, ts->HSEN_enable); + return len; +} + +static const struct file_operations himax_proc_HSEN_ops = { + .owner = THIS_MODULE, + .read = himax_HSEN_read, + .write = himax_HSEN_write, +}; +#endif + +#ifdef HX_SMART_WAKEUP +static ssize_t himax_SMWP_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + size_t count = 0; + struct himax_ts_data *ts = private_ts; + char *temp_buf; + + if (!HX_PROC_SEND_FLAG) { + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + count = snprintf(temp_buf, len, "%d\n", ts->SMWP_enable); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + } else + HX_PROC_SEND_FLAG = 0; + + return 
count; +} + +static ssize_t himax_SMWP_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + if (buf[0] == '0') + ts->SMWP_enable = 0; + else if (buf[0] == '1') + ts->SMWP_enable = 1; + else + return -EINVAL; + + g_core_fp.fp_set_SMWP_enable(ts->SMWP_enable, ts->suspended); + HX_SMWP_EN = ts->SMWP_enable; + I("%s: SMART_WAKEUP_enable = %d.\n", __func__, HX_SMWP_EN); + return len; +} + +static const struct file_operations himax_proc_SMWP_ops = { + .owner = THIS_MODULE, + .read = himax_SMWP_read, + .write = himax_SMWP_write, +}; + +static ssize_t himax_GESTURE_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + int i = 0; + size_t ret = 0; + char *temp_buf; + + if (!HX_PROC_SEND_FLAG) { + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + + for (i = 0; i < 16; i++) + ret += snprintf(temp_buf + ret, len - ret, + "ges_en[%d]=%d\n", i, ts->gesture_cust_en[i]); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + } else { + HX_PROC_SEND_FLAG = 0; + ret = 0; + } + + return ret; +} + +static ssize_t himax_GESTURE_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + int i = 0; + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + I("himax_GESTURE_store= %s\n", buf); + + for (i = 0; i < 16; i++) { + if (buf[i] == '0') + ts->gesture_cust_en[i] = 0; + else if (buf[i] == '1') + ts->gesture_cust_en[i] = 1; + else + ts->gesture_cust_en[i] = 0; + + I("gesture en[%d]=%d\n", i, ts->gesture_cust_en[i]); + } + + return len; +} + +static const struct file_operations himax_proc_Gesture_ops = { + .owner = THIS_MODULE, + .read = himax_GESTURE_read, + .write = himax_GESTURE_write, +}; +#endif + +#ifdef CONFIG_TOUCHSCREEN_HIMAX_INSPECT +extern void (*fp_himax_self_test_init)(void); +#endif + +int himax_common_proc_init(void) +{ + himax_touch_proc_dir = proc_mkdir(HIMAX_PROC_TOUCH_FOLDER, NULL); + + if (himax_touch_proc_dir == NULL) { + E(" %s: himax_touch_proc_dir file create failed!\n", __func__); + return -ENOMEM; + } +#ifdef CONFIG_TOUCHSCREEN_HIMAX_INSPECT + if (fp_himax_self_test_init != NULL) + fp_himax_self_test_init(); +#endif + +#ifdef HX_HIGH_SENSE + himax_proc_HSEN_file = proc_create(HIMAX_PROC_HSEN_FILE, 0666, + himax_touch_proc_dir, &himax_proc_HSEN_ops); + + if (himax_proc_HSEN_file == NULL) { + E(" %s: proc HSEN file create failed!\n", __func__); + goto fail_1; + } + +#endif +#ifdef HX_SMART_WAKEUP + himax_proc_SMWP_file = proc_create(HIMAX_PROC_SMWP_FILE, 0666, + himax_touch_proc_dir, &himax_proc_SMWP_ops); + + if (himax_proc_SMWP_file == NULL) { + E(" %s: proc SMWP file create failed!\n", __func__); + goto fail_2; + } + + himax_proc_GESTURE_file = proc_create(HIMAX_PROC_GESTURE_FILE, 0666, + himax_touch_proc_dir, &himax_proc_Gesture_ops); + + if (himax_proc_GESTURE_file == NULL) { + E(" %s: proc GESTURE file create failed!\n", __func__); + goto fail_3; + } +#endif + return 0; + +#ifdef HX_SMART_WAKEUP + 
remove_proc_entry(HIMAX_PROC_GESTURE_FILE, himax_touch_proc_dir); +fail_3: + remove_proc_entry(HIMAX_PROC_SMWP_FILE, himax_touch_proc_dir); +fail_2: +#endif +#ifdef HX_HIGH_SENSE + remove_proc_entry(HIMAX_PROC_HSEN_FILE, himax_touch_proc_dir); +fail_1: +#endif + return -ENOMEM; +} + +void himax_common_proc_deinit(void) +{ +#ifdef HX_SMART_WAKEUP + remove_proc_entry(HIMAX_PROC_GESTURE_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_SMWP_FILE, himax_touch_proc_dir); +#endif +#ifdef HX_HIGH_SENSE + remove_proc_entry(HIMAX_PROC_HSEN_FILE, himax_touch_proc_dir); +#endif +} + +/* File node for SMWP and HSEN - End*/ + +int himax_input_register(struct himax_ts_data *ts) +{ + int ret = 0; + + ret = himax_dev_set(ts); + if (ret < 0) + goto input_device_fail; + + set_bit(EV_SYN, ts->input_dev->evbit); + set_bit(EV_ABS, ts->input_dev->evbit); + set_bit(EV_KEY, ts->input_dev->evbit); +#if defined(HX_PLATFOME_DEFINE_KEY) + himax_platform_key(); +#else + set_bit(KEY_BACK, ts->input_dev->keybit); + set_bit(KEY_HOME, ts->input_dev->keybit); + set_bit(KEY_MENU, ts->input_dev->keybit); + set_bit(KEY_SEARCH, ts->input_dev->keybit); +#endif +#if defined(HX_SMART_WAKEUP) || defined(HX_PALM_REPORT) + set_bit(KEY_POWER, ts->input_dev->keybit); +#endif +#if defined(HX_SMART_WAKEUP) + set_bit(KEY_CUST_01, ts->input_dev->keybit); + set_bit(KEY_CUST_02, ts->input_dev->keybit); + set_bit(KEY_CUST_03, ts->input_dev->keybit); + set_bit(KEY_CUST_04, ts->input_dev->keybit); + set_bit(KEY_CUST_05, ts->input_dev->keybit); + set_bit(KEY_CUST_06, ts->input_dev->keybit); + set_bit(KEY_CUST_07, ts->input_dev->keybit); + set_bit(KEY_CUST_08, ts->input_dev->keybit); + set_bit(KEY_CUST_09, ts->input_dev->keybit); + set_bit(KEY_CUST_10, ts->input_dev->keybit); + set_bit(KEY_CUST_11, ts->input_dev->keybit); + set_bit(KEY_CUST_12, ts->input_dev->keybit); + set_bit(KEY_CUST_13, ts->input_dev->keybit); + set_bit(KEY_CUST_14, ts->input_dev->keybit); + set_bit(KEY_CUST_15, ts->input_dev->keybit); +#endif + set_bit(BTN_TOUCH, ts->input_dev->keybit); + set_bit(KEY_APPSELECT, ts->input_dev->keybit); + set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit); +#ifdef HX_PROTOCOL_A + /* ts->input_dev->mtsize = ts->nFinger_support; */ + input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID, 0, 3, 0, 0); +#else + set_bit(MT_TOOL_FINGER, ts->input_dev->keybit); +#if defined(HX_PROTOCOL_B_3PA) + input_mt_init_slots(ts->input_dev, ts->nFinger_support, INPUT_MT_DIRECT); +#else + input_mt_init_slots(ts->input_dev, ts->nFinger_support); +#endif +#endif + I("input_set_abs_params: mix_x %d, max_x %d, min_y %d, max_y %d\n", + ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_x_fuzz, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, ts->pdata->abs_y_min, ts->pdata->abs_y_max, ts->pdata->abs_y_fuzz, 0); + input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, ts->pdata->abs_pressure_min, ts->pdata->abs_pressure_max, ts->pdata->abs_pressure_fuzz, 0); +#ifndef HX_PROTOCOL_A + input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE, ts->pdata->abs_pressure_min, ts->pdata->abs_pressure_max, ts->pdata->abs_pressure_fuzz, 0); + input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, ts->pdata->abs_width_min, ts->pdata->abs_width_max, ts->pdata->abs_pressure_fuzz, 0); +#endif + /* input_set_abs_params(ts->input_dev, ABS_MT_AMPLITUDE, 0, ((ts->pdata->abs_pressure_max << 16) | 
ts->pdata->abs_width_max), 0, 0); */
+ /* input_set_abs_params(ts->input_dev, ABS_MT_POSITION, 0, (BIT(31) | (ts->pdata->abs_x_max << 16) | ts->pdata->abs_y_max), 0, 0); */
+
+ if (himax_input_register_device(ts->input_dev) == 0)
+ return NO_ERR;
+
+ ret = INPUT_REGISTER_FAIL;
+
+input_device_fail:
+ I("%s, input device register fail!\n", __func__);
+ return ret;
+}
+
+static void calcDataSize(uint8_t finger_num)
+{
+ struct himax_ts_data *ts_data = private_ts;
+
+ ts_data->coord_data_size = 4 * finger_num;
+ ts_data->area_data_size = ((finger_num / 4) + (finger_num % 4 ? 1 : 0)) * 4;
+ ts_data->coordInfoSize = ts_data->coord_data_size + ts_data->area_data_size + 4;
+ ts_data->raw_data_frame_size = 128 - ts_data->coord_data_size - ts_data->area_data_size - 4 - 4 - 1;
+
+ if (ts_data->raw_data_frame_size == 0) {
+ E("%s: could NOT calculate!\n", __func__);
+ return;
+ }
+
+ /* ceiling of (x*y + x + y) / raw_data_frame_size; keep the ternary parenthesized so it only adds the remainder frame */
+ ts_data->raw_data_nframes = ((uint32_t)ts_data->x_channel * ts_data->y_channel +
+ ts_data->x_channel + ts_data->y_channel) / ts_data->raw_data_frame_size +
+ ((((uint32_t)ts_data->x_channel * ts_data->y_channel +
+ ts_data->x_channel + ts_data->y_channel) % ts_data->raw_data_frame_size) ? 1 : 0);
+ I("%s: coord_data_size: %d, area_data_size:%d, raw_data_frame_size:%d, raw_data_nframes:%d", __func__, ts_data->coord_data_size, ts_data->area_data_size, ts_data->raw_data_frame_size, ts_data->raw_data_nframes);
+}
+
+static void calculate_point_number(void)
+{
+ HX_TOUCH_INFO_POINT_CNT = ic_data->HX_MAX_PT * 4;
+
+ if ((ic_data->HX_MAX_PT % 4) == 0)
+ HX_TOUCH_INFO_POINT_CNT += (ic_data->HX_MAX_PT / 4) * 4;
+ else
+ HX_TOUCH_INFO_POINT_CNT += ((ic_data->HX_MAX_PT / 4) + 1) * 4;
+}
+
+#ifdef HX_AUTO_UPDATE_FW
+static int i_update_FW(void)
+{
+ int upgrade_times = 0;
+ unsigned char *ImageBuffer = NULL;
+ int fullFileLength = 0;
+ int ret = 0, result = 0;
+
+ himax_int_enable(0);
+
+ I("file name = %s\n", i_CTPM_firmware_name);
+ ret = request_firmware(&i_CTPM_FW, i_CTPM_firmware_name, private_ts->dev);
+ if (ret < 0) {
+ E("%s: request_firmware failed in line %d, error code = %d\n", __func__, __LINE__, ret);
+ return OPEN_FILE_FAIL;
+ }
+
+ if (i_CTPM_FW != NULL) {
+ fullFileLength = i_CTPM_FW->size;
+ ImageBuffer = (unsigned char *)i_CTPM_FW->data;
+ } else {
+ I("%s: i_CTPM_FW = NULL\n", __func__);
+ return OPEN_FILE_FAIL;
+ }
+
+update_retry:
+
+ if (fullFileLength == FW_SIZE_32k)
+ ret = g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_32k(ImageBuffer, fullFileLength, false);
+ else if (fullFileLength == FW_SIZE_60k)
+ ret = g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_60k(ImageBuffer, fullFileLength, false);
+ else if (fullFileLength == FW_SIZE_64k)
+ ret = g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_64k(ImageBuffer, fullFileLength, false);
+ else if (fullFileLength == FW_SIZE_124k)
+ ret = g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_124k(ImageBuffer, fullFileLength, false);
+ else if (fullFileLength == FW_SIZE_128k)
+ ret = g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_128k(ImageBuffer, fullFileLength, false);
+
+ if (ret == 0) {
+ upgrade_times++;
+ E("%s: TP upgrade error, upgrade_times = %d\n", __func__, upgrade_times);
+
+ /* ImageBuffer still points into i_CTPM_FW, so keep the firmware held until the retries are done */
+ if (upgrade_times < 3)
+ goto update_retry;
+ else
+ result = -1; /* upgrade fail */
+ } else {
+ g_core_fp.fp_read_FW_ver();
+ g_core_fp.fp_touch_information();
+ result = 1;/* upgrade success */
+ I("%s: TP upgrade OK\n", __func__);
+ }
+
+ release_firmware(i_CTPM_FW);
+
+#ifdef HX_RST_PIN_FUNC
+ g_core_fp.fp_ic_reset(true, false);
+#else
+ g_core_fp.fp_sense_on(0x00);
+#endif
+ himax_int_enable(1);
+ return result;
+}
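+/* Note: i_update_FW() is only built with HX_AUTO_UPDATE_FW and runs from the himax_update_register() delayed work; it retries the flash upgrade up to three times before giving up. */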
+#endif + +static int himax_loadSensorConfig(struct himax_i2c_platform_data *pdata) +{ + I("%s: initialization complete\n", __func__); + return NO_ERR; +} + +#ifdef HX_ESD_RECOVERY +static void himax_esd_hw_reset(void) +{ + if (g_ts_dbg != 0) + I("%s: Entering\n", __func__); + + I("START_Himax TP: ESD - Reset\n"); + + if (private_ts->in_self_test == 1) { + I("In self test , not TP: ESD - Reset\n"); + return; + } + + g_core_fp.fp_esd_ic_reset(); + I("END_Himax TP: ESD - Reset\n"); +} +#endif + +#ifdef HX_SMART_WAKEUP +#ifdef HX_GESTURE_TRACK +static void gest_pt_log_coordinate(int rx, int tx) +{ + /* driver report x y with range 0 - 255 , we scale it up to x/y pixel */ + gest_pt_x[gest_pt_cnt] = rx * (ic_data->HX_X_RES) / 255; + gest_pt_y[gest_pt_cnt] = tx * (ic_data->HX_Y_RES) / 255; +} +#endif +static int himax_wake_event_parse(struct himax_ts_data *ts, int ts_status) +{ + uint8_t *buf; +#ifdef HX_GESTURE_TRACK + int tmp_max_x = 0x00, tmp_min_x = 0xFFFF, tmp_max_y = 0x00, tmp_min_y = 0xFFFF; + int gest_len; +#endif + int i = 0, check_FC = 0, gesture_flag = 0; + + if (g_ts_dbg != 0) + I("%s: Entering!, ts_status=%d\n", __func__, ts_status); + + buf = kcalloc(hx_touch_data->event_size, sizeof(uint8_t), GFP_KERNEL); + if (!buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + memcpy(buf, hx_touch_data->hx_event_buf, hx_touch_data->event_size); + + for (i = 0; i < GEST_PTLG_ID_LEN; i++) { + if (check_FC == 0) { + if ((buf[0] != 0x00) && ((buf[0] <= 0x0F) || (buf[0] == 0x80))) { + check_FC = 1; + gesture_flag = buf[i]; + } else { + check_FC = 0; + I("ID START at %x , value = %x skip the event\n", i, buf[i]); + break; + } + } else { + if (buf[i] != gesture_flag) { + check_FC = 0; + I("ID NOT the same %x != %x So STOP parse event\n", buf[i], gesture_flag); + break; + } + } + + I("0x%2.2X ", buf[i]); + + if (i % 8 == 7) + I("\n"); + } + + I("Himax gesture_flag= %x\n", gesture_flag); + I("Himax check_FC is %d\n", check_FC); + + if (check_FC == 0) + return 0; + + if (buf[GEST_PTLG_ID_LEN] != GEST_PTLG_HDR_ID1 || + buf[GEST_PTLG_ID_LEN + 1] != GEST_PTLG_HDR_ID2) + return 0; + +#ifdef HX_GESTURE_TRACK + + if (buf[GEST_PTLG_ID_LEN] == GEST_PTLG_HDR_ID1 && + buf[GEST_PTLG_ID_LEN + 1] == GEST_PTLG_HDR_ID2) { + gest_len = buf[GEST_PTLG_ID_LEN + 2]; + I("gest_len = %d ", gest_len); + i = 0; + gest_pt_cnt = 0; + I("gest doornidate start\n %s", __func__); + + while (i < (gest_len + 1) / 2) { + gest_pt_log_coordinate(buf[GEST_PTLG_ID_LEN + 4 + i * 2], buf[GEST_PTLG_ID_LEN + 4 + i * 2 + 1]); + i++; + I("gest_pt_x[%d]=%d\n", gest_pt_cnt, gest_pt_x[gest_pt_cnt]); + I("gest_pt_y[%d]=%d\n", gest_pt_cnt, gest_pt_y[gest_pt_cnt]); + gest_pt_cnt += 1; + } + + if (gest_pt_cnt) { + for (i = 0; i < gest_pt_cnt; i++) { + if (tmp_max_x < gest_pt_x[i]) + tmp_max_x = gest_pt_x[i]; + + if (tmp_min_x > gest_pt_x[i]) + tmp_min_x = gest_pt_x[i]; + + if (tmp_max_y < gest_pt_y[i]) + tmp_max_y = gest_pt_y[i]; + + if (tmp_min_y > gest_pt_y[i]) + tmp_min_y = gest_pt_y[i]; + } + + I("gest_point x_min= %d, x_max= %d, y_min= %d, y_max= %d\n", tmp_min_x, tmp_max_x, tmp_min_y, tmp_max_y); + gest_start_x = gest_pt_x[0]; + gn_gesture_coor[0] = gest_start_x; + gest_start_y = gest_pt_y[0]; + gn_gesture_coor[1] = gest_start_y; + gest_end_x = gest_pt_x[gest_pt_cnt - 1]; + gn_gesture_coor[2] = gest_end_x; + gest_end_y = gest_pt_y[gest_pt_cnt - 1]; + gn_gesture_coor[3] = gest_end_y; + gest_width = tmp_max_x - tmp_min_x; + gn_gesture_coor[4] = gest_width; + gest_height = tmp_max_y - tmp_min_y; + gn_gesture_coor[5] = 
gest_height; + gest_mid_x = (tmp_max_x + tmp_min_x) / 2; + gn_gesture_coor[6] = gest_mid_x; + gest_mid_y = (tmp_max_y + tmp_min_y) / 2; + gn_gesture_coor[7] = gest_mid_y; + gn_gesture_coor[8] = gest_mid_x; /* gest_up_x */ + gn_gesture_coor[9] = gest_mid_y - gest_height / 2; /* gest_up_y */ + gn_gesture_coor[10] = gest_mid_x; /* gest_down_x */ + gn_gesture_coor[11] = gest_mid_y + gest_height / 2; /* gest_down_y */ + gn_gesture_coor[12] = gest_mid_x - gest_width / 2; /* gest_left_x */ + gn_gesture_coor[13] = gest_mid_y; /* gest_left_y */ + gn_gesture_coor[14] = gest_mid_x + gest_width / 2; /* gest_right_x */ + gn_gesture_coor[15] = gest_mid_y; /* gest_right_y */ + } + } + +#endif + + if (gesture_flag != 0x80) { + if (!ts->gesture_cust_en[gesture_flag]) { + I("%s NOT report customer key\n ", __func__); + g_target_report_data->SMWP_event_chk = 0; + return 0;/* NOT report customer key */ + } + } else { + if (!ts->gesture_cust_en[0]) { + I("%s NOT report report double click\n", __func__); + g_target_report_data->SMWP_event_chk = 0; + return 0;/* NOT report power key */ + } + } + + if (gesture_flag == 0x80) { + g_target_report_data->SMWP_event_chk = EV_GESTURE_PWR; + return EV_GESTURE_PWR; + } + + g_target_report_data->SMWP_event_chk = gesture_flag; + return gesture_flag; +} + +static void himax_wake_event_report(void) +{ + int ret_event = g_target_report_data->SMWP_event_chk; + int KEY_EVENT = 0; + + if (g_ts_dbg != 0) + I("%s: Entering!\n", __func__); + + switch (ret_event) { + case EV_GESTURE_PWR: + KEY_EVENT = KEY_POWER; + break; + + case EV_GESTURE_01: + KEY_EVENT = KEY_CUST_01; + break; + + case EV_GESTURE_02: + KEY_EVENT = KEY_CUST_02; + break; + + case EV_GESTURE_03: + KEY_EVENT = KEY_CUST_03; + break; + + case EV_GESTURE_04: + KEY_EVENT = KEY_CUST_04; + break; + + case EV_GESTURE_05: + KEY_EVENT = KEY_CUST_05; + break; + + case EV_GESTURE_06: + KEY_EVENT = KEY_CUST_06; + break; + + case EV_GESTURE_07: + KEY_EVENT = KEY_CUST_07; + break; + + case EV_GESTURE_08: + KEY_EVENT = KEY_CUST_08; + break; + + case EV_GESTURE_09: + KEY_EVENT = KEY_CUST_09; + break; + + case EV_GESTURE_10: + KEY_EVENT = KEY_CUST_10; + break; + + case EV_GESTURE_11: + KEY_EVENT = KEY_CUST_11; + break; + + case EV_GESTURE_12: + KEY_EVENT = KEY_CUST_12; + break; + + case EV_GESTURE_13: + KEY_EVENT = KEY_CUST_13; + break; + + case EV_GESTURE_14: + KEY_EVENT = KEY_CUST_14; + break; + + case EV_GESTURE_15: + KEY_EVENT = KEY_CUST_15; + break; + } + + if (ret_event) { + I(" %s SMART WAKEUP KEY event %x press\n", __func__, KEY_EVENT); + input_report_key(private_ts->input_dev, KEY_EVENT, 1); + input_sync(private_ts->input_dev); + I(" %s SMART WAKEUP KEY event %x release\n", __func__, KEY_EVENT); + input_report_key(private_ts->input_dev, KEY_EVENT, 0); + input_sync(private_ts->input_dev); + FAKE_POWER_KEY_SEND = true; +#ifdef HX_GESTURE_TRACK + I("gest_start_x= %d, gest_start_y= %d, gest_end_x= %d, gest_end_y= %d\n", gest_start_x, gest_start_y, + gest_end_x, gest_end_y); + I("gest_width= %d, gest_height= %d, gest_mid_x= %d, gest_mid_y= %d\n", gest_width, gest_height, + gest_mid_x, gest_mid_y); + I("gest_up_x= %d, gest_up_y= %d, gest_down_x= %d, gest_down_y= %d\n", gn_gesture_coor[8], gn_gesture_coor[9], + gn_gesture_coor[10], gn_gesture_coor[11]); + I("gest_left_x= %d, gest_left_y= %d, gest_right_x= %d, gest_right_y= %d\n", gn_gesture_coor[12], gn_gesture_coor[13], + gn_gesture_coor[14], gn_gesture_coor[15]); +#endif + g_target_report_data->SMWP_event_chk = 0; + } +} + +#endif + +int himax_report_data_init(void) +{ + if 
(hx_touch_data->hx_coord_buf != NULL)
+ kfree(hx_touch_data->hx_coord_buf);
+
+ if (hx_touch_data->hx_rawdata_buf != NULL)
+ kfree(hx_touch_data->hx_rawdata_buf);
+
+#if defined(HX_SMART_WAKEUP)
+ hx_touch_data->event_size = g_core_fp.fp_get_touch_data_size();
+
+ if (hx_touch_data->hx_event_buf != NULL)
+ kfree(hx_touch_data->hx_event_buf);
+
+#endif
+ hx_touch_data->touch_all_size = g_core_fp.fp_get_touch_data_size();
+ hx_touch_data->raw_cnt_max = ic_data->HX_MAX_PT / 4;
+ hx_touch_data->raw_cnt_rmd = ic_data->HX_MAX_PT % 4;
+ /* more than 4 fingers */
+ if (hx_touch_data->raw_cnt_rmd != 0x00) {
+ hx_touch_data->rawdata_size = g_core_fp.fp_cal_data_len(hx_touch_data->raw_cnt_rmd, ic_data->HX_MAX_PT, hx_touch_data->raw_cnt_max);
+ hx_touch_data->touch_info_size = (ic_data->HX_MAX_PT + hx_touch_data->raw_cnt_max + 2) * 4;
+ } else { /* less than 4 fingers */
+ hx_touch_data->rawdata_size = g_core_fp.fp_cal_data_len(hx_touch_data->raw_cnt_rmd, ic_data->HX_MAX_PT, hx_touch_data->raw_cnt_max);
+ hx_touch_data->touch_info_size = (ic_data->HX_MAX_PT + hx_touch_data->raw_cnt_max + 1) * 4;
+ }
+
+ if ((ic_data->HX_TX_NUM * ic_data->HX_RX_NUM + ic_data->HX_TX_NUM + ic_data->HX_RX_NUM) % hx_touch_data->rawdata_size == 0)
+ hx_touch_data->rawdata_frame_size = (ic_data->HX_TX_NUM * ic_data->HX_RX_NUM + ic_data->HX_TX_NUM + ic_data->HX_RX_NUM) / hx_touch_data->rawdata_size;
+ else
+ hx_touch_data->rawdata_frame_size = (ic_data->HX_TX_NUM * ic_data->HX_RX_NUM + ic_data->HX_TX_NUM + ic_data->HX_RX_NUM) / hx_touch_data->rawdata_size + 1;
+
+
+ I("%s: rawdata_frame_size = %d ", __func__, hx_touch_data->rawdata_frame_size);
+ I("%s: ic_data->HX_MAX_PT:%d, hx_raw_cnt_max:%d, hx_raw_cnt_rmd:%d, g_hx_rawdata_size:%d, hx_touch_data->touch_info_size:%d\n", __func__, ic_data->HX_MAX_PT, hx_touch_data->raw_cnt_max, hx_touch_data->raw_cnt_rmd, hx_touch_data->rawdata_size, hx_touch_data->touch_info_size);
+ hx_touch_data->hx_coord_buf = kzalloc(sizeof(uint8_t) * (hx_touch_data->touch_info_size), GFP_KERNEL);
+
+ if (hx_touch_data->hx_coord_buf == NULL)
+ goto mem_alloc_fail;
+
+ g_target_report_data = kzalloc(sizeof(struct himax_target_report_data), GFP_KERNEL);
+ if (g_target_report_data == NULL)
+ goto mem_alloc_fail;
+ g_target_report_data->x = kzalloc(sizeof(int)*(ic_data->HX_MAX_PT), GFP_KERNEL);
+ if (g_target_report_data->x == NULL)
+ goto mem_alloc_fail;
+ g_target_report_data->y = kzalloc(sizeof(int)*(ic_data->HX_MAX_PT), GFP_KERNEL);
+ if (g_target_report_data->y == NULL)
+ goto mem_alloc_fail;
+ g_target_report_data->w = kzalloc(sizeof(int)*(ic_data->HX_MAX_PT), GFP_KERNEL);
+ if (g_target_report_data->w == NULL)
+ goto mem_alloc_fail;
+ g_target_report_data->finger_id = kzalloc(sizeof(int)*(ic_data->HX_MAX_PT), GFP_KERNEL);
+ if (g_target_report_data->finger_id == NULL)
+ goto mem_alloc_fail;
+#ifdef HX_SMART_WAKEUP
+ g_target_report_data->SMWP_event_chk = 0;
+#endif
+
+ hx_touch_data->hx_rawdata_buf = kzalloc(sizeof(uint8_t) * (hx_touch_data->touch_all_size - hx_touch_data->touch_info_size), GFP_KERNEL);
+
+ if (hx_touch_data->hx_rawdata_buf == NULL)
+ goto mem_alloc_fail;
+
+#if defined(HX_SMART_WAKEUP)
+ hx_touch_data->hx_event_buf = kzalloc(sizeof(uint8_t) * (hx_touch_data->event_size), GFP_KERNEL);
+
+ if (hx_touch_data->hx_event_buf == NULL)
+ goto mem_alloc_fail;
+
+#endif
+ return NO_ERR;
+mem_alloc_fail:
+ kfree(hx_touch_data->hx_coord_buf);
+ /* g_target_report_data may still be NULL here if its own allocation failed */
+ if (g_target_report_data != NULL) {
+ kfree(g_target_report_data->x);
+ kfree(g_target_report_data->y);
+ kfree(g_target_report_data->w);
+ kfree(g_target_report_data->finger_id);
+ kfree(g_target_report_data);
+ g_target_report_data = NULL;
+ }
+ 
kfree(hx_touch_data->hx_rawdata_buf); + +#if defined(HX_SMART_WAKEUP) + kfree(hx_touch_data->hx_event_buf); +#endif + I("%s: Memory allocate fail!\n", __func__); + return MEM_ALLOC_FAIL; +} + +/* start ts_work */ +#if defined(HX_USB_DETECT_GLOBAL) +void himax_cable_detect_func(bool force_renew) +{ + struct himax_ts_data *ts; + u32 connect_status = 0; + + connect_status = USB_detect_flag;/* upmu_is_chr_det(); */ + ts = private_ts; + + /* I("Touch: cable status=%d, cable_config=%p, usb_connected=%d\n", connect_status, ts->cable_config, ts->usb_connected); */ + if (ts->cable_config) { + if (((!!connect_status) != ts->usb_connected) || force_renew) { + if (!!connect_status) { + ts->cable_config[1] = 0x01; + ts->usb_connected = 0x01; + } else { + ts->cable_config[1] = 0x00; + ts->usb_connected = 0x00; + } + + g_core_fp.fp_usb_detect_set(ts->cable_config); + I("%s: Cable status change: 0x%2.2X\n", __func__, ts->usb_connected); + } + + /* else */ + /* I("%s: Cable status is the same as previous one, ignore.\n", __func__); */ + } +} +#endif + +static int himax_ts_work_status(struct himax_ts_data *ts) +{ + /* 1: normal, 2:SMWP */ + int result = HX_REPORT_COORD; + + hx_touch_data->diag_cmd = ts->diag_cmd; + if (hx_touch_data->diag_cmd) + result = HX_REPORT_COORD_RAWDATA; + +#ifdef HX_SMART_WAKEUP + if (atomic_read(&ts->suspend_mode) && (!FAKE_POWER_KEY_SEND) && (ts->SMWP_enable) && (!hx_touch_data->diag_cmd)) + result = HX_REPORT_SMWP_EVENT; +#endif + /* I("Now Status is %d\n", result); */ + return result; +} + +static int himax_touch_get(struct himax_ts_data *ts, uint8_t *buf, int ts_path, int ts_status) +{ + + if (g_ts_dbg != 0) + I("%s: Entering, ts_status=%d!\n", __func__, ts_status); + + switch (ts_path) { + /* normal */ + case HX_REPORT_COORD: + if ((HX_HW_RESET_ACTIVATE) +#ifdef HX_ESD_RECOVERY + || (HX_ESD_RESET_ACTIVATE) +#endif + ) { + if (!g_core_fp.fp_read_event_stack(buf, 128)) { + E("%s: can't read data from chip!\n", __func__); + ts_status = HX_TS_GET_DATA_FAIL; + goto END_FUNCTION; + } + } else { + if (!g_core_fp.fp_read_event_stack(buf, hx_touch_data->touch_info_size)) { + E("%s: can't read data from chip!\n", __func__); + ts_status = HX_TS_GET_DATA_FAIL; + goto END_FUNCTION; + } + } + break; +#if defined(HX_SMART_WAKEUP) + + /* SMWP */ + case HX_REPORT_SMWP_EVENT: + g_core_fp.fp_burst_enable(0); + + if (!g_core_fp.fp_read_event_stack(buf, hx_touch_data->event_size)) { + E("%s: can't read data from chip!\n", __func__); + ts_status = HX_TS_GET_DATA_FAIL; + goto END_FUNCTION; + } + break; +#endif + case HX_REPORT_COORD_RAWDATA: + if (!g_core_fp.fp_read_event_stack(buf, 128)) { + E("%s: can't read data from chip!\n", __func__); + ts_status = HX_TS_GET_DATA_FAIL; + goto END_FUNCTION; + } + break; + default: + break; + } + +END_FUNCTION: + return ts_status; +} + +/* start error_control*/ +static int himax_checksum_cal(struct himax_ts_data *ts, uint8_t *buf, int ts_path, int ts_status) +{ + uint16_t check_sum_cal = 0; + int32_t i = 0; + int length = 0; + int ret_val = ts_status; + + if (g_ts_dbg != 0) + I("%s: Entering, ts_status=%d!\n", __func__, ts_status); + + /* Normal */ + switch (ts_path) { + case HX_REPORT_COORD: + length = hx_touch_data->touch_info_size; + break; +#if defined(HX_SMART_WAKEUP) +/* SMWP */ + case HX_REPORT_SMWP_EVENT: + length = (GEST_PTLG_ID_LEN + GEST_PTLG_HDR_LEN); + break; +#endif + case HX_REPORT_COORD_RAWDATA: + length = hx_touch_data->touch_info_size; + break; + default: + I("%s, Neither Normal Nor SMWP error!\n", __func__); + ret_val = HX_PATH_FAIL; + goto 
END_FUNCTION; + + } + + for (i = 0; i < length; i++) + check_sum_cal += buf[i]; + if (check_sum_cal % 0x100 != 0) { + I("[HIMAX TP MSG] checksum fail : check_sum_cal: 0x%02X\n", check_sum_cal); + ret_val = HX_CHKSUM_FAIL; + } + +END_FUNCTION: + if (g_ts_dbg != 0) + I("%s: END, ret_val=%d!\n", __func__, ret_val); + return ret_val; +} + +#ifdef HX_ESD_RECOVERY +static int himax_ts_event_check(struct himax_ts_data *ts, uint8_t *buf, int ts_path, int ts_status) +{ + + int hx_EB_event = 0; + int hx_EC_event = 0; + int hx_ED_event = 0; + int hx_esd_event = 0; + int hx_zero_event = 0; + int shaking_ret = 0; + + int32_t loop_i = 0; + int length = 0; + int ret_val = ts_status; + + if (g_ts_dbg != 0) + I("%s: Entering, ts_status=%d!\n", __func__, ts_status); + + /* Normal */ + switch (ts_path) { + case HX_REPORT_COORD: + length = hx_touch_data->touch_info_size; + break; +#if defined(HX_SMART_WAKEUP) + /* SMWP */ + case HX_REPORT_SMWP_EVENT: + length = (GEST_PTLG_ID_LEN + GEST_PTLG_HDR_LEN); + break; +#endif + case HX_REPORT_COORD_RAWDATA: + length = hx_touch_data->touch_info_size; + break; + default: + I("%s, Neither Normal Nor SMWP error!\n", __func__); + ret_val = HX_PATH_FAIL; + goto END_FUNCTION; + } + + if (g_ts_dbg != 0) + I("Now Path=%d, Now status=%d, length=%d\n", ts_path, ts_status, length); + + for (loop_i = 0; loop_i < length; loop_i++) { + if (ts_path == HX_REPORT_COORD || ts_path == HX_REPORT_COORD_RAWDATA) { + /* case 1 ESD recovery flow */ + if (buf[loop_i] == 0xEB) + hx_EB_event++; + + else if (buf[loop_i] == 0xEC) + hx_EC_event++; + + else if (buf[loop_i] == 0xED) + hx_ED_event++; + + /* case 2 ESD recovery flow-Disable */ + else if (buf[loop_i] == 0x00) + hx_zero_event++; + else { + hx_EB_event = 0; + hx_EC_event = 0; + hx_ED_event = 0; + hx_zero_event = 0; + g_zero_event_count = 0; + } + } + } + + if (hx_EB_event == length) { + hx_esd_event = length; + hx_EB_event_flag++; + I("[HIMAX TP MSG]: ESD event checked - ALL 0xEB.\n"); + } else if (hx_EC_event == length) { + hx_esd_event = length; + hx_EC_event_flag++; + I("[HIMAX TP MSG]: ESD event checked - ALL 0xEC.\n"); + } else if (hx_ED_event == length) { + hx_esd_event = length; + hx_ED_event_flag++; + I("[HIMAX TP MSG]: ESD event checked - ALL 0xED.\n"); + } else { + hx_esd_event = 0; + } + + if ((hx_esd_event == length || hx_zero_event == length) + && (HX_HW_RESET_ACTIVATE == 0) + && (HX_ESD_RESET_ACTIVATE == 0) + && (hx_touch_data->diag_cmd == 0) + && (ts->in_self_test == 0)) { + + shaking_ret = g_core_fp.fp_ic_esd_recovery(hx_esd_event, hx_zero_event, length); + + if (shaking_ret == HX_ESD_EVENT) { + himax_esd_hw_reset(); + ret_val = HX_ESD_EVENT; + } else if (shaking_ret == HX_ZERO_EVENT_COUNT) + ret_val = HX_ZERO_EVENT_COUNT; + else { + I("I2C running. 
Nothing to be done!\n"); + ret_val = HX_IC_RUNNING; + } + } else if (HX_ESD_RESET_ACTIVATE) { + /* drop 1st interrupts after chip reset */ + HX_ESD_RESET_ACTIVATE = 0; + I("[HX_ESD_RESET_ACTIVATE]:%s: Back from reset, ready to serve.\n", __func__); + ret_val = HX_ESD_REC_OK; + } + +END_FUNCTION: + if (g_ts_dbg != 0) + I("%s: END, ret_val=%d!\n", __func__, ret_val); + + return ret_val; +} +#endif + +static int himax_err_ctrl(struct himax_ts_data *ts, uint8_t *buf, int ts_path, int ts_status) +{ + +#ifdef HX_RST_PIN_FUNC + if (HX_HW_RESET_ACTIVATE) { + /* drop 1st interrupts after chip reset */ + HX_HW_RESET_ACTIVATE = 0; + I("[HX_HW_RESET_ACTIVATE]:%s: Back from reset, ready to serve.\n", __func__); + ts_status = HX_RST_OK; + goto END_FUNCTION; + } +#endif + + ts_status = himax_checksum_cal(ts, buf, ts_path, ts_status); + if (ts_status == HX_CHKSUM_FAIL) + goto CHK_FAIL; + goto END_FUNCTION; + +CHK_FAIL: +#ifdef HX_ESD_RECOVERY + ts_status = himax_ts_event_check(ts, buf, ts_path, ts_status); +#endif + + +END_FUNCTION: + if (g_ts_dbg != 0) + I("%s: END, ts_status=%d!\n", __func__, ts_status); + return ts_status; +} +/* end error_control*/ + +/* start distribute_data*/ +static int himax_distribute_touch_data(uint8_t *buf, int ts_path, int ts_status) +{ + uint8_t hx_state_info_pos = hx_touch_data->touch_info_size - 3; + + if (g_ts_dbg != 0) + I("%s: Entering, ts_status=%d!\n", __func__, ts_status); + + if (ts_path == HX_REPORT_COORD) { + memcpy(hx_touch_data->hx_coord_buf, &buf[0], hx_touch_data->touch_info_size); + + if (buf[hx_state_info_pos] != 0xFF && buf[hx_state_info_pos + 1] != 0xFF) + memcpy(hx_touch_data->hx_state_info, &buf[hx_state_info_pos], 2); + else + memset(hx_touch_data->hx_state_info, 0x00, sizeof(hx_touch_data->hx_state_info)); + + if ((HX_HW_RESET_ACTIVATE) +#ifdef HX_ESD_RECOVERY + || (HX_ESD_RESET_ACTIVATE) +#endif + ) { + memcpy(hx_touch_data->hx_rawdata_buf, &buf[hx_touch_data->touch_info_size], hx_touch_data->touch_all_size - hx_touch_data->touch_info_size); + } + } else if (ts_path == HX_REPORT_COORD_RAWDATA) { + memcpy(hx_touch_data->hx_coord_buf, &buf[0], hx_touch_data->touch_info_size); + + if (buf[hx_state_info_pos] != 0xFF && buf[hx_state_info_pos + 1] != 0xFF) + memcpy(hx_touch_data->hx_state_info, &buf[hx_state_info_pos], 2); + else + memset(hx_touch_data->hx_state_info, 0x00, sizeof(hx_touch_data->hx_state_info)); + + memcpy(hx_touch_data->hx_rawdata_buf, &buf[hx_touch_data->touch_info_size], hx_touch_data->touch_all_size - hx_touch_data->touch_info_size); + } +#if defined(HX_SMART_WAKEUP) + else if (ts_path == HX_REPORT_SMWP_EVENT) + memcpy(hx_touch_data->hx_event_buf, buf, hx_touch_data->event_size); +#endif + else { + E("%s, Fail Path!\n", __func__); + ts_status = HX_PATH_FAIL; + } + + if (g_ts_dbg != 0) + I("%s: End, ts_status=%d!\n", __func__, ts_status); + return ts_status; +} +/* end assign_data*/ + +/* start parse_report_data*/ +int himax_parse_report_points(struct himax_ts_data *ts, int ts_path, int ts_status) +{ + int x = 0; + int y = 0; + int w = 0; + int base = 0; + int32_t loop_i = 0; + + if (g_ts_dbg != 0) + I("%s: start!\n", __func__); + + + ts->old_finger = ts->pre_finger_mask; + ts->pre_finger_mask = 0; + hx_touch_data->finger_num = hx_touch_data->hx_coord_buf[ts->coordInfoSize - 4] & 0x0F; + hx_touch_data->finger_on = 1; + AA_press = 1; + + g_target_report_data->finger_num = hx_touch_data->finger_num; + g_target_report_data->finger_on = hx_touch_data->finger_on; + + if (g_ts_dbg != 0) + I("%s:finger_num = 0x%2X, finger_on = %d\n", __func__, 
g_target_report_data->finger_num, g_target_report_data->finger_on); + + for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) { + base = loop_i * 4; + x = hx_touch_data->hx_coord_buf[base] << 8 | hx_touch_data->hx_coord_buf[base + 1]; + y = (hx_touch_data->hx_coord_buf[base + 2] << 8 | hx_touch_data->hx_coord_buf[base + 3]); + w = hx_touch_data->hx_coord_buf[(ts->nFinger_support * 4) + loop_i]; + + if (g_ts_dbg != 0) + D("%s: now parsing[%d]:x=%d, y=%d, w=%d\n", __func__, loop_i, x, y, w); + + if (x >= 0 && x <= ts->pdata->abs_x_max && y >= 0 && y <= ts->pdata->abs_y_max) { + hx_touch_data->finger_num--; + + g_target_report_data->x[loop_i] = x; + g_target_report_data->y[loop_i] = y; + g_target_report_data->w[loop_i] = w; + g_target_report_data->finger_id[loop_i] = 1; + + /* I("%s: g_target_report_data->x[loop_i]=%d, g_target_report_data->y[loop_i]=%d, g_target_report_data->w[loop_i]=%d", */ + /* __func__, g_target_report_data->x[loop_i], g_target_report_data->y[loop_i], g_target_report_data->w[loop_i]); */ + + + if (!ts->first_pressed) { + ts->first_pressed = 1; + I("S1@%d, %d\n", x, y); + } + + ts->pre_finger_data[loop_i][0] = x; + ts->pre_finger_data[loop_i][1] = y; + + ts->pre_finger_mask = ts->pre_finger_mask + (1 << loop_i); + } else {/* report coordinates */ + g_target_report_data->x[loop_i] = x; + g_target_report_data->y[loop_i] = y; + g_target_report_data->w[loop_i] = w; + g_target_report_data->finger_id[loop_i] = 0; + + if (loop_i == 0 && ts->first_pressed == 1) { + ts->first_pressed = 2; + I("E1@%d, %d\n", ts->pre_finger_data[0][0], ts->pre_finger_data[0][1]); + } + } + } + + if (g_ts_dbg != 0) { + for (loop_i = 0; loop_i < 10; loop_i++) + D("DBG X=%d Y=%d ID=%d\n", g_target_report_data->x[loop_i], g_target_report_data->y[loop_i], g_target_report_data->finger_id[loop_i]); + + D("DBG finger number %d\n", g_target_report_data->finger_num); + } + + if (g_ts_dbg != 0) + I("%s: end!\n", __func__); + return ts_status; +} + +static int himax_parse_report_data(struct himax_ts_data *ts, int ts_path, int ts_status) +{ + + if (g_ts_dbg != 0) + I("%s: start now_status=%d!\n", __func__, ts_status); + + + EN_NoiseFilter = (hx_touch_data->hx_coord_buf[HX_TOUCH_INFO_POINT_CNT + 2] >> 3); + /* I("EN_NoiseFilter=%d\n", EN_NoiseFilter); */ + EN_NoiseFilter = EN_NoiseFilter & 0x01; + /* I("EN_NoiseFilter2=%d\n", EN_NoiseFilter); */ +#if defined(HX_EN_SEL_BUTTON) || defined(HX_EN_MUT_BUTTON) + tpd_key = (hx_touch_data->hx_coord_buf[HX_TOUCH_INFO_POINT_CNT + 2] >> 4); + + /* All (VK+AA)leave */ + if (tpd_key == 0x0F) + tpd_key = 0x00; +#endif + p_point_num = ts->hx_point_num; + + switch (ts_path) { + case HX_REPORT_COORD: + ts_status = himax_parse_report_points(ts, ts_path, ts_status); + break; + case HX_REPORT_COORD_RAWDATA: + /* touch monitor rawdata */ + if (debug_data != NULL) { + if (debug_data->fp_set_diag_cmd(ic_data, hx_touch_data)) + I("%s: coordinate dump fail and bypass with checksum err\n", __func__); + } else + E("%s,There is no init set_diag_cmd\n", __func__); + + ts_status = himax_parse_report_points(ts, ts_path, ts_status); + break; +#ifdef HX_SMART_WAKEUP + case HX_REPORT_SMWP_EVENT: + himax_wake_event_parse(ts, ts_status); + break; +#endif + default: + E("%s:Fail Path!\n", __func__); + ts_status = HX_PATH_FAIL; + break; + } + if (g_ts_dbg != 0) + I("%s: end now_status=%d!\n", __func__, ts_status); + return ts_status; +} + +/* end parse_report_data */ + +static void himax_report_all_leave_event(struct himax_ts_data *ts) +{ + int loop_i = 0; + + for (loop_i = 0; loop_i < 
ts->nFinger_support; loop_i++) { +#ifndef HX_PROTOCOL_A + input_mt_slot(ts->input_dev, loop_i); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0); +#endif + } + input_report_key(ts->input_dev, BTN_TOUCH, 0); + input_sync(ts->input_dev); +} + +/* start report_data */ +#if defined(HX_EN_SEL_BUTTON) || defined(HX_EN_MUT_BUTTON) +static void himax_key_report_operation(int tp_key_index, struct himax_ts_data *ts) +{ + uint16_t x_position = 0, y_position = 0; + + if (g_ts_dbg != 0) + I("%s: Entering\n", __func__); + + if (tp_key_index != 0x00) { + I("virtual key index =%x\n", tp_key_index); + + if (tp_key_index == 0x01) { + vk_press = 1; + I("back key pressed\n"); + + if (ts->pdata->virtual_key) { + if (ts->button[0].index) { + x_position = (ts->button[0].x_range_min + ts->button[0].x_range_max) / 2; + y_position = (ts->button[0].y_range_min + ts->button[0].y_range_max) / 2; + } + +#ifdef HX_PROTOCOL_A + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x_position); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y_position); + input_mt_sync(ts->input_dev); +#else + input_mt_slot(ts->input_dev, 0); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1); + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 100); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x_position); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y_position); +#endif + } else { + input_report_key(ts->input_dev, KEY_BACK, 1); + } + } else if (tp_key_index == 0x02) { + vk_press = 1; + I("home key pressed\n"); + + if (ts->pdata->virtual_key) { + if (ts->button[1].index) { + x_position = (ts->button[1].x_range_min + ts->button[1].x_range_max) / 2; + y_position = (ts->button[1].y_range_min + ts->button[1].y_range_max) / 2; + } + +#ifdef HX_PROTOCOL_A + input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0); + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x_position); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y_position); + input_mt_sync(ts->input_dev); +#else + input_mt_slot(ts->input_dev, 0); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1); + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 100); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x_position); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y_position); +#endif + } else + input_report_key(ts->input_dev, KEY_HOME, 1); + + } else if (tp_key_index == 0x04) { + vk_press = 1; + I("APP_switch key pressed\n"); + + if (ts->pdata->virtual_key) { + if (ts->button[2].index) { + x_position = (ts->button[2].x_range_min + ts->button[2].x_range_max) / 2; + y_position = (ts->button[2].y_range_min + ts->button[2].y_range_max) / 2; + } + +#ifdef HX_PROTOCOL_A + input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 0); + input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 100); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x_position); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y_position); + input_mt_sync(ts->input_dev); +#else + input_mt_slot(ts->input_dev, 0); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1); + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 100); + 
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 100); + input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 100); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x_position); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y_position); +#endif + } else + input_report_key(ts->input_dev, KEY_APPSELECT, 1); + } + + input_sync(ts->input_dev); + } else { /* tp_key_index =0x00 */ + I("virtual key released\n"); + vk_press = 0; +#ifndef HX_PROTOCOL_A + input_mt_slot(ts->input_dev, 0); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0); +#else + input_mt_sync(ts->input_dev); +#endif + input_report_key(ts->input_dev, KEY_BACK, 0); + input_report_key(ts->input_dev, KEY_HOME, 0); + input_report_key(ts->input_dev, KEY_APPSELECT, 0); +#ifndef HX_PROTOCOL_A + input_sync(ts->input_dev); +#endif + } +} + +void himax_finger_report_key(struct himax_ts_data *ts) +{ + if (hx_point_num != 0) { + /* Touch KEY */ + if ((tpd_key_old != 0x00) && (tpd_key == 0x00)) { + /* temp_x[0] = 0xFFFF; */ + /* temp_y[0] = 0xFFFF; */ + /* temp_x[1] = 0xFFFF; */ + /* temp_y[1] = 0xFFFF; */ + hx_touch_data->finger_on = 0; +#ifdef HX_PROTOCOL_A + input_report_key(ts->input_dev, BTN_TOUCH, hx_touch_data->finger_on); +#endif + himax_key_report_operation(tpd_key, ts); + } + +#ifndef HX_PROTOCOL_A + input_report_key(ts->input_dev, BTN_TOUCH, hx_touch_data->finger_on); +#endif + input_sync(ts->input_dev); + } +} + +void himax_finger_leave_key(struct himax_ts_data *ts) +{ + if (tpd_key != 0x00) { + hx_touch_data->finger_on = 1; +#ifdef HX_PROTOCOL_A + input_report_key(ts->input_dev, BTN_TOUCH, hx_touch_data->finger_on); +#endif + himax_key_report_operation(tpd_key, ts); + } else if ((tpd_key_old != 0x00) && (tpd_key == 0x00)) { + hx_touch_data->finger_on = 0; +#ifdef HX_PROTOCOL_A + input_report_key(ts->input_dev, BTN_TOUCH, hx_touch_data->finger_on); +#endif + himax_key_report_operation(tpd_key, ts); + } + +#ifndef HX_PROTOCOL_A + input_report_key(ts->input_dev, BTN_TOUCH, hx_touch_data->finger_on); +#endif + input_sync(ts->input_dev); +} + +static void himax_report_key(struct himax_ts_data *ts) +{ + if (hx_point_num != 0) + himax_finger_report_key(ts); /* Touch KEY */ + else + himax_finger_leave_key(ts); /* Key */ + + tpd_key_old = tpd_key; + Last_EN_NoiseFilter = EN_NoiseFilter; +} +#endif + +/* start report_point*/ +static void himax_finger_report(struct himax_ts_data *ts) +{ + int i = 0; + bool valid = false; + + if (g_ts_dbg != 0) { + I("%s:start\n", __func__); + I("hx_touch_data->finger_num=%d\n", hx_touch_data->finger_num); + } + for (i = 0; i < ts->nFinger_support; i++) { + if (g_target_report_data->x[i] >= 0 && g_target_report_data->x[i] <= ts->pdata->abs_x_max + && g_target_report_data->y[i] >= 0 && g_target_report_data->y[i] <= ts->pdata->abs_y_max) + valid = true; + else + valid = false; + if (g_ts_dbg != 0) + I("valid=%d\n", valid); + if (valid) { + if (g_ts_dbg != 0) + I("g_target_report_data->x[i]=%d, g_target_report_data->y[i]=%d, g_target_report_data->w[i]=%d\n", + g_target_report_data->x[i], g_target_report_data->y[i], g_target_report_data->w[i]); +#ifndef HX_PROTOCOL_A + input_mt_slot(ts->input_dev, i); +#endif + input_report_key(ts->input_dev, BTN_TOUCH, g_target_report_data->finger_on); + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, g_target_report_data->w[i]); + input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, i); +#ifndef HX_PROTOCOL_A + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, g_target_report_data->w[i]); + input_report_abs(ts->input_dev, ABS_MT_PRESSURE, 
g_target_report_data->w[i]); +#endif + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, g_target_report_data->x[i]); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, g_target_report_data->y[i]); +#ifndef HX_PROTOCOL_A + ts->last_slot = i; + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 1); +#else + input_mt_sync(ts->input_dev); +#endif + + } else { + input_mt_slot(ts->input_dev, i); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0); + } + } + + input_report_key(ts->input_dev, BTN_TOUCH, g_target_report_data->finger_on); + input_sync(ts->input_dev); + + if (g_ts_dbg != 0) + I("%s:end\n", __func__); +} + +static void himax_finger_leave(struct himax_ts_data *ts) +{ + int32_t loop_i = 0; + + if (g_ts_dbg != 0) + I("%s: start!\n", __func__); +#if defined(HX_PALM_REPORT) + if (himax_palm_detect(hx_touch_data->hx_coord_buf) == NO_ERR) { + I(" %s HX_PALM_REPORT KEY power event press\n", __func__); + input_report_key(ts->input_dev, KEY_POWER, 1); + input_sync(ts->input_dev); + msleep(100); + + I(" %s HX_PALM_REPORT KEY power event release\n", __func__); + input_report_key(ts->input_dev, KEY_POWER, 0); + input_sync(ts->input_dev); + return; + } +#endif + + hx_touch_data->finger_on = 0; + AA_press = 0; + +#ifdef HX_PROTOCOL_A + input_mt_sync(ts->input_dev); +#endif + + for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) { + if (((ts->pre_finger_mask >> loop_i) & 1) == 1) { + input_mt_slot(ts->input_dev, loop_i); + input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, 0); + } + } + + if (ts->pre_finger_mask > 0) + ts->pre_finger_mask = 0; + + if (ts->first_pressed == 1) { + ts->first_pressed = 2; + I("E1@%d, %d\n", ts->pre_finger_data[0][0], ts->pre_finger_data[0][1]); + } + + /* if (ts->debug_log_level & BIT(1)) */ + /* himax_log_touch_event(x, y, w, loop_i, EN_NoiseFilter, HX_FINGER_LEAVE); */ + + input_report_key(ts->input_dev, BTN_TOUCH, hx_touch_data->finger_on); + input_sync(ts->input_dev); + + + if (g_ts_dbg != 0) + I("%s: end!\n", __func__); +} + +static void himax_report_points(struct himax_ts_data *ts) +{ + if (g_ts_dbg != 0) + I("%s: start!\n", __func__); + + if (ts->hx_point_num != 0) + himax_finger_report(ts); + else + himax_finger_leave(ts); + + + Last_EN_NoiseFilter = EN_NoiseFilter; + + if (g_ts_dbg != 0) + I("%s: end!\n", __func__); +} +/* end report_points */ + +int himax_report_data(struct himax_ts_data *ts, int ts_path, int ts_status) +{ + if (g_ts_dbg != 0) + I("%s: Entering, ts_status=%d!\n", __func__, ts_status); + + if (ts_path == HX_REPORT_COORD || ts_path == HX_REPORT_COORD_RAWDATA) { + if (hx_touch_data->hx_coord_buf[HX_TOUCH_INFO_POINT_CNT] == 0xff) + ts->hx_point_num = 0; + else + ts->hx_point_num = hx_touch_data->hx_coord_buf[HX_TOUCH_INFO_POINT_CNT] & 0x0f; + + /* Touch Point information */ + himax_report_points(ts); + +#if defined(HX_EN_SEL_BUTTON) || defined(HX_EN_MUT_BUTTON) + /* report key(question mark) */ + if (tpd_key && tpd_key_old) + himax_report_key(ts); +#endif + } + +#ifdef HX_SMART_WAKEUP + else if (ts_path == HX_REPORT_SMWP_EVENT) { + __pm_wakeup_event(&ts->ts_SMWP_wake_src, TS_WAKE_LOCK_TIMEOUT); + himax_wake_event_report(); + } +#endif + + else { + E("%s:Fail Path!\n", __func__); + ts_status = HX_PATH_FAIL; + } + + if (g_ts_dbg != 0) + I("%s: END, ts_status=%d!\n", __func__, ts_status); + return ts_status; +} +/* end report_data */ + +static int himax_ts_operation(struct himax_ts_data *ts, int ts_path, int ts_status) +{ + uint8_t hw_reset_check[2]; + uint8_t buf[128]; + + memset(buf, 0x00, sizeof(buf)); + 
memset(hw_reset_check, 0x00, sizeof(hw_reset_check)); + + ts_status = himax_touch_get(ts, buf, ts_path, ts_status); + if (ts_status == HX_TS_GET_DATA_FAIL) + goto END_FUNCTION; + + ts_status = himax_err_ctrl(ts, buf, ts_path, ts_status); + if (ts_status == HX_REPORT_DATA || ts_status == HX_TS_NORMAL_END) { + ts_status = himax_distribute_touch_data(buf, ts_path, ts_status); + ts_status = himax_parse_report_data(ts, ts_path, ts_status); + } else + goto END_FUNCTION; + + ts_status = himax_report_data(ts, ts_path, ts_status); + + +END_FUNCTION: + return ts_status; +} + +void himax_ts_work(struct himax_ts_data *ts) +{ + + int ts_status = HX_TS_NORMAL_END; + int ts_path = 0; + + if (debug_data != NULL) + debug_data->fp_ts_dbg_func(ts, HX_FINGER_ON); + +#if defined(HX_USB_DETECT_GLOBAL) + himax_cable_detect_func(false); +#endif + + ts_path = himax_ts_work_status(ts); + switch (ts_path) { + case HX_REPORT_COORD: + ts_status = himax_ts_operation(ts, ts_path, ts_status); + break; + case HX_REPORT_SMWP_EVENT: + ts_status = himax_ts_operation(ts, ts_path, ts_status); + break; + case HX_REPORT_COORD_RAWDATA: + ts_status = himax_ts_operation(ts, ts_path, ts_status); + break; + default: + E("%s:Path Fault! value=%d\n", __func__, ts_path); + goto END_FUNCTION; + } + + if (ts_status == HX_TS_GET_DATA_FAIL) + goto GET_TOUCH_FAIL; + else + goto END_FUNCTION; + +GET_TOUCH_FAIL: + I("%s: Now reset the Touch chip.\n", __func__); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, true); +#endif +END_FUNCTION: + if (debug_data != NULL) + debug_data->fp_ts_dbg_func(ts, HX_FINGER_LEAVE); + + +} +/* end ts_work */ +enum hrtimer_restart himax_ts_timer_func(struct hrtimer *timer) +{ + struct himax_ts_data *ts; + + ts = container_of(timer, struct himax_ts_data, timer); + queue_work(ts->himax_wq, &ts->work); + hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + +#if defined(HX_USB_DETECT_CALLBACK) +static void himax_cable_tp_status_handler_func(int connect_status) +{ + struct himax_ts_data *ts; + + I("Touch: cable change to %d\n", connect_status); + ts = private_ts; + + if (ts->cable_config) { + if (!atomic_read(&ts->suspend_mode)) { + if ((!!connect_status) != ts->usb_connected) { + if (!!connect_status) { + ts->cable_config[1] = 0x01; + ts->usb_connected = 0x01; + } + + else { + ts->cable_config[1] = 0x00; + ts->usb_connected = 0x00; + } + + if (himax_bus_master_write(ts->cable_config, sizeof(ts->cable_config), HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail: cable_config!\n", __func__); + return; + } + I("%s: Cable status change: 0x%2.2X\n", __func__, ts->cable_config[1]); + } else + I("%s: Cable status is the same as previous one, ignore.\n", __func__); + + } else { + if (connect_status) + ts->usb_connected = 0x01; + else + ts->usb_connected = 0x00; + + I("%s: Cable status remembered: 0x%2.2X\n", __func__, ts->usb_connected); + } + } +} + +static const struct t_cable_status_notifier himax_cable_status_handler = { + .name = "usb_tp_connected", + .func = himax_cable_tp_status_handler_func, +}; + +#endif + +#ifdef HX_AUTO_UPDATE_FW +static void himax_update_register(struct work_struct *work) +{ + I(" %s in", __func__); + + if (i_update_FW() <= 0) + I("FW =NOT UPDATE=\n"); + else + I("Have new FW =UPDATE=\n"); + +} +#endif + +#ifdef CONFIG_DRM +static void himax_fb_register(struct work_struct *work) +{ + int ret = 0; + struct himax_ts_data *ts = container_of(work, struct himax_ts_data, work_att.work); + + I(" %s in\n", __func__); + ts->fb_notif.notifier_call = 
fb_notifier_callback; + ret = msm_drm_register_client(&ts->fb_notif); + if (ret) + E(" Unable to register fb_notifier: %d\n", ret); +} + +#elif defined CONFIG_FB +static void himax_fb_register(struct work_struct *work) +{ + int ret = 0; + struct himax_ts_data *ts = container_of(work, struct himax_ts_data, work_att.work); + + I(" %s in\n", __func__); + ts->fb_notif.notifier_call = fb_notifier_callback; + ret = fb_register_client(&ts->fb_notif); + + if (ret) + E(" Unable to register fb_notifier: %d\n", ret); +} +#endif + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) +static void himax_ito_test_work(struct work_struct *work) +{ + I(" %s in\n", __func__); + himax_ito_test(); +} +#endif + +int himax_chip_common_init(void) +{ +#if defined(HX_AUTO_UPDATE_FW) || defined(HX_ZERO_FLASH) + bool auto_update_flag = false; +#endif + int ret = 0, err = 0; + struct himax_ts_data *ts = private_ts; + struct himax_i2c_platform_data *pdata; + + I("PDATA START\n"); + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (pdata == NULL) { /* Allocate Platform data space */ + err = -ENOMEM; + goto err_dt_platform_data_fail; + } + + I("ic_data START\n"); + ic_data = kzalloc(sizeof(*ic_data), GFP_KERNEL); + if (ic_data == NULL) { /* Allocate IC data space */ + err = -ENOMEM; + goto err_dt_ic_data_fail; + } + + /* allocate report data */ + hx_touch_data = kzalloc(sizeof(struct himax_report_data), GFP_KERNEL); + if (hx_touch_data == NULL) { + err = -ENOMEM; + goto err_alloc_touch_data_failed; + } + + if (himax_parse_dt(ts, pdata) < 0) { + I(" pdata is NULL for DT\n"); + goto err_alloc_dt_pdata_failed; + } + +#ifdef HX_RST_PIN_FUNC + ts->rst_gpio = pdata->gpio_reset; +#endif + himax_gpio_power_config(pdata); +#ifndef CONFIG_OF + + if (pdata->power) { + ret = pdata->power(1); + + if (ret < 0) { + E("%s: power on failed\n", __func__); + goto err_power_failed; + } + } + +#endif + + if (g_core_fp.fp_chip_detect != NULL && g_core_fp.fp_chip_init != NULL) { + if (g_core_fp.fp_chip_detect() != false) { + g_core_fp.fp_chip_init(); + } else { + E("%s: chip detect failed!\n", __func__); + goto error_ic_detect_failed; + } + } else { + E("%s: function point is NULL!\n", __func__); + goto error_ic_detect_failed; + } + + if (pdata->virtual_key) + ts->button = pdata->virtual_key; + +#ifdef HX_AUTO_UPDATE_FW + auto_update_flag = (!g_core_fp.fp_calculateChecksum(false)); + auto_update_flag |= g_core_fp.fp_flash_lastdata_check(); + if (auto_update_flag) + goto FW_force_upgrade; +#endif + g_core_fp.fp_read_FW_ver(); + +#ifdef HX_AUTO_UPDATE_FW +FW_force_upgrade: + auto_update_flag |= ((ic_data->vendor_fw_ver < g_i_FW_VER) || (ic_data->vendor_config_ver < g_i_CFG_VER)); + /* Not sure to do */ + /* auto_update_flag |= ((ic_data->vendor_cid_maj_ver != g_i_CID_MAJ) || (ic_data->vendor_cid_min_ver < g_i_CID_MIN)); */ + if (auto_update_flag) { + ts->himax_update_wq = create_singlethread_workqueue("HMX_update_request"); + if (!ts->himax_update_wq) { + E(" allocate syn_update_wq failed\n"); + err = -ENOMEM; + goto err_update_wq_failed; + } + INIT_DELAYED_WORK(&ts->work_update, himax_update_register); + queue_delayed_work(ts->himax_update_wq, &ts->work_update, msecs_to_jiffies(2000)); + } +#endif +#ifdef HX_ZERO_FLASH + auto_update_flag = true; + ts->himax_0f_update_wq = create_singlethread_workqueue("HMX_0f_update_request"); + INIT_DELAYED_WORK(&ts->work_0f_update, g_core_fp.fp_0f_operation); + queue_delayed_work(ts->himax_0f_update_wq, &ts->work_0f_update, msecs_to_jiffies(2000)); +#endif + + /* Himax Power On and Load Config */ + if 
(himax_loadSensorConfig(pdata)) { + E("%s: Load Sesnsor configuration failed, unload driver.\n", __func__); + goto err_detect_failed; + } + + g_core_fp.fp_power_on_init(); + calculate_point_number(); + +#ifdef CONFIG_OF + ts->power = pdata->power; +#endif + ts->pdata = pdata; + ts->x_channel = ic_data->HX_RX_NUM; + ts->y_channel = ic_data->HX_TX_NUM; + ts->nFinger_support = ic_data->HX_MAX_PT; + /* calculate the i2c data size */ + calcDataSize(ts->nFinger_support); + I("%s: calcDataSize complete\n", __func__); +#ifdef CONFIG_OF + ts->pdata->abs_pressure_min = 0; + ts->pdata->abs_pressure_max = 200; + ts->pdata->abs_width_min = 0; + ts->pdata->abs_width_max = 200; + pdata->cable_config[0] = 0xF0; + pdata->cable_config[1] = 0x00; +#endif + ts->suspended = false; +#if defined(HX_USB_DETECT_CALLBACK) || defined(HX_USB_DETECT_GLOBAL) + ts->usb_connected = 0x00; + ts->cable_config = pdata->cable_config; +#endif +#ifdef HX_PROTOCOL_A + ts->protocol_type = PROTOCOL_TYPE_A; +#else + ts->protocol_type = PROTOCOL_TYPE_B; +#endif + I("%s: Use Protocol Type %c\n", __func__, + ts->protocol_type == PROTOCOL_TYPE_A ? 'A' : 'B'); + + ret = himax_input_register(ts); + if (ret) { + E("%s: Unable to register %s input device\n", + __func__, ts->input_dev->name); + goto err_input_register_device_failed; + } + +#if defined(CONFIG_DRM) || defined(CONFIG_FB) + + ts->himax_att_wq = create_singlethread_workqueue("HMX_ATT_request"); + + if (!ts->himax_att_wq) { + E(" allocate syn_att_wq failed\n"); + err = -ENOMEM; + goto err_get_intr_bit_failed; + } + + INIT_DELAYED_WORK(&ts->work_att, himax_fb_register); + queue_delayed_work(ts->himax_att_wq, &ts->work_att, msecs_to_jiffies(15000)); +#endif + +#ifdef HX_SMART_WAKEUP + ts->SMWP_enable = 0; + wakeup_source_init(&ts->ts_SMWP_wake_src, WAKE_LOCK_SUSPEND, HIMAX_common_NAME); +#endif +#ifdef HX_HIGH_SENSE + ts->HSEN_enable = 0; +#endif +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + ts->ito_test_wq = create_singlethread_workqueue("himax_ito_test_wq"); + + if (!ts->ito_test_wq) { + E("%s: ito test workqueue failed\n", __func__); + err = -ENOMEM; + goto err_ito_test_wq_failed; + } + + INIT_WORK(&ts->ito_test_work, himax_ito_test_work); +#endif + + /* touch data init */ + err = himax_report_data_init(); + if (err) + goto err_report_data_init_failed; + + if (himax_common_proc_init()) { + E(" %s: himax_common proc_init failed!\n", __func__); + goto err_creat_proc_file_failed; + } + +#if defined(HX_USB_DETECT_CALLBACK) + + if (ts->cable_config) + cable_detect_register_notifier(&himax_cable_status_handler); + +#endif + err = himax_ts_register_interrupt(); + + if (err) + goto err_register_interrupt_failed; + + +#ifdef CONFIG_TOUCHSCREEN_HIMAX_DEBUG + if (himax_debug_init()) + E(" %s: debug initial failed!\n", __func__); +#endif + +#if defined(HX_AUTO_UPDATE_FW) || defined(HX_ZERO_FLASH) + + if (auto_update_flag) + himax_int_enable(0); + +#endif + return 0; +err_register_interrupt_failed: +remove_proc_entry(HIMAX_PROC_TOUCH_FOLDER, NULL); +err_creat_proc_file_failed: +err_report_data_init_failed: +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + destroy_workqueue(ts->ito_test_wq); +err_ito_test_wq_failed: +#endif +#ifdef HX_SMART_WAKEUP + wakeup_source_trash(&ts->ts_SMWP_wake_src); +#endif +#if defined(CONFIG_FB) || defined(CONFIG_DRM) +err_get_intr_bit_failed: + cancel_delayed_work_sync(&ts->work_att); + destroy_workqueue(ts->himax_att_wq); +#endif +err_input_register_device_failed: + input_free_device(ts->input_dev); +err_detect_failed: +#ifdef HX_AUTO_UPDATE_FW + if 
(auto_update_flag) {
+ cancel_delayed_work_sync(&ts->work_update);
+ destroy_workqueue(ts->himax_update_wq);
+ }
+err_update_wq_failed:
+#endif
+
+error_ic_detect_failed:
+ if (gpio_is_valid(pdata->gpio_irq))
+ gpio_free(pdata->gpio_irq);
+
+#ifdef HX_RST_PIN_FUNC
+
+ if (gpio_is_valid(pdata->gpio_reset))
+ gpio_free(pdata->gpio_reset);
+
+#endif
+
+#ifndef CONFIG_OF
+err_power_failed:
+#endif
+
+err_alloc_dt_pdata_failed:
+ kfree(hx_touch_data);
+err_alloc_touch_data_failed:
+ kfree(ic_data);
+err_dt_ic_data_fail:
+ kfree(pdata);
+err_dt_platform_data_fail:
+ kfree(ts);
+ probe_fail_flag = 1;
+ return err;
+}
+
+void himax_chip_common_deinit(void)
+{
+ struct himax_ts_data *ts = private_ts;
+
+#ifdef CONFIG_TOUCHSCREEN_HIMAX_DEBUG
+ himax_debug_remove();
+#endif
+
+ /* remove the proc files before removing their parent folder */
+ himax_common_proc_deinit();
+ remove_proc_entry(HIMAX_PROC_TOUCH_FOLDER, NULL);
+
+ if (!ts->use_irq) {
+ hrtimer_cancel(&ts->timer);
+ destroy_workqueue(ts->himax_wq);
+ }
+
+#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST)
+ destroy_workqueue(ts->ito_test_wq);
+#endif
+#ifdef HX_SMART_WAKEUP
+ wakeup_source_trash(&ts->ts_SMWP_wake_src);
+#endif
+
+#ifdef CONFIG_DRM
+ if (msm_drm_unregister_client(&ts->fb_notif))
+ E("Error occurred while unregistering fb_notifier.\n");
+ cancel_delayed_work_sync(&ts->work_att);
+ destroy_workqueue(ts->himax_att_wq);
+#elif defined(CONFIG_FB)
+ if (fb_unregister_client(&ts->fb_notif))
+ E("Error occurred while unregistering fb_notifier.\n");
+ cancel_delayed_work_sync(&ts->work_att);
+ destroy_workqueue(ts->himax_att_wq);
+#endif
+ /* the input device was registered in himax_input_register(), so unregister it here */
+ input_unregister_device(ts->input_dev);
+#ifdef HX_ZERO_FLASH
+ cancel_delayed_work_sync(&ts->work_0f_update);
+ destroy_workqueue(ts->himax_0f_update_wq);
+#endif
+#ifdef HX_AUTO_UPDATE_FW
+ cancel_delayed_work_sync(&ts->work_update);
+ destroy_workqueue(ts->himax_update_wq);
+#endif
+ if (gpio_is_valid(ts->pdata->gpio_irq))
+ gpio_free(ts->pdata->gpio_irq);
+#ifdef HX_RST_PIN_FUNC
+ if (gpio_is_valid(ts->pdata->gpio_reset))
+ gpio_free(ts->pdata->gpio_reset);
+#endif
+
+ kfree(hx_touch_data);
+ kfree(ic_data);
+ kfree(ts->pdata);
+ kfree(ts);
+ probe_fail_flag = 0;
+}
+
+int himax_chip_common_suspend(struct himax_ts_data *ts)
+{
+ int ret;
+
+ if (ts->suspended) {
+ I("%s: Already suspended. 
Skipped.\n", __func__); + return 0; + } + + ts->suspended = true; + I("%s: enter\n", __func__); + + if (debug_data != NULL && debug_data->flash_dump_going == true) { + I("[himax] %s: Flash dump is going, reject suspend\n", __func__); + return 0; + } + +#if defined(HX_SMART_WAKEUP) || defined(HX_HIGH_SENSE) || defined(HX_USB_DETECT_GLOBAL) +#ifndef HX_RESUME_SEND_CMD + g_core_fp.fp_resend_cmd_func(ts->suspended); +#endif +#endif +#ifdef HX_SMART_WAKEUP + + if (ts->SMWP_enable) { + atomic_set(&ts->suspend_mode, 1); + ts->pre_finger_mask = 0; + FAKE_POWER_KEY_SEND = false; + I("[himax] %s: SMART_WAKEUP enable, reject suspend\n", __func__); + return 0; + } + +#endif + himax_int_enable(0); + g_core_fp.fp_suspend_ic_action(); + + if (!ts->use_irq) { + ret = cancel_work_sync(&ts->work); + + if (ret) + himax_int_enable(1); + } + + /* ts->first_pressed = 0; */ + atomic_set(&ts->suspend_mode, 1); + ts->pre_finger_mask = 0; + + if (ts->pdata->powerOff3V3 && ts->pdata->power) + ts->pdata->power(0); + + I("%s: END\n", __func__); + return 0; +} + +int himax_chip_common_resume(struct himax_ts_data *ts) +{ + I("%s: enter\n", __func__); + + if (ts->suspended == false) { + I("%s: It had entered resume, skip this step\n", __func__); + return 0; + } + ts->suspended = false; + + atomic_set(&ts->suspend_mode, 0); + + if (ts->pdata->powerOff3V3 && ts->pdata->power) + ts->pdata->power(1); + +#if defined(HX_SMART_WAKEUP) || defined(HX_HIGH_SENSE) || defined(HX_USB_DETECT_GLOBAL) + g_core_fp.fp_resend_cmd_func(ts->suspended); +#elif defined(HX_RESUME_HW_RESET) + g_core_fp.fp_ic_reset(false, false); +#endif + himax_report_all_leave_event(ts); + + g_core_fp.fp_resume_ic_action(); + himax_int_enable(1); + + I("%s: END\n", __func__); + return 0; +} diff --git a/drivers/input/touchscreen/hxchipset/himax_common.h b/drivers/input/touchscreen/hxchipset/himax_common.h new file mode 100644 index 0000000000000000000000000000000000000000..e190490d469c650b00b84414201e0c3ae8778ae8 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_common.h @@ -0,0 +1,497 @@ +/* + * Himax Android Driver Sample Code for common functions + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef HIMAX_COMMON_H +#define HIMAX_COMMON_H + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "himax_platform.h" + +#ifdef CONFIG_DRM + #include +#elif defined(CONFIG_FB) + #include + #include +#elif defined(CONFIG_HAS_EARLYSUSPEND) + #include +#endif + +#ifdef CONFIG_OF + #include +#endif + +#define HIMAX_DRIVER_VER "1.2.2.4_ABCD1234_01" + +#define FLASH_DUMP_FILE "/sdcard/HX_Flash_Dump.bin" + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG) + #define HX_TP_PROC_2T2R + /*#define HX_TP_SELF_TEST_DRIVER*/ /* if enable, selftest works in driver */ +#endif +/*===========Himax Option function=============*/ +#define HX_RST_PIN_FUNC +#define HX_RESUME_SEND_CMD +#define HX_ESD_RECOVERY +/*#define HX_AUTO_UPDATE_FW*/ +/*#define HX_SMART_WAKEUP*/ +/*#define HX_GESTURE_TRACK*/ +/*#define HX_HIGH_SENSE*/ +/*#define HX_PALM_REPORT*/ +/*#define HX_USB_DETECT_GLOBAL*/ +/*#define HX_USB_DETECT_CALLBACK*/ +/*#define HX_PROTOCOL_A*/ /* for MTK special platform.If turning on,it will report to system by using specific format. */ +/*#define HX_RESUME_HW_RESET*/ +#define HX_PROTOCOL_B_3PA +/*#define HX_FIX_TOUCH_INFO*/ /* if open, you need to change the touch info in the fix_touch_info*/ +/*#define HX_ZERO_FLASH*/ +#define HX_ORG_SELFTEST +/*#define CONFIG_CHIP_DTCFG*/ + +/*#define HX_EN_SEL_BUTTON*/ /* Support Self Virtual key, default is close*/ +/*#define HX_EN_MUT_BUTTON*/ /* Support Mutual Virtual Key, default is close*/ + +#if defined(HX_EN_SEL_BUTTON) || defined(HX_EN_MUT_BUTTON) +/*#define HX_PLATFOME_DEFINE_KEY*/ /* for specific platform to set key(button) */ +#endif + +#define HX_KEY_MAX_COUNT 4 +#define DEFAULT_RETRY_CNT 3 + +#define HX_85XX_A_SERIES_PWON "HX85xxA" +#define HX_85XX_B_SERIES_PWON "HX85xxB" +#define HX_85XX_C_SERIES_PWON "HX85xxC" +#define HX_85XX_D_SERIES_PWON "HX85xxD" +#define HX_85XX_E_SERIES_PWON "HX85xxE" +#define HX_85XX_ES_SERIES_PWON "HX85xxES" +#define HX_85XX_F_SERIES_PWON "HX85xxF" +#define HX_85XX_H_SERIES_PWON "HX85xxH" +#define HX_83100A_SERIES_PWON "HX83100A" +#define HX_83102A_SERIES_PWON "HX83102A" +#define HX_83102B_SERIES_PWON "HX83102B" +#define HX_83102D_SERIES_PWON "HX83102D" +#define HX_83103A_SERIES_PWON "HX83103A" +#define HX_83110A_SERIES_PWON "HX83110A" +#define HX_83110B_SERIES_PWON "HX83110B" +#define HX_83111B_SERIES_PWON "HX83111B" +#define HX_83112A_SERIES_PWON "HX83112A" +#define HX_83112B_SERIES_PWON "HX83112B" +#define HX_83112D_SERIES_PWON "HX83112D" +#define HX_83112E_SERIES_PWON "HX83112E" +#define HX_83191A_SERIES_PWON "HX83191A" + +#define HX_TP_BIN_CHECKSUM_SW 1 +#define HX_TP_BIN_CHECKSUM_HW 2 +#define HX_TP_BIN_CHECKSUM_CRC 3 + +#define SHIFTBITS 5 + +#define FW_SIZE_32k 32768 +#define FW_SIZE_60k 61440 +#define FW_SIZE_64k 65536 +#define FW_SIZE_124k 126976 +#define FW_SIZE_128k 131072 + +#define NO_ERR 0 +#define READY_TO_SERVE 1 +#define WORK_OUT 2 +#define I2C_FAIL -1 +#define MEM_ALLOC_FAIL -2 +#define CHECKSUM_FAIL -3 +#define GESTURE_DETECT_FAIL -4 +#define INPUT_REGISTER_FAIL -5 +#define FW_NOT_READY -6 +#define LENGTH_FAIL -7 +#define OPEN_FILE_FAIL -8 +#define ERR_WORK_OUT -10 +#define HW_CRC_FAIL 1 + +#define HX_FINGER_ON 1 +#define HX_FINGER_LEAVE 2 + + +enum HX_TS_PATH { + HX_REPORT_COORD = 1, + HX_REPORT_SMWP_EVENT, + HX_REPORT_COORD_RAWDATA, +}; + +enum HX_TS_STATUS { + HX_TS_GET_DATA_FAIL = -4, + HX_ESD_EVENT, + HX_CHKSUM_FAIL, + HX_PATH_FAIL, 
+ HX_TS_NORMAL_END = 0, + HX_ESD_REC_OK, + HX_READY_SERVE, + HX_REPORT_DATA, + HX_ESD_WARNING, + HX_IC_RUNNING, + HX_ZERO_EVENT_COUNT, + HX_RST_OK, +}; + +enum cell_type { + CHIP_IS_ON_CELL, + CHIP_IS_IN_CELL +}; +#ifdef HX_FIX_TOUCH_INFO +enum fix_touch_info { + FIX_HX_RX_NUM = 0, + FIX_HX_TX_NUM = 0, + FIX_HX_BT_NUM = 0, + FIX_HX_X_RES = 0, + FIX_HX_Y_RES = 0, + FIX_HX_MAX_PT = 0, + FIX_HX_XY_REVERSE = false, + FIX_HX_INT_IS_EDGE = true, +#ifdef HX_TP_PROC_2T2R + FIX_HX_RX_NUM_2 = 0, + FIX_HX_TX_NUM_2 = 0, +#endif +}; +#endif + +#ifdef HX_ZERO_FLASH + #define HX_0F_DEBUG +#endif +struct himax_ic_data { + int vendor_fw_ver; + int vendor_config_ver; + int vendor_touch_cfg_ver; + int vendor_display_cfg_ver; + int vendor_cid_maj_ver; + int vendor_cid_min_ver; + int vendor_panel_ver; + int vendor_sensor_id; + int HX_RX_NUM; + int HX_TX_NUM; + int HX_BT_NUM; + int HX_X_RES; + int HX_Y_RES; + int HX_MAX_PT; + bool HX_XY_REVERSE; + bool HX_INT_IS_EDGE; +#ifdef HX_TP_PROC_2T2R + int HX_RX_NUM_2; + int HX_TX_NUM_2; +#endif +}; + +struct himax_virtual_key { + int index; + int keycode; + int x_range_min; + int x_range_max; + int y_range_min; + int y_range_max; +}; + +struct himax_target_report_data { + int *x; + int *y; + int *w; + int *finger_id; + int finger_on; + int finger_num; +#ifdef HX_PLATFORM_DEFINE_KEY + int key_size; + int *key_x; + int *key_y; + int *key_w; +#endif +#ifdef HX_SMART_WAKEUP + int SMWP_event_chk; +#endif +}; + +struct himax_report_data { + int touch_all_size; + int raw_cnt_max; + int raw_cnt_rmd; + int touch_info_size; + uint8_t finger_num; + uint8_t finger_on; + uint8_t *hx_coord_buf; + uint8_t hx_state_info[2]; +#if defined(HX_SMART_WAKEUP) + int event_size; + uint8_t *hx_event_buf; +#endif + + int rawdata_size; + uint8_t diag_cmd; + uint8_t *hx_rawdata_buf; + uint8_t rawdata_frame_size; +}; + +struct himax_ts_data { + bool suspended; + atomic_t suspend_mode; + uint8_t x_channel; + uint8_t y_channel; + uint8_t useScreenRes; + uint8_t diag_cmd; + char chip_name[30]; + uint8_t chip_cell_type; + + uint8_t protocol_type; + uint8_t first_pressed; + uint8_t coord_data_size; + uint8_t area_data_size; + uint8_t coordInfoSize; + uint8_t raw_data_frame_size; + uint8_t raw_data_nframes; + uint8_t nFinger_support; + uint8_t irq_enabled; + uint8_t diag_self[50]; + + uint16_t finger_pressed; + uint16_t last_slot; + uint16_t pre_finger_mask; + uint16_t old_finger; + int hx_point_num; + + uint32_t debug_log_level; + uint32_t widthFactor; + uint32_t heightFactor; + uint32_t tw_x_min; + uint32_t tw_x_max; + uint32_t tw_y_min; + uint32_t tw_y_max; + uint32_t pl_x_min; + uint32_t pl_x_max; + uint32_t pl_y_min; + uint32_t pl_y_max; + + int rst_gpio; + int use_irq; + int (*power)(int on); + int pre_finger_data[10][2]; + + struct device *dev; + struct workqueue_struct *himax_wq; + struct work_struct work; + struct input_dev *input_dev; + struct hrtimer timer; + struct i2c_client *client; + struct himax_i2c_platform_data *pdata; + struct himax_virtual_key *button; + struct mutex rw_lock; + +/******* SPI-start *******/ + struct mutex spi_lock; + struct spi_device *spi; + int hx_irq; +/******* SPI-end *******/ + + int in_self_test; + +#if defined(CONFIG_FB) + struct notifier_block fb_notif; + struct workqueue_struct *himax_att_wq; + struct delayed_work work_att; +#elif defined(CONFIG_HAS_EARLYSUSPEND) + struct early_suspend early_suspend; +#endif + + struct workqueue_struct *flash_wq; + struct work_struct flash_work; + +#ifdef HX_AUTO_UPDATE_FW + struct workqueue_struct *himax_update_wq; + struct 
delayed_work work_update; +#endif + +#ifdef HX_ZERO_FLASH + struct workqueue_struct *himax_0f_update_wq; + struct delayed_work work_0f_update; +#endif + + struct workqueue_struct *himax_diag_wq; + struct delayed_work himax_diag_delay_wrok; + +#ifdef HX_SMART_WAKEUP + uint8_t SMWP_enable; + uint8_t gesture_cust_en[16]; + struct wakeup_source ts_SMWP_wake_src; +#endif + +#ifdef HX_HIGH_SENSE + uint8_t HSEN_enable; +#endif + +#if defined(HX_USB_DETECT_CALLBACK) || defined(HX_USB_DETECT_GLOBAL) + uint8_t usb_connected; + uint8_t *cable_config; +#endif + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + struct workqueue_struct *ito_test_wq; + struct work_struct ito_test_work; +#endif + +}; + +struct himax_debug { + bool flash_dump_going; + void (*fp_ts_dbg_func)(struct himax_ts_data *ts, int start); + int (*fp_set_diag_cmd)(struct himax_ic_data *ic_data, struct himax_report_data *hx_touch_data); +}; + +enum input_protocol_type { + PROTOCOL_TYPE_A = 0x00, + PROTOCOL_TYPE_B = 0x01, +}; + +#ifdef HX_HIGH_SENSE + void himax_set_HSEN_func(uint8_t HSEN_enable); +#endif + +#ifdef HX_SMART_WAKEUP +void himax_set_SMWP_func(uint8_t SMWP_enable); + +#define GEST_PTLG_ID_LEN (4) +#define GEST_PTLG_HDR_LEN (4) +#define GEST_PTLG_HDR_ID1 (0xCC) +#define GEST_PTLG_HDR_ID2 (0x44) +#define GEST_PT_MAX_NUM (128) + +enum gesture_event_type { + EV_GESTURE_01 = 0x01, + EV_GESTURE_02, + EV_GESTURE_03, + EV_GESTURE_04, + EV_GESTURE_05, + EV_GESTURE_06, + EV_GESTURE_07, + EV_GESTURE_08, + EV_GESTURE_09, + EV_GESTURE_10, + EV_GESTURE_11, + EV_GESTURE_12, + EV_GESTURE_13, + EV_GESTURE_14, + EV_GESTURE_15, + EV_GESTURE_PWR = 0x80, +}; + +#define KEY_CUST_01 251 +#define KEY_CUST_02 252 +#define KEY_CUST_03 253 +#define KEY_CUST_04 254 +#define KEY_CUST_05 255 +#define KEY_CUST_06 256 +#define KEY_CUST_07 257 +#define KEY_CUST_08 258 +#define KEY_CUST_09 259 +#define KEY_CUST_10 260 +#define KEY_CUST_11 261 +#define KEY_CUST_12 262 +#define KEY_CUST_13 263 +#define KEY_CUST_14 264 +#define KEY_CUST_15 265 +#endif + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + extern uint8_t himax_ito_test(void); +#endif + +extern struct himax_ts_data *private_ts; +extern struct himax_ic_data *ic_data; +extern struct himax_report_data *hx_touch_data; +extern struct himax_core_fp g_core_fp; +extern struct himax_debug *debug_data; +extern uint8_t HX_PROC_SEND_FLAG; + +#ifdef HX_AUTO_UPDATE_FW +extern int g_i_FW_VER; +extern int g_i_CFG_VER; +extern int g_i_CID_MAJ; /* GUEST ID */ +extern int g_i_CID_MIN; /* VER for GUEST */ +extern unsigned char i_CTPM_FW[]; +#endif + +extern unsigned long FW_VER_MAJ_FLASH_ADDR; +extern unsigned long FW_VER_MIN_FLASH_ADDR; +extern unsigned long CFG_VER_MAJ_FLASH_ADDR; +extern unsigned long CFG_VER_MIN_FLASH_ADDR; +extern unsigned long CID_VER_MAJ_FLASH_ADDR; +extern unsigned long CID_VER_MIN_FLASH_ADDR; + +extern unsigned long FW_VER_MAJ_FLASH_LENG; +extern unsigned long FW_VER_MIN_FLASH_LENG; +extern unsigned long CFG_VER_MAJ_FLASH_LENG; +extern unsigned long CFG_VER_MIN_FLASH_LENG; +extern unsigned long CID_VER_MAJ_FLASH_LENG; +extern unsigned long CID_VER_MIN_FLASH_LENG; +extern unsigned char IC_CHECKSUM; + +extern unsigned long FW_CFG_VER_FLASH_ADDR; + +#ifdef HX_RST_PIN_FUNC + extern u8 HX_HW_RESET_ACTIVATE; + + extern void himax_rst_gpio_set(int pinnum, uint8_t value); +#endif + +/* void himax_HW_reset(uint8_t loadconfig,uint8_t int_off); */ + +extern int himax_chip_common_suspend(struct himax_ts_data *ts); +extern int himax_chip_common_resume(struct himax_ts_data *ts); +extern int 
himax_chip_common_init(void); +extern void himax_chip_common_deinit(void); +extern int himax_input_register(struct himax_ts_data *ts); +extern void himax_ts_work(struct himax_ts_data *ts); +extern enum hrtimer_restart himax_ts_timer_func(struct hrtimer *timer); +extern int himax_report_data_init(void); +extern struct proc_dir_entry *himax_touch_proc_dir; + +extern int himax_parse_dt(struct himax_ts_data *ts, struct himax_i2c_platform_data *pdata); +extern int himax_dev_set(struct himax_ts_data *ts); +extern int himax_input_register_device(struct input_dev *input_dev); + +#ifdef HX_USB_DETECT_GLOBAL +extern void himax_cable_detect_func(bool force_renew); +#endif + +/* ts_work about start */ +extern struct himax_target_report_data *g_target_report_data; +extern int himax_report_data(struct himax_ts_data *ts, int ts_path, int ts_status); +/* ts_work about end */ + +#endif + diff --git a/drivers/input/touchscreen/hxchipset/himax_debug.c b/drivers/input/touchscreen/hxchipset/himax_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..de60c9e148cf5d086f4541533c17b5775b93bdae --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_debug.c @@ -0,0 +1,3014 @@ +/* + * Himax Android Driver Sample Code for debug nodes + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "himax_debug.h" +#include "himax_ic_core.h" + +static struct proc_dir_entry *himax_proc_debug_level_file; +static struct proc_dir_entry *himax_proc_vendor_file; +static struct proc_dir_entry *himax_proc_attn_file; +static struct proc_dir_entry *himax_proc_int_en_file; +static struct proc_dir_entry *himax_proc_layout_file; +static struct proc_dir_entry *himax_proc_CRC_test_file; + +#ifdef HX_RST_PIN_FUNC + extern void himax_ic_reset(uint8_t loadconfig, uint8_t int_off); +#endif + +#ifdef HX_TP_PROC_2T2R + bool Is_2T2R = false; + int HX_RX_NUM_2 = 0; + int HX_TX_NUM_2 = 0; +#endif +uint8_t g_diag_arr_num; + +int g_max_mutual; +int g_min_mutual = 0xFFFF; +int g_max_self; +int g_min_self = 0xFFFF; + +struct timespec timeStart, timeEnd, timeDelta; +int g_switch_mode; + +#ifdef HX_TP_PROC_2T2R +static uint8_t x_channel_2; +static uint8_t y_channel_2; +static uint32_t *diag_mutual_2; + +int32_t *getMutualBuffer_2(void); +#endif + +#define HIMAX_PROC_REGISTER_FILE "register" +struct proc_dir_entry *himax_proc_register_file; +uint8_t byte_length; +uint8_t register_command[4]; +uint8_t cfg_flag; + +#define HIMAX_PROC_DIAG_FILE "diag" +struct proc_dir_entry *himax_proc_diag_file; +#define HIMAX_PROC_DIAG_ARR_FILE "diag_arr" +struct proc_dir_entry *himax_proc_diag_arrange_file; +struct file *diag_sram_fn; +uint8_t write_counter; +uint8_t write_max_count = 30; + +#define IIR_DUMP_FILE "/sdcard/HX_IIR_Dump.txt" +#define DC_DUMP_FILE "/sdcard/HX_DC_Dump.txt" +#define BANK_DUMP_FILE "/sdcard/HX_BANK_Dump.txt" + +uint8_t x_channel; +uint8_t y_channel; +int32_t *diag_mutual; +int32_t *diag_mutual_new; +int32_t *diag_mutual_old; +uint8_t diag_max_cnt; +uint8_t hx_state_info[2] = {0}; +uint8_t diag_coor[128]; +int32_t diag_self[100] = {0}; +int32_t 
diag_self_new[100] = {0}; +int32_t diag_self_old[100] = {0}; +int32_t *getMutualBuffer(void); +int32_t *getMutualNewBuffer(void); +int32_t *getMutualOldBuffer(void); +int32_t *getSelfBuffer(void); +int32_t *getSelfNewBuffer(void); +int32_t *getSelfOldBuffer(void); + +#define HIMAX_PROC_DEBUG_FILE "debug" +struct proc_dir_entry *himax_proc_debug_file; +#define HIMAX_PROC_FW_DEBUG_FILE "FW_debug" +struct proc_dir_entry *himax_proc_fw_debug_file; +#define HIMAX_PROC_DD_DEBUG_FILE "DD_debug" +struct proc_dir_entry *himax_proc_dd_debug_file; +bool fw_update_complete; +int handshaking_result; +unsigned char debug_level_cmd; +uint8_t cmd_set[8]; +uint8_t mutual_set_flag; + +#define HIMAX_PROC_FLASH_DUMP_FILE "flash_dump" +struct proc_dir_entry *himax_proc_flash_dump_file; +static int Flash_Size = 131072; +static uint8_t *flash_buffer; +static uint8_t flash_command; +static uint8_t flash_read_step; +static uint8_t flash_progress; +static uint8_t flash_dump_complete; +static uint8_t flash_dump_fail; +static uint8_t sys_operation; +static bool flash_dump_going; +static uint8_t getFlashDumpComplete(void); +static uint8_t getFlashDumpFail(void); +static uint8_t getFlashDumpProgress(void); +static uint8_t getFlashReadStep(void); +static void setFlashCommand(uint8_t command); +static void setFlashReadStep(uint8_t step); + +uint32_t **raw_data_array; +uint8_t X_NUM, Y_NUM; +uint8_t sel_type = 0x0D; + +#define HIMAX_PROC_RESET_FILE "reset" +struct proc_dir_entry *himax_proc_reset_file; + +#define HIMAX_PROC_SENSE_ON_OFF_FILE "SenseOnOff" +struct proc_dir_entry *himax_proc_SENSE_ON_OFF_file; + +#ifdef HX_ESD_RECOVERY + #define HIMAX_PROC_ESD_CNT_FILE "ESD_cnt" + struct proc_dir_entry *himax_proc_ESD_cnt_file; +#endif + +#define COMMON_BUF_SZ 80 +#define PROC_DD_BUF_SZ 20 +#define DEBUG_BUF_SZ 12 + +/* raw type */ +#define RAW_IIR 1 +#define RAW_DC 2 +#define RAW_BANK 3 +#define RAW_IIR2 4 +#define RAW_IIR2_N 5 +#define RAW_FIR2 6 +#define RAW_BASELINE 7 +#define RAW_DUMP_COORD 8 + +/* status type */ +#define START_TEST 0 +#define RAW_DATA 1 +#define PERCENT_TEST 2 +#define DEV_TEST 3 +#define NOISE_TEST 4 + +#define END_TEST 9 + +/* + *========================================================================= + * + * Segment : Himax PROC Debug Function + * + *========================================================================= + */ +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + +static int himax_read_i2c_status(void) +{ + return i2c_error_count; +} + +static ssize_t himax_ito_test_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + uint8_t result = 0; + uint8_t status = 0; + char *temp_buf; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return ret; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + status = ito_get_step_status(); + + switch (status) { + case START_TEST: + ret += snprintf(temp_buf + ret, len - ret, "Step : START_TEST\n"); + break; + + case RAW_DATA: + ret += snprintf(temp_buf + ret, len - ret, "Step : RAW_DATA\n"); + break; + + case PERCENT_TEST: + ret += snprintf(temp_buf + ret, len - ret, "Step : PERCENT_TEST\n"); + break; + + case DEV_TEST: + ret += snprintf(temp_buf + ret, len - ret, "Step : DEV_TEST\n"); + break; + + case NOISE_TEST: + ret += snprintf(temp_buf + ret, len - ret, "Step : NOISE_TEST\n"); + break; + + case END_TEST: + ret += snprintf(temp_buf + ret, len - ret, "Step : END_TEST\n"); + break; + + default: + ret += snprintf(temp_buf + ret, len 
- ret, "Step : Null\n"); + } + + result = ito_get_result_status(); + + if (result == 0xF) + ret += snprintf(temp_buf + ret, len - ret, "ITO test is On-going!\n"); + else if (result == 0) + ret += snprintf(temp_buf + ret, len - ret, "ITO test is Pass!\n"); + else if (result == 2) + ret += snprintf(temp_buf + ret, len - ret, "Open config file fail!\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "ITO test is Fail!\n"); + + HX_PROC_SEND_FLAG = 1; + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + return ret; +} + +static ssize_t himax_ito_test_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + uint8_t result = 0; + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + result = ito_get_result_status(); + I("%s: buf = %s, result = %d.\n", __func__, buf, result); + + if (buf[0] == '1' && result != 0xF) { + I("%s: buf[0] = %c.\n", __func__, buf[0]); + ito_set_step_status(0); + queue_work(ts->ito_test_wq, &ts->ito_test_work); + } + + return len; +} + +static const struct file_operations himax_proc_ito_test_ops = { + .owner = THIS_MODULE, + .read = himax_ito_test_read, + .write = himax_ito_test_write, +}; +#endif + +static ssize_t himax_CRC_test_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + uint8_t result = 0; + char *temp_buf; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + g_core_fp.fp_sense_off(); + msleep(20); + result = g_core_fp.fp_calculateChecksum(false); + g_core_fp.fp_sense_on(0x01); + + if (result) + ret += snprintf(temp_buf + ret, len - ret, "CRC test is Pass!\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "CRC test is Fail!\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static const struct file_operations himax_proc_CRC_test_ops = { + .owner = THIS_MODULE, + .read = himax_CRC_test_read, +}; + +static ssize_t himax_vendor_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + char *temp_buf; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + ret += snprintf(temp_buf + ret, len - ret, "FW_VER = 0x%2.2X\n", ic_data->vendor_fw_ver); + + if (private_ts->chip_cell_type == CHIP_IS_ON_CELL) + ret += snprintf(temp_buf + ret, len - ret, "CONFIG_VER = 0x%2.2X\n", ic_data->vendor_config_ver); + else { + ret += snprintf(temp_buf + ret, len - ret, "TOUCH_VER = 0x%2.2X\n", ic_data->vendor_touch_cfg_ver); + ret += snprintf(temp_buf + ret, len - ret, "DISPLAY_VER = 0x%2.2X\n", ic_data->vendor_display_cfg_ver); + } + + if (ic_data->vendor_cid_maj_ver < 0 && ic_data->vendor_cid_min_ver < 0) + ret += snprintf(temp_buf + ret, len - ret, "CID_VER = NULL\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "CID_VER = 0x%2.2X\n", (ic_data->vendor_cid_maj_ver << 8 | ic_data->vendor_cid_min_ver)); + + if (ic_data->vendor_panel_ver < 0) + ret += snprintf(temp_buf + ret, len - ret, "PANEL_VER = NULL\n"); + else + ret += snprintf(temp_buf + ret, len - 
ret, "PANEL_VER = 0x%2.2X\n", ic_data->vendor_panel_ver); + + ret += snprintf(temp_buf + ret, len - ret, "\n"); + ret += snprintf(temp_buf + ret, len - ret, "Himax Touch Driver Version:\n"); + ret += snprintf(temp_buf + ret, len - ret, "%s\n", HIMAX_DRIVER_VER); + HX_PROC_SEND_FLAG = 1; + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + + return ret; +} + +static const struct file_operations himax_proc_vendor_ops = { + .owner = THIS_MODULE, + .read = himax_vendor_read, +}; + +static ssize_t himax_attn_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + struct himax_ts_data *ts_data; + char *temp_buf; + + ts_data = private_ts; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + ret += snprintf(temp_buf + ret, len - ret, "attn = %x\n", himax_int_gpio_read(ts_data->pdata->gpio_irq)); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + + +static const struct file_operations himax_proc_attn_ops = { + .owner = THIS_MODULE, + .read = himax_attn_read, +}; + +static ssize_t himax_int_en_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + size_t ret = 0; + char *temp_buf; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + ret += snprintf(temp_buf + ret, len - ret, "%d ", ts->irq_enabled); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_int_en_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + char buf_tmp[DEBUG_BUF_SZ] = {0}; + int value, ret = 0; + + if (len >= DEBUG_BUF_SZ) { + I("%s: no command exceeds 12 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf_tmp, buff, len)) + return -EFAULT; + + if (buf_tmp[0] == '0') + value = false; + else if (buf_tmp[0] == '1') + value = true; + else + return -EINVAL; + + if (value) { + ret = himax_int_en_set(); + + if (ret == 0) { + ts->irq_enabled = 1; + irq_enable_count = 1; + } + } else { + himax_int_enable(0); + free_irq(ts->client->irq, ts); + ts->irq_enabled = 0; + } + + return len; +} + +static const struct file_operations himax_proc_int_en_ops = { + .owner = THIS_MODULE, + .read = himax_int_en_read, + .write = himax_int_en_write, +}; + +static ssize_t himax_layout_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + size_t ret = 0; + char *temp_buf; + + if (!HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed\n", __func__); + return 0; + } + ret += snprintf(temp_buf + ret, len - ret, "%d ", ts->pdata->abs_x_min); + ret += snprintf(temp_buf + ret, len - ret, "%d ", ts->pdata->abs_x_max); + ret += snprintf(temp_buf + ret, len - ret, "%d ", ts->pdata->abs_y_min); + ret += snprintf(temp_buf + ret, len - ret, "%d ", ts->pdata->abs_y_max); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if 
(copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_layout_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts = private_ts; + char buf_tmp[5]; + int i = 0, j = 0, k = 0, ret; + unsigned long value; + int layout[4] = {0}; + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + for (i = 0; i < 20; i++) { + if (buf[i] == ',' || buf[i] == '\n') { + memset(buf_tmp, 0x0, sizeof(buf_tmp)); + + if (i - j <= 5) + memcpy(buf_tmp, buf + j, i - j); + else { + I("buffer size is over 5 char\n"); + return len; + } + + j = i + 1; + + if (k < 4) { + ret = kstrtoul(buf_tmp, 10, &value); + layout[k++] = value; + } + } + } + + if (k == 4) { + ts->pdata->abs_x_min = layout[0]; + ts->pdata->abs_x_max = layout[1]; + ts->pdata->abs_y_min = layout[2]; + ts->pdata->abs_y_max = layout[3]; + I("%d, %d, %d, %d\n", + ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max); + input_unregister_device(ts->input_dev); + himax_input_register(ts); + } else + I("ERR@%d, %d, %d, %d\n", + ts->pdata->abs_x_min, ts->pdata->abs_x_max, ts->pdata->abs_y_min, ts->pdata->abs_y_max); + + return len; +} + +static const struct file_operations himax_proc_layout_ops = { + .owner = THIS_MODULE, + .read = himax_layout_read, + .write = himax_layout_write, +}; + +static ssize_t himax_debug_level_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts_data; + size_t ret = 0; + char *temp_buf; + + ts_data = private_ts; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + ret += snprintf(temp_buf + ret, len - ret, "%d\n", ts_data->debug_log_level); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_debug_level_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + struct himax_ts_data *ts; + char buf_tmp[DEBUG_BUF_SZ]; + int i; + + ts = private_ts; + + if (len >= DEBUG_BUF_SZ) { + I("%s: no command exceeds 12 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf_tmp, buff, len)) + return -EFAULT; + + ts->debug_log_level = 0; + + for (i = 0; i < len - 1; i++) { + if (buf_tmp[i] >= '0' && buf_tmp[i] <= '9') + ts->debug_log_level |= (buf_tmp[i] - '0'); + else if (buf_tmp[i] >= 'A' && buf_tmp[i] <= 'F') + ts->debug_log_level |= (buf_tmp[i] - 'A' + 10); + else if (buf_tmp[i] >= 'a' && buf_tmp[i] <= 'f') + ts->debug_log_level |= (buf_tmp[i] - 'a' + 10); + + if (i != len - 2) + ts->debug_log_level <<= 4; + } + + if (ts->debug_log_level & BIT(3)) { + if (ts->pdata->screenWidth > 0 && ts->pdata->screenHeight > 0 && + (ts->pdata->abs_x_max - ts->pdata->abs_x_min) > 0 && + (ts->pdata->abs_y_max - ts->pdata->abs_y_min) > 0) { + ts->widthFactor = (ts->pdata->screenWidth << SHIFTBITS) / (ts->pdata->abs_x_max - ts->pdata->abs_x_min); + ts->heightFactor = (ts->pdata->screenHeight << SHIFTBITS) / (ts->pdata->abs_y_max - ts->pdata->abs_y_min); + + if (ts->widthFactor > 0 && ts->heightFactor > 0) + ts->useScreenRes = 1; + else { + ts->heightFactor = 0; + ts->widthFactor = 0; + 
ts->useScreenRes = 0; + } + } else + I("Enable finger debug with raw position mode!\n"); + } else { + ts->useScreenRes = 0; + ts->widthFactor = 0; + ts->heightFactor = 0; + } + + return len; +} + +static const struct file_operations himax_proc_debug_level_ops = { + .owner = THIS_MODULE, + .read = himax_debug_level_read, + .write = himax_debug_level_write, +}; + +static ssize_t himax_proc_register_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + int ret = 0; + uint16_t loop_i; + uint8_t data[128]; + char *temp_buf; + + memset(data, 0x00, sizeof(data)); + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + I("himax_register_show: %02X,%02X,%02X,%02X\n", register_command[3], register_command[2], register_command[1], register_command[0]); + g_core_fp.fp_register_read(register_command, 128, data, cfg_flag); + ret += snprintf(temp_buf + ret, len - ret, "command: %02X,%02X,%02X,%02X\n", register_command[3], register_command[2], register_command[1], register_command[0]); + + for (loop_i = 0; loop_i < 128; loop_i++) { + ret += snprintf(temp_buf + ret, len - ret, "0x%2.2X ", data[loop_i]); + if ((loop_i % 16) == 15) + ret += snprintf(temp_buf + ret, len - ret, "\n"); + } + + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_proc_register_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + char buf[COMMON_BUF_SZ] = {0}; + char buf_tmp[16]; + uint8_t length = 0; + unsigned long result = 0; + uint8_t loop_i = 0; + uint16_t base = 2; + char *data_str = NULL; + uint8_t w_data[20]; + uint8_t x_pos[20]; + uint8_t count = 0; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + memset(buf_tmp, 0x0, sizeof(buf_tmp)); + memset(w_data, 0x0, sizeof(w_data)); + memset(x_pos, 0x0, sizeof(x_pos)); + memset(register_command, 0x0, sizeof(register_command)); + + I("himax %s\n", buf); + + if ((buf[0] == 'r' || buf[0] == 'w') && buf[1] == ':' && buf[2] == 'x') { + length = strlen(buf); + + /* I("%s: length = %d.\n", __func__,length); */ + for (loop_i = 0; loop_i < length; loop_i++) { /* find postion of 'x' */ + if (buf[loop_i] == 'x') { + x_pos[count] = loop_i; + count++; + } + } + + data_str = strrchr(buf, 'x'); + I("%s: %s.\n", __func__, data_str); + length = strlen(data_str + 1) - 1; + + if (buf[0] == 'r') { + if (buf[3] == 'F' && buf[4] == 'E' && length == 4) { + length = length - base; + cfg_flag = 1; + memcpy(buf_tmp, data_str + base + 1, length); + } else { + cfg_flag = 0; + memcpy(buf_tmp, data_str + 1, length); + } + + byte_length = length / 2; + + if (!kstrtoul(buf_tmp, 16, &result)) { + for (loop_i = 0 ; loop_i < byte_length ; loop_i++) + register_command[loop_i] = (uint8_t)(result >> loop_i * 8); + } + + if (strcmp(HX_85XX_H_SERIES_PWON, private_ts->chip_name) == 0 && cfg_flag == 0) + cfg_flag = 2; + } else if (buf[0] == 'w') { + if (buf[3] == 'F' && buf[4] == 'E') { + cfg_flag = 1; + memcpy(buf_tmp, buf + base + 3, length); + } else { + cfg_flag = 0; + memcpy(buf_tmp, buf + 3, length); + } + + if (count < 3) { + byte_length = length / 2; + + if (!kstrtoul(buf_tmp, 16, &result)) { /* command */ + for (loop_i = 0 ; loop_i < byte_length ; 
loop_i++) + register_command[loop_i] = (uint8_t)(result >> loop_i * 8); + } + + if (!kstrtoul(data_str + 1, 16, &result)) { /* data */ + for (loop_i = 0 ; loop_i < byte_length ; loop_i++) + w_data[loop_i] = (uint8_t)(result >> loop_i * 8); + } + + g_core_fp.fp_register_write(register_command, byte_length, w_data, cfg_flag); + } else { + for (loop_i = 0; loop_i < count; loop_i++) { /* parsing addr after 'x' */ + memset(buf_tmp, 0x0, sizeof(buf_tmp)); + if (cfg_flag != 0 && loop_i != 0) + byte_length = 2; + else + byte_length = x_pos[1] - x_pos[0] - 2; /* original */ + + memcpy(buf_tmp, buf + x_pos[loop_i] + 1, byte_length); + + /* I("%s: buf_tmp = %s\n", __func__,buf_tmp); */ + if (kstrtoul(buf_tmp, 16, &result) != 0) + continue; + + if (loop_i == 0) + register_command[loop_i] = (uint8_t)(result); + else + w_data[loop_i - 1] = (uint8_t)(result); + } + + byte_length = count - 1; + if (strcmp(HX_85XX_H_SERIES_PWON, private_ts->chip_name) == 0 && cfg_flag == 0) + cfg_flag = 2; + g_core_fp.fp_register_write(register_command, byte_length, &w_data[0], cfg_flag); + } + } else + return len; + } + + return len; +} + +static const struct file_operations himax_proc_register_ops = { + .owner = THIS_MODULE, + .read = himax_proc_register_read, + .write = himax_proc_register_write, +}; + +int32_t *getMutualBuffer(void) +{ + return diag_mutual; +} +int32_t *getMutualNewBuffer(void) +{ + return diag_mutual_new; +} +int32_t *getMutualOldBuffer(void) +{ + return diag_mutual_old; +} +int32_t *getSelfBuffer(void) +{ + return &diag_self[0]; +} +int32_t *getSelfNewBuffer(void) +{ + return &diag_self_new[0]; +} +int32_t *getSelfOldBuffer(void) +{ + return &diag_self_old[0]; +} +uint8_t getXChannel(void) +{ + return x_channel; +} +uint8_t getYChannel(void) +{ + return y_channel; +} +void setXChannel(uint8_t x) +{ + x_channel = x; +} +void setYChannel(uint8_t y) +{ + y_channel = y; +} +void setMutualBuffer(void) +{ + diag_mutual = kzalloc(x_channel * y_channel * sizeof(int32_t), GFP_KERNEL); + if (!diag_mutual) + E("%s: allocate memory failed!\n", __func__); +} +void setMutualNewBuffer(void) +{ + diag_mutual_new = kzalloc(x_channel * y_channel * sizeof(int32_t), GFP_KERNEL); + if (!diag_mutual_new) + E("%s: allocate memory failed!\n", __func__); +} +void setMutualOldBuffer(void) +{ + diag_mutual_old = kzalloc(x_channel * y_channel * sizeof(int32_t), GFP_KERNEL); + if (!diag_mutual_old) + E("%s: allocate memory failed!\n", __func__); +} + +#ifdef HX_TP_PROC_2T2R +int32_t *getMutualBuffer_2(void) +{ + return diag_mutual_2; +} + +uint8_t getXChannel_2(void) +{ + return x_channel_2; +} + +uint8_t getYChannel_2(void) +{ + return y_channel_2; +} + +void setXChannel_2(uint8_t x) +{ + x_channel_2 = x; +} + +void setYChannel_2(uint8_t y) +{ + y_channel_2 = y; +} + +void setMutualBuffer_2(void) +{ + diag_mutual_2 = kzalloc(x_channel_2 * y_channel_2 * sizeof(int32_t), GFP_KERNEL); +} +#endif + +int himax_set_diag_cmd(struct himax_ic_data *ic_data, struct himax_report_data *hx_touch_data) +{ + struct himax_ts_data *ts = private_ts; + int32_t *mutual_data; + int32_t *self_data; + int mul_num; + int self_num; + + /* int RawDataLen = 0; */ + hx_touch_data->diag_cmd = ts->diag_cmd; + + if (hx_touch_data->diag_cmd >= 1 && hx_touch_data->diag_cmd <= 7) { + /* Check event stack CRC */ + if (!g_core_fp.fp_diag_check_sum(hx_touch_data)) + goto bypass_checksum_failed_packet; + +#ifdef HX_TP_PROC_2T2R + + if (Is_2T2R && (hx_touch_data->diag_cmd >= 4 && hx_touch_data->diag_cmd <= 6)) { + mutual_data = getMutualBuffer_2(); + self_data = 
getSelfBuffer(); + /* initiallize the block number of mutual and self */ + mul_num = getXChannel_2() * getYChannel_2(); +#ifdef HX_EN_SEL_BUTTON + self_num = getXChannel_2() + getYChannel_2() + ic_data->HX_BT_NUM; +#else + self_num = getXChannel_2() + getYChannel_2(); +#endif + } else +#endif + { + mutual_data = getMutualBuffer(); + self_data = getSelfBuffer(); + /* initiallize the block number of mutual and self */ + mul_num = getXChannel() * getYChannel(); +#ifdef HX_EN_SEL_BUTTON + self_num = getXChannel() + getYChannel() + ic_data->HX_BT_NUM; +#else + self_num = getXChannel() + getYChannel(); +#endif + } + + g_core_fp.fp_diag_parse_raw_data(hx_touch_data, mul_num, self_num, hx_touch_data->diag_cmd, mutual_data, self_data); + } + + else + if (hx_touch_data->diag_cmd == 8) { + memset(diag_coor, 0x00, sizeof(diag_coor)); + memcpy(&(diag_coor[0]), &hx_touch_data->hx_coord_buf[0], hx_touch_data->touch_info_size); + } + + /* assign state info data */ + memcpy(&(hx_state_info[0]), &hx_touch_data->hx_state_info[0], 2); + return NO_ERR; +bypass_checksum_failed_packet: + return 1; +} + +/* #if defined(HX_DEBUG_LEVEL) */ + +void himax_log_touch_data(int start) +{ + int loop_i = 0; + int print_size = 0; + uint8_t *buf; + + if (start == 1) + return; + + if (hx_touch_data->diag_cmd == 0) { + print_size = hx_touch_data->touch_info_size; + buf = kcalloc(hx_touch_data->touch_info_size, sizeof(uint8_t), GFP_KERNEL); + if (!buf) { + E("%s: allocate memory failed!\n", __func__); + return; + } + memcpy(buf, hx_touch_data->hx_coord_buf, hx_touch_data->touch_info_size); + } else if (hx_touch_data->diag_cmd > 0) { + print_size = hx_touch_data->touch_all_size; + buf = kcalloc(hx_touch_data->touch_info_size, sizeof(uint8_t), GFP_KERNEL); + if (!buf) { + E("%s: allocate memory failed!\n", __func__); + return; + } + memcpy(buf, hx_touch_data->hx_coord_buf, hx_touch_data->touch_info_size); + memcpy(&buf[hx_touch_data->touch_info_size], hx_touch_data->hx_rawdata_buf, hx_touch_data->touch_all_size - hx_touch_data->touch_info_size); + } else + E("%s:cmd fault\n", __func__); + + for (loop_i = 0; loop_i < print_size; loop_i += 8) { + if ((loop_i + 7) >= print_size) { + I("%s: over flow\n", __func__); + break; + } + + I("P %2d = 0x%2.2X P %2d = 0x%2.2X ", loop_i, buf[loop_i], loop_i + 1, buf[loop_i + 1]); + I("P %2d = 0x%2.2X P %2d = 0x%2.2X ", loop_i + 2, buf[loop_i + 2], loop_i + 3, buf[loop_i + 3]); + I("P %2d = 0x%2.2X P %2d = 0x%2.2X ", loop_i + 4, buf[loop_i + 4], loop_i + 5, buf[loop_i + 5]); + I("P %2d = 0x%2.2X P %2d = 0x%2.2X ", loop_i + 6, buf[loop_i + 6], loop_i + 7, buf[loop_i + 7]); + I("\n"); + } + kfree(buf); +} + +void himax_log_touch_event(struct himax_ts_data *ts, int start) +{ + int loop_i = 0; + + if (g_target_report_data->finger_on > 0 && g_target_report_data->finger_num > 0) { + + for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) { + if (g_target_report_data->x[loop_i] >= 0 && g_target_report_data->x[loop_i] <= ts->pdata->abs_x_max && g_target_report_data->y[loop_i] >= 0 && g_target_report_data->y[loop_i] <= ts->pdata->abs_y_max) { + I("Finger %d=> X:%d, Y:%d W:%d, Z:%d, F:%d\n", loop_i + 1, + g_target_report_data->x[loop_i], + g_target_report_data->y[loop_i], + g_target_report_data->w[loop_i], + g_target_report_data->w[loop_i], + loop_i + 1); + } + } + } else if (g_target_report_data->finger_on == 0 && g_target_report_data->finger_num == 0) + I("All Finger leave\n"); + else + I("%s : wrong input!\n", __func__); + +} + +void himax_log_touch_int_devation(int touched) +{ + if (touched == 
HX_FINGER_ON) { + getnstimeofday(&timeStart); + /* + * I(" Irq start time = %ld.%06ld s\n", + * timeStart.tv_sec, timeStart.tv_nsec/1000); + */ + } else if (touched == HX_FINGER_LEAVE) { + getnstimeofday(&timeEnd); + timeDelta.tv_nsec = (timeEnd.tv_sec * 1000000000 + timeEnd.tv_nsec) - (timeStart.tv_sec * 1000000000 + timeStart.tv_nsec); + /* + * I("Irq finish time = %ld.%06ld s\n", + * timeEnd.tv_sec, timeEnd.tv_nsec/1000); + */ + I("Touch latency = %ld us\n", timeDelta.tv_nsec / 1000); + } else + I("%s : wrong input!\n", __func__); +} + +void himax_log_touch_event_detail(struct himax_ts_data *ts, int start) +{ + int loop_i = 0; + + if (start == HX_FINGER_LEAVE) { + for (loop_i = 0; loop_i < ts->nFinger_support; loop_i++) { + if (((ts->old_finger >> loop_i & 1) == 0) && ((ts->pre_finger_mask >> loop_i & 1) == 1)) { + if (g_target_report_data->x[loop_i] >= 0 && g_target_report_data->x[loop_i] <= ts->pdata->abs_x_max && g_target_report_data->y[loop_i] >= 0 && g_target_report_data->y[loop_i] <= ts->pdata->abs_y_max) + I("status: Raw:F:%02d Down, X:%d, Y:%d, W:%d\n", loop_i + 1, g_target_report_data->x[loop_i], g_target_report_data->y[loop_i], g_target_report_data->w[loop_i]); + + } else if ((((ts->old_finger >> loop_i & 1) == 1) && ((ts->pre_finger_mask >> loop_i & 1) == 0))) + I("status: Raw:F:%02d Up, X:%d, Y:%d\n", loop_i + 1, ts->pre_finger_data[loop_i][0], ts->pre_finger_data[loop_i][1]); + /* + * else + * I("dbg hx_point_num=%d,old_finger=0x%02X,pre_finger_mask=0x%02X\n",ts->hx_point_num,ts->old_finger,ts->pre_finger_mask); + */ + + } + } + +} + +void himax_ts_dbg_func(struct himax_ts_data *ts, int start) +{ + switch (ts->debug_log_level) { + case 1: + himax_log_touch_data(start); + break; + case 2: + himax_log_touch_event(ts, start); + break; + case 4: + himax_log_touch_int_devation(start); + break; + case 8: + himax_log_touch_event_detail(ts, start); + break; + } +} + +/* #endif */ +static ssize_t himax_diag_arrange_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + g_diag_arr_num = buf[0] - '0'; + I("%s: g_diag_arr_num = %d\n", __func__, g_diag_arr_num); + return len; +} + +void himax_get_mutual_edge(void) +{ + int i = 0; + + for (i = 0; i < (x_channel * y_channel); i++) { + if (diag_mutual[i] > g_max_mutual) + g_max_mutual = diag_mutual[i]; + + if (diag_mutual[i] < g_min_mutual) + g_min_mutual = diag_mutual[i]; + } +} + +void himax_get_self_edge(void) +{ + int i = 0; + + for (i = 0; i < (x_channel + y_channel); i++) { + if (diag_self[i] > g_max_self) + g_max_self = diag_self[i]; + + if (diag_self[i] < g_min_self) + g_min_self = diag_self[i]; + } +} + +/* print first step which is row */ +static const struct file_operations himax_proc_diag_arrange_ops = { + .owner = THIS_MODULE, + .write = himax_diag_arrange_write, +}; + +static void print_state_info(struct seq_file *s) +{ + /* seq_printf(s, "State_info_2bytes:%3d, %3d\n",hx_state_info[0],hx_state_info[1]); */ + seq_printf(s, "ReCal = %d\t", hx_state_info[0] & 0x01); + seq_printf(s, "Palm = %d\t", hx_state_info[0] >> 1 & 0x01); + seq_printf(s, "AC mode = %d\t", hx_state_info[0] >> 2 & 0x01); + seq_printf(s, "Water = %d\n", hx_state_info[0] >> 3 & 0x01); + seq_printf(s, "Glove = %d\t", hx_state_info[0] >> 4 & 0x01); + seq_printf(s, "TX Hop = %d\t", hx_state_info[0] >> 5 & 0x01); + seq_printf(s, "Base Line = 
%d\t", hx_state_info[0] >> 6 & 0x01); + seq_printf(s, "OSR Hop = %d\t", hx_state_info[1] >> 3 & 0x01); + seq_printf(s, "KEY = %d\n", hx_state_info[1] >> 4 & 0x0F); +} + +static void himax_diag_arrange_print(struct seq_file *s, int i, int j, int transpose) +{ + if (transpose) + seq_printf(s, "%6d", diag_mutual[j + i * x_channel]); + else + seq_printf(s, "%6d", diag_mutual[i + j * x_channel]); +} + +/* ready to print second step which is column*/ +static void himax_diag_arrange_inloop(struct seq_file *s, int in_init, int out_init, bool transpose, int j) +{ + int i; + int in_max = 0; + + if (transpose) + in_max = y_channel; + else + in_max = x_channel; + + if (in_init > 0) { /* bit0 = 1 */ + for (i = in_init - 1; i >= 0; i--) + himax_diag_arrange_print(s, i, j, transpose); + + if (transpose) { + if (out_init > 0) + seq_printf(s, " %5d\n", diag_self[j]); + else + seq_printf(s, " %5d\n", diag_self[x_channel - j - 1]); + } + } else { /* bit0 = 0 */ + for (i = 0; i < in_max; i++) + himax_diag_arrange_print(s, i, j, transpose); + + if (transpose) { + if (out_init > 0) + seq_printf(s, " %5d\n", diag_self[x_channel - j - 1]); + else + seq_printf(s, " %5d\n", diag_self[j]); + } + } +} + +/* print first step which is row */ +static void himax_diag_arrange_outloop(struct seq_file *s, int transpose, int out_init, int in_init) +{ + int j; + int out_max = 0; + int self_cnt = 0; + + if (transpose) + out_max = x_channel; + else + out_max = y_channel; + + if (out_init > 0) { /* bit1 = 1 */ + self_cnt = 1; + + for (j = out_init - 1; j >= 0; j--) { + seq_printf(s, "%3c%02d%c", '[', j + 1, ']'); + himax_diag_arrange_inloop(s, in_init, out_init, transpose, j); + + if (!transpose) { + seq_printf(s, " %5d\n", diag_self[y_channel + x_channel - self_cnt]); + self_cnt++; + } + } + } else { /* bit1 = 0 */ + /* self_cnt = x_channel; */ + for (j = 0; j < out_max; j++) { + seq_printf(s, "%3c%02d%c", '[', j + 1, ']'); + himax_diag_arrange_inloop(s, in_init, out_init, transpose, j); + + if (!transpose) + seq_printf(s, " %5d\n", diag_self[j + x_channel]); + } + } +} + +/* determin the output format of diag */ +static void himax_diag_arrange(struct seq_file *s) +{ + int bit2, bit1, bit0; + int i; + + /* rotate bit */ + bit2 = g_diag_arr_num >> 2; + /* reverse Y */ + bit1 = g_diag_arr_num >> 1 & 0x1; + /* reverse X */ + bit0 = g_diag_arr_num & 0x1; + + if (g_diag_arr_num < 4) { + for (i = 0 ; i <= x_channel; i++) + seq_printf(s, "%3c%02d%c", '[', i, ']'); + + seq_puts(s, "\n"); + himax_diag_arrange_outloop(s, bit2, bit1 * y_channel, bit0 * x_channel); + seq_printf(s, "%6c", ' '); + + if (bit0 == 1) { + for (i = x_channel - 1; i >= 0; i--) + seq_printf(s, "%6d", diag_self[i]); + } else { + for (i = 0; i < x_channel; i++) + seq_printf(s, "%6d", diag_self[i]); + } + } else { + for (i = 0 ; i <= y_channel; i++) + seq_printf(s, "%3c%02d%c", '[', i, ']'); + + seq_puts(s, "\n"); + himax_diag_arrange_outloop(s, bit2, bit1 * x_channel, bit0 * y_channel); + seq_printf(s, "%6c", ' '); + + if (bit1 == 1) { + for (i = x_channel + y_channel - 1; i >= x_channel; i--) + seq_printf(s, "%6d", diag_self[i]); + + } else { + for (i = x_channel; i < x_channel + y_channel; i++) + seq_printf(s, "%6d", diag_self[i]); + + } + } +} + +static void *himax_diag_seq_start(struct seq_file *s, loff_t *pos) +{ + if (*pos >= 1) + return NULL; + + return (void *)((unsigned long) *pos + 1); +} + +static void *himax_diag_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + return NULL; +} + +static void himax_diag_seq_stop(struct seq_file *s, void *v) +{ +} + 
+static int himax_diag_seq_read(struct seq_file *s, void *v) +{ + struct himax_ts_data *ts = private_ts; + size_t ret = 0; + uint32_t loop_i; + uint16_t mutual_num, self_num, width; + int dsram_type = 0; + + dsram_type = ts->diag_cmd / 10; + +#ifdef HX_TP_PROC_2T2R + if (Is_2T2R && (ts->diag_cmd >= 4 && ts->diag_cmd <= 6)) { + mutual_num = x_channel_2 * y_channel_2; + self_num = x_channel_2 + y_channel_2; /* don't add KEY_COUNT */ + width = x_channel_2; + seq_printf(s, "ChannelStart: %4d, %4d\n\n", x_channel_2, y_channel_2); + } else +#endif + { + mutual_num = x_channel * y_channel; + self_num = x_channel + y_channel; /* don't add KEY_COUNT */ + width = x_channel; + seq_printf(s, "ChannelStart: %4d, %4d\n\n", x_channel, y_channel); + } + + /* start to show out the raw data in adb shell */ + if ((ts->diag_cmd >= 1 && ts->diag_cmd <= 3) || (ts->diag_cmd == 7)) { + himax_diag_arrange(s); + seq_puts(s, "\n"); +#ifdef HX_EN_SEL_BUTTON + seq_puts(s, "\n"); + + for (loop_i = 0; loop_i < ic_data->HX_BT_NUM; loop_i++) + seq_printf(s, "%6d", diag_self[ic_data->HX_RX_NUM + ic_data->HX_TX_NUM + loop_i]); + +#endif + seq_puts(s, "ChannelEnd"); + seq_puts(s, "\n"); + } + +#ifdef HX_TP_PROC_2T2R + + else if (Is_2T2R && ts->diag_cmd >= 4 && ts->diag_cmd <= 6) { + for (loop_i = 0; loop_i < mutual_num; loop_i++) { + seq_printf(s, "%4d", diag_mutual_2[loop_i]); + + if ((loop_i % width) == (width - 1)) + seq_printf(s, " %4d\n", diag_self[width + loop_i / width]); + } + + seq_puts(s, "\n"); + + for (loop_i = 0; loop_i < width; loop_i++) { + seq_printf(s, "%4d", diag_self[loop_i]); + + if (((loop_i) % width) == (width - 1)) + seq_puts(s, "\n"); + } + +#ifdef HX_EN_SEL_BUTTON + seq_puts(s, "\n"); + + for (loop_i = 0; loop_i < ic_data->HX_BT_NUM; loop_i++) + seq_printf(s, "%4d", diag_self[ic_data->HX_RX_NUM_2 + ic_data->HX_TX_NUM_2 + loop_i]); + +#endif + seq_puts(s, "ChannelEnd"); + seq_puts(s, "\n"); + } + +#endif + + else if (ts->diag_cmd == 8) { + for (loop_i = 0; loop_i < 128 ; loop_i++) { + if ((loop_i % 16) == 0) + seq_puts(s, "LineStart:"); + + seq_printf(s, "%4x", diag_coor[loop_i]); + + if ((loop_i % 16) == 15) + seq_puts(s, "\n"); + } + } else if (dsram_type > 0 && dsram_type <= 8) { + himax_diag_arrange(s); + seq_puts(s, "\n ChannelEnd"); + seq_puts(s, "\n"); + } + + if ((ts->diag_cmd >= 1 && ts->diag_cmd <= 7) || dsram_type > 0) { + /* print Mutual/Slef Maximum and Minimum */ + himax_get_mutual_edge(); + himax_get_self_edge(); + seq_printf(s, "Mutual Max:%3d, Min:%3d\n", g_max_mutual, g_min_mutual); + seq_printf(s, "Self Max:%3d, Min:%3d\n", g_max_self, g_min_self); + /* recovery status after print */ + g_max_mutual = 0; + g_min_mutual = 0xFFFF; + g_max_self = 0; + g_min_self = 0xFFFF; + } + + /* pring state info */ + print_state_info(s); + return ret; +} + +static const struct seq_operations himax_diag_seq_ops = { + .start = himax_diag_seq_start, + .next = himax_diag_seq_next, + .stop = himax_diag_seq_stop, + .show = himax_diag_seq_read, +}; + +static int himax_diag_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &himax_diag_seq_ops); +}; + +bool DSRAM_Flag; + +/* DSRAM thread */ +void himax_ts_diag_func(void) +{ + struct himax_ts_data *ts = private_ts; + int i = 0, j = 0; + unsigned int index = 0; + int total_size = (y_channel * x_channel + y_channel + x_channel) * 2; + uint8_t *info_data; + int32_t *mutual_data; + int32_t *mutual_data_new; + int32_t *mutual_data_old; + int32_t *self_data; + int32_t *self_data_new; + int32_t *self_data_old; + int32_t new_data; + /* 
1:common dsram,2:100 frame Max,3:N-(N-1)frame */ + int dsram_type = 0; + char temp_buf[20]; + char write_buf[total_size * 3]; + + mutual_data = NULL; + mutual_data_new = NULL; + mutual_data_old = NULL; + self_data = NULL; + self_data_new = NULL; + self_data_old = NULL; + + info_data = kcalloc(total_size, sizeof(uint8_t), GFP_KERNEL); + if (info_data == NULL) { + E("%s: allocate memory failed!\n", __func__); + return; + } + memset(write_buf, '\0', sizeof(write_buf)); + memset(info_data, 0, total_size * sizeof(uint8_t)); + dsram_type = ts->diag_cmd / 10; + I("%s:Entering ts->diag_cmd=%d\n!", __func__, ts->diag_cmd); + + if (dsram_type == 8) { + dsram_type = 1; + I("%s Sorting Mode run sram type1 !\n", __func__); + } + + g_core_fp.fp_burst_enable(1); + + if (dsram_type == 1 || dsram_type == 2 || dsram_type == 4) { + mutual_data = getMutualBuffer(); + self_data = getSelfBuffer(); + + } else if (dsram_type == 3) { + mutual_data = getMutualBuffer(); + mutual_data_new = getMutualNewBuffer(); + mutual_data_old = getMutualOldBuffer(); + self_data = getSelfBuffer(); + self_data_new = getSelfNewBuffer(); + self_data_old = getSelfOldBuffer(); + } + + g_core_fp.fp_get_DSRAM_data(info_data, DSRAM_Flag); + index = 0; + + for (i = 0; i < y_channel; i++) { /* mutual data */ + for (j = 0; j < x_channel; j++) { + new_data = (((int8_t)info_data[index + 1] << 8) | info_data[index]); + + if (dsram_type == 1 || dsram_type == 4) + mutual_data[i * x_channel + j] = new_data; + else if (dsram_type == 2) { + /* Keep max data */ + if (mutual_data[i * x_channel + j] < new_data) + mutual_data[i * x_channel + j] = new_data; + } else if (dsram_type == 3) { + /* Cal data for [N]-[N-1] frame */ + mutual_data_new[i * x_channel + j] = new_data; + mutual_data[i * x_channel + j] = mutual_data_new[i * x_channel + j] - mutual_data_old[i * x_channel + j]; + } + + index += 2; + } + } + + for (i = 0; i < x_channel + y_channel; i++) { /* self data */ + new_data = (info_data[index + 1] << 8 | info_data[index]); + if (dsram_type == 1 || dsram_type == 4) + self_data[i] = new_data; + else if (dsram_type == 2) { + /* Keep max data */ + if (self_data[i] < new_data) + self_data[i] = new_data; + } else if (dsram_type == 3) { + /* Cal data for [N]-[N-1] frame */ + self_data_new[i] = new_data; + self_data[i] = self_data_new[i] - self_data_old[i]; + } + index += 2; + } + + kfree(info_data); + + if (dsram_type == 3) { + memcpy(mutual_data_old, mutual_data_new, x_channel * y_channel * sizeof(int32_t)); /* copy N data to N-1 array */ + memcpy(self_data_old, self_data_new, (x_channel + y_channel) * sizeof(int32_t)); /* copy N data to N-1 array */ + } + + diag_max_cnt++; + + if (dsram_type >= 1 && dsram_type <= 3) + queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, 1 / 10 * HZ); + else if (dsram_type == 4) { + for (i = 0; i < x_channel * y_channel; i++) { + memset(temp_buf, '\0', sizeof(temp_buf)); + + if (i == (x_channel * y_channel - 1)) { + snprintf(temp_buf, sizeof(temp_buf), "%4d\t", mutual_data[i]); + snprintf(temp_buf, sizeof(temp_buf), "%4d\n", self_data[x_channel + y_channel - 1]); + I("%s :i = %d 3\n", __func__, i); + } else if (i % x_channel == (x_channel - 1)) { + snprintf(temp_buf, sizeof(temp_buf), "%4d\t", mutual_data[i]); + snprintf(temp_buf, sizeof(temp_buf), "%4d\n", self_data[x_channel + (i / x_channel) + 1]); + } else + snprintf(temp_buf, sizeof(temp_buf), "%4d\t", mutual_data[i]); + + strlcat(&write_buf[i*strlen(temp_buf)], temp_buf, strlen(temp_buf)); + } + + for (i = 0; i < x_channel; i++) { + 
memset(temp_buf, '\0', sizeof(temp_buf)); + if (i == x_channel - 1) + snprintf(temp_buf, sizeof(temp_buf), "%4d\n", self_data[i]); + else + snprintf(temp_buf, sizeof(temp_buf), "%4d\t", self_data[i]); + strlcat(&write_buf[(i+x_channel * y_channel)*strlen(temp_buf)], temp_buf, strlen(temp_buf)); + } + + /* save raw data in file */ + if (!IS_ERR(diag_sram_fn)) { + I("%s create file and ready to write\n", __func__); + diag_sram_fn->f_op->write(diag_sram_fn, write_buf, sizeof(write_buf), &diag_sram_fn->f_pos); + write_counter++; + + if (write_counter < write_max_count) + queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, 1 / 10 * HZ); + else { + filp_close(diag_sram_fn, NULL); + write_counter = 0; + } + } + } +} + +static ssize_t himax_diag_write(struct file *filp, const char __user *buff, size_t len, loff_t *data) +{ + struct himax_ts_data *ts = private_ts; + char messages[COMMON_BUF_SZ] = {0}; + struct filename *vts_name; + uint8_t command[2] = {0x00, 0x00}; + uint8_t receive[1]; + /* 0: common , other: dsram */ + int storage_type = 0; + /* 1:IIR,2:DC,3:Bank,4:IIR2,5:IIR2_N,6:FIR2,7:Baseline,8:dump coord */ + int rawdata_type = 0; + + memset(receive, 0x00, sizeof(receive)); + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(messages, buff, len)) + return -EFAULT; + + I("%s:g_switch_mode = %d\n", __func__, g_switch_mode); + + if (messages[1] == 0x0A) + ts->diag_cmd = messages[0] - '0'; + else + ts->diag_cmd = (messages[0] - '0') * 10 + (messages[1] - '0'); + + storage_type = g_core_fp.fp_determin_diag_storage(ts->diag_cmd); + rawdata_type = g_core_fp.fp_determin_diag_rawdata(ts->diag_cmd); + + if (ts->diag_cmd > 0 && rawdata_type == 0) { + I("[Himax]ts->diag_cmd=0x%x ,storage_type=%d, rawdata_type=%d! Maybe no support!\n" + , ts->diag_cmd, storage_type, rawdata_type); + ts->diag_cmd = 0x00; + } else + I("[Himax]ts->diag_cmd=0x%x ,storage_type=%d, rawdata_type=%d\n", ts->diag_cmd, storage_type, rawdata_type); + + memset(diag_mutual, 0x00, x_channel * y_channel * sizeof(int32_t)); + memset(diag_self, 0x00, sizeof(diag_self)); + if (storage_type == 0 && rawdata_type >= RAW_IIR && rawdata_type < RAW_DUMP_COORD) { + I("%s,common\n", __func__); + + if (DSRAM_Flag) { + /* 1. Clear DSRAM flag */ + DSRAM_Flag = false; + /* 2. Stop DSRAM thread */ + cancel_delayed_work(&private_ts->himax_diag_delay_wrok); + /* 3. Enable ISR */ + himax_int_enable(1); + /* (4) FW leave sram and return to event stack */ + g_core_fp.fp_return_event_stack(); + } + + if (g_switch_mode == 2) { + g_core_fp.fp_idle_mode(0); + g_switch_mode = g_core_fp.fp_switch_mode(0); + } + + if (ts->diag_cmd == 0x04) { +#if defined(HX_TP_PROC_2T2R) + command[0] = ts->diag_cmd; +#else + ts->diag_cmd = 0x00; + command[0] = 0x00; +#endif + } else + command[0] = ts->diag_cmd; + + g_core_fp.fp_diag_register_set(command[0], storage_type); + } else if (storage_type > 0 && storage_type < 8 + && rawdata_type >= RAW_IIR && rawdata_type < RAW_DUMP_COORD) { + I("%s,dsram\n", __func__); + diag_max_cnt = 0; + + /* 0. 
set diag flag */ + if (DSRAM_Flag) { + /* (1) Clear DSRAM flag */ + DSRAM_Flag = false; + /* (2) Stop DSRAM thread */ + cancel_delayed_work(&private_ts->himax_diag_delay_wrok); + /* (3) Enable ISR */ + himax_int_enable(1); + /* (4) FW leave sram and return to event stack */ + g_core_fp.fp_return_event_stack(); + } + + /* close sorting if turn on */ + if (g_switch_mode == 2) { + g_core_fp.fp_idle_mode(0); + g_switch_mode = g_core_fp.fp_switch_mode(0); + } + + command[0] = rawdata_type;/* ts->diag_cmd; */ + g_core_fp.fp_diag_register_set(command[0], storage_type); + /* 1. Disable ISR */ + himax_int_enable(0); + + /* Open file for save raw data log */ + if (storage_type == 4) { + switch (rawdata_type) { + case RAW_IIR: + vts_name = getname_kernel(IIR_DUMP_FILE); + diag_sram_fn = file_open_name(vts_name, O_CREAT | O_WRONLY, 0); + break; + + case RAW_DC: + vts_name = getname_kernel(DC_DUMP_FILE); + diag_sram_fn = file_open_name(vts_name, O_CREAT | O_WRONLY, 0); + break; + + case RAW_BANK: + vts_name = getname_kernel(BANK_DUMP_FILE); + diag_sram_fn = file_open_name(vts_name, O_CREAT | O_WRONLY, 0); + break; + + default: + I("%s raw data type is not true. raw data type is %d\n", __func__, rawdata_type); + } + } + + /* 2. Start DSRAM thread */ + queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, 2 * HZ / 100); + I("%s: Start get raw data in DSRAM\n", __func__); + + if (storage_type == 4) + msleep(6000); + + /* 3. Set DSRAM flag */ + DSRAM_Flag = true; + + } else if (storage_type == 8) { + I("Soritng mode!\n"); + + if (DSRAM_Flag) { + /* 1. Clear DSRAM flag */ + DSRAM_Flag = false; + /* 2. Stop DSRAM thread */ + cancel_delayed_work(&private_ts->himax_diag_delay_wrok); + /* 3. Enable ISR */ + himax_int_enable(1); + /* (4) FW leave sram and return to event stack */ + g_core_fp.fp_return_event_stack(); + } + + g_core_fp.fp_idle_mode(1); + g_switch_mode = g_core_fp.fp_switch_mode(1); + + if (g_switch_mode == 2) + g_core_fp.fp_diag_register_set(command[0], storage_type); + + queue_delayed_work(private_ts->himax_diag_wq, &private_ts->himax_diag_delay_wrok, 2 * HZ / 100); + DSRAM_Flag = true; + } else { + /* set diag flag */ + if (DSRAM_Flag) { + I("return and cancel sram thread!\n"); + /* (1) Clear DSRAM flag */ + DSRAM_Flag = false; + /* (2) Stop DSRAM thread */ + cancel_delayed_work(&private_ts->himax_diag_delay_wrok); + /* (3) Enable ISR */ + himax_int_enable(1); + /* (4) FW leave sram and return to event stack */ + g_core_fp.fp_return_event_stack(); + } + + if (g_switch_mode == 2) { + g_core_fp.fp_idle_mode(0); + g_switch_mode = g_core_fp.fp_switch_mode(0); + } + + if (ts->diag_cmd != 0x00) { + E("[Himax]ts->diag_cmd error!diag_command=0x%x so reset\n", ts->diag_cmd); + command[0] = 0x00; + + if (ts->diag_cmd != 0x08) + ts->diag_cmd = 0x00; + + g_core_fp.fp_diag_register_set(command[0], storage_type); + } else { + command[0] = 0x00; + ts->diag_cmd = 0x00; + g_core_fp.fp_diag_register_set(command[0], storage_type); + I("return to normal ts->diag_cmd=0x%x\n", ts->diag_cmd); + } + } + + return len; +} + +static const struct file_operations himax_proc_diag_ops = { + .owner = THIS_MODULE, + .open = himax_diag_proc_open, + .read = seq_read, + .write = himax_diag_write, +}; + +static ssize_t himax_reset_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + char buf_tmp[DEBUG_BUF_SZ]; + + if (len >= DEBUG_BUF_SZ) { + I("%s: no command exceeds 12 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf_tmp, buff, len)) + return -EFAULT; + 
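+ /* '1'..'4' select the four fp_ic_reset() argument combinations below;
+ * without HX_RST_PIN_FUNC the write is accepted but does nothing */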
+#ifdef HX_RST_PIN_FUNC + + if (buf_tmp[0] == '1') + g_core_fp.fp_ic_reset(false, false); + else if (buf_tmp[0] == '2') + g_core_fp.fp_ic_reset(false, true); + else if (buf_tmp[0] == '3') + g_core_fp.fp_ic_reset(true, false); + else if (buf_tmp[0] == '4') + g_core_fp.fp_ic_reset(true, true); + + /* else if (buf_tmp[0] == '5') */ + /* ESD_HW_REST(); */ +#endif + return len; +} + +static const struct file_operations himax_proc_reset_ops = { + .owner = THIS_MODULE, + .write = himax_reset_write, +}; + +static ssize_t himax_debug_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + size_t ret = 0; + char *temp_buf; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + + if (debug_level_cmd == 't') { + if (fw_update_complete) + ret += snprintf(temp_buf + ret, len - ret, "FW Update Complete "); + else + ret += snprintf(temp_buf + ret, len - ret, "FW Update Fail "); + + } else if (debug_level_cmd == 'h') { + if (handshaking_result == 0) + ret += snprintf(temp_buf + ret, len - ret, "Handshaking Result = %d (MCU Running)\n", handshaking_result); + else if (handshaking_result == 1) + ret += snprintf(temp_buf + ret, len - ret, "Handshaking Result = %d (MCU Stop)\n", handshaking_result); + else if (handshaking_result == 2) + ret += snprintf(temp_buf + ret, len - ret, "Handshaking Result = %d (I2C Error)\n", handshaking_result); + else + ret += snprintf(temp_buf + ret, len - ret, "Handshaking Result = error\n"); + + } else if (debug_level_cmd == 'v') { + ret += snprintf(temp_buf + ret, len - ret, "FW_VER = 0x%2.2X\n", ic_data->vendor_fw_ver); + + if (private_ts->chip_cell_type == CHIP_IS_ON_CELL) + ret += snprintf(temp_buf + ret, len - ret, "CONFIG_VER = 0x%2.2X\n", ic_data->vendor_config_ver); + else { + ret += snprintf(temp_buf + ret, len - ret, "TOUCH_VER = 0x%2.2X\n", ic_data->vendor_touch_cfg_ver); + ret += snprintf(temp_buf + ret, len - ret, "DISPLAY_VER = 0x%2.2X\n", ic_data->vendor_display_cfg_ver); + } + if (ic_data->vendor_cid_maj_ver < 0 && ic_data->vendor_cid_min_ver < 0) + ret += snprintf(temp_buf + ret, len - ret, "CID_VER = NULL\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "CID_VER = 0x%2.2X\n", (ic_data->vendor_cid_maj_ver << 8 | ic_data->vendor_cid_min_ver)); + + if (ic_data->vendor_panel_ver < 0) + ret += snprintf(temp_buf + ret, len - ret, "PANEL_VER = NULL\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "PANEL_VER = 0x%2.2X\n", ic_data->vendor_panel_ver); + + ret += snprintf(temp_buf + ret, len - ret, "\n"); + ret += snprintf(temp_buf + ret, len - ret, "Himax Touch Driver Version:\n"); + ret += snprintf(temp_buf + ret, len - ret, "%s\n", HIMAX_DRIVER_VER); + + } else if (debug_level_cmd == 'd') { + ret += snprintf(temp_buf + ret, len - ret, "Himax Touch IC Information :\n"); + ret += snprintf(temp_buf + ret, len - ret, "%s\n", private_ts->chip_name); + + switch (IC_CHECKSUM) { + case HX_TP_BIN_CHECKSUM_SW: + ret += snprintf(temp_buf + ret, len - ret, "IC Checksum : SW\n"); + break; + + case HX_TP_BIN_CHECKSUM_HW: + ret += snprintf(temp_buf + ret, len - ret, "IC Checksum : HW\n"); + break; + + case HX_TP_BIN_CHECKSUM_CRC: + ret += snprintf(temp_buf + ret, len - ret, "IC Checksum : CRC\n"); + break; + + default: + ret += snprintf(temp_buf + ret, len - ret, "IC Checksum error.\n"); + } + + if (ic_data->HX_INT_IS_EDGE) + ret += snprintf(temp_buf + ret, len - ret, "Driver register Interrupt : EDGE TIRGGER\n"); 
+ else + ret += snprintf(temp_buf + ret, len - ret, "Driver register Interrupt : LEVEL TRIGGER\n"); + + if (private_ts->protocol_type == PROTOCOL_TYPE_A) + ret += snprintf(temp_buf + ret, len - ret, "Protocol : TYPE_A\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "Protocol : TYPE_B\n"); + + ret += snprintf(temp_buf + ret, len - ret, "RX Num : %d\n", ic_data->HX_RX_NUM); + ret += snprintf(temp_buf + ret, len - ret, "TX Num : %d\n", ic_data->HX_TX_NUM); + ret += snprintf(temp_buf + ret, len - ret, "BT Num : %d\n", ic_data->HX_BT_NUM); + ret += snprintf(temp_buf + ret, len - ret, "X Resolution : %d\n", ic_data->HX_X_RES); + ret += snprintf(temp_buf + ret, len - ret, "Y Resolution : %d\n", ic_data->HX_Y_RES); + ret += snprintf(temp_buf + ret, len - ret, "Max Point : %d\n", ic_data->HX_MAX_PT); + ret += snprintf(temp_buf + ret, len - ret, "XY reverse : %d\n", ic_data->HX_XY_REVERSE); +#ifdef HX_TP_PROC_2T2R + + if (Is_2T2R) { + ret += snprintf(temp_buf + ret, len - ret, "2T2R panel\n"); + ret += snprintf(temp_buf + ret, len - ret, "RX Num_2 : %d\n", HX_RX_NUM_2); + ret += snprintf(temp_buf + ret, len - ret, "TX Num_2 : %d\n", HX_TX_NUM_2); + } + +#endif + } else if (debug_level_cmd == 'i') { + + if (g_core_fp.fp_read_i2c_status()) + ret += snprintf(temp_buf + ret, len - ret, "I2C communication is bad.\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "I2C communication is good.\n"); + } else if (debug_level_cmd == 'n') { + + if (g_core_fp.fp_read_ic_trigger_type() == 1) /* Edgd = 1, Level = 0 */ + ret += snprintf(temp_buf + ret, len - ret, "IC Interrupt type is edge trigger.\n"); + else if (g_core_fp.fp_read_ic_trigger_type() == 0) + ret += snprintf(temp_buf + ret, len - ret, "IC Interrupt type is level trigger.\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "Unknown IC trigger type.\n"); + + if (ic_data->HX_INT_IS_EDGE) + ret += snprintf(temp_buf + ret, len - ret, "Driver register Interrupt : EDGE TIRGGER\n"); + else + ret += snprintf(temp_buf + ret, len - ret, "Driver register Interrupt : LEVEL TRIGGER\n"); + } + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +extern int g_ts_dbg; +static ssize_t himax_debug_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + int result = 0; + char fileName[128]; + char buf[COMMON_BUF_SZ] = {0}; + int fw_type = 0; + const struct firmware *fw = NULL; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + if (buf[0] == 'h') { /* handshaking */ + debug_level_cmd = buf[0]; + himax_int_enable(0); + /* 0:Running, 1:Stop, 2:I2C Fail */ + handshaking_result = g_core_fp.fp_hand_shaking(); + himax_int_enable(1); + return len; + } else if (buf[0] == 'v') { /* firmware version */ + himax_int_enable(0); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, false); +#endif + debug_level_cmd = buf[0]; + g_core_fp.fp_read_FW_ver(); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(true, false); +#endif + himax_int_enable(1); + /* himax_check_chip_version(); */ + return len; + } else if (buf[0] == 'd') { /* ic information */ + debug_level_cmd = buf[0]; + return len; + } else if (buf[0] == 't') { + if (buf[1] == 's' && buf[2] == 'd' + && buf[3] == 'b' && buf[4] == 'g') { + if (buf[5] == '1') { + I("Open Ts Debug!\n"); + g_ts_dbg = 1; + } else if (buf[5] == '0') { + I("Close Ts Debug!\n"); + g_ts_dbg = 0; + } 
else + E("Parameter fault for ts debug\n"); + goto ENDFUCTION; + } + himax_int_enable(0); + debug_level_cmd = buf[0]; + fw_update_complete = false; + memset(fileName, 0, 128); + /* parse the file name */ + snprintf(fileName, len - 2, "%s", &buf[2]); + I("%s: upgrade from file(%s) start!\n", __func__, fileName); + result = request_firmware(&fw, fileName, private_ts->dev); + + if (result < 0) { + I("fail to request_firmware fwpath: %s (ret:%d)\n", fileName, result); + return result; + } + + I("%s: FW image: %02X, %02X, %02X, %02X\n", __func__, fw->data[0], fw->data[1], fw->data[2], fw->data[3]); + fw_type = (fw->size) / 1024; + /* start to upgrade */ + himax_int_enable(0); + I("Now FW size is : %dk\n", fw_type); + + switch (fw_type) { + case 32: + if (g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_32k((unsigned char *)fw->data, fw->size, false) == 0) { + E("%s: TP upgrade error, line: %d\n", __func__, __LINE__); + fw_update_complete = false; + } else { + I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__); + fw_update_complete = true; + } + + break; + + case 60: + if (g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_60k((unsigned char *)fw->data, fw->size, false) == 0) { + E("%s: TP upgrade error, line: %d\n", __func__, __LINE__); + fw_update_complete = false; + } else { + I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__); + fw_update_complete = true; + } + + break; + + case 64: + if (g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_64k((unsigned char *)fw->data, fw->size, false) == 0) { + E("%s: TP upgrade error, line: %d\n", __func__, __LINE__); + fw_update_complete = false; + } else { + I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__); + fw_update_complete = true; + } + + break; + + case 124: + if (g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_124k((unsigned char *)fw->data, fw->size, false) == 0) { + E("%s: TP upgrade error, line: %d\n", __func__, __LINE__); + fw_update_complete = false; + } else { + I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__); + fw_update_complete = true; + } + + break; + + case 128: + if (g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_128k((unsigned char *)fw->data, fw->size, false) == 0) { + E("%s: TP upgrade error, line: %d\n", __func__, __LINE__); + fw_update_complete = false; + } else { + I("%s: TP upgrade OK, line: %d\n", __func__, __LINE__); + fw_update_complete = true; + } + + break; + + default: + E("%s: Flash command fail: %d\n", __func__, __LINE__); + fw_update_complete = false; + break; + } + release_firmware(fw); + goto firmware_upgrade_done; + } else if (buf[0] == 'i' && buf[1] == '2' && buf[2] == 'c') { + /* i2c commutation */ + debug_level_cmd = 'i'; + return len; + } else if (buf[0] == 'i' && buf[1] == 'n' && buf[2] == 't') { + /* INT trigger */ + debug_level_cmd = 'n'; + return len; + } + +#ifdef HX_ZERO_FLASH + + else if (buf[0] == 'z') { + + if (buf[1] == '0') + g_core_fp.fp_0f_operation_check(0); + else + g_core_fp.fp_0f_operation_check(1); + return len; + + } else if (buf[0] == 'p') { + + I("NOW debug echo r!\n"); + /* himax_program_sram(); */ + private_ts->himax_0f_update_wq = create_singlethread_workqueue("HMX_update_0f_reqest_write"); + + if (!private_ts->himax_0f_update_wq) + E(" allocate syn_update_wq failed\n"); + + INIT_DELAYED_WORK(&private_ts->work_0f_update, g_core_fp.fp_0f_operation); + queue_delayed_work(private_ts->himax_0f_update_wq, &private_ts->work_0f_update, msecs_to_jiffies(100)); + return len; + } else if (buf[0] == 'x') { + g_core_fp.fp_sys_reset(); + return len; + } +#endif + + else { /* others,do nothing */ + 
debug_level_cmd = 0; + return len; + } + +firmware_upgrade_done: + g_core_fp.fp_read_FW_ver(); + g_core_fp.fp_touch_information(); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(true, false); +#else + g_core_fp.fp_sense_on(0x00); +#endif + himax_int_enable(1); +/* todo himax_chip->tp_firmware_upgrade_proceed = 0; */ +/* todo himax_chip->suspend_state = 0; */ +/* todo enable_irq(himax_chip->irq); */ +ENDFUCTION: + return len; +} + +static const struct file_operations himax_proc_debug_ops = { + .owner = THIS_MODULE, + .read = himax_debug_read, + .write = himax_debug_write, +}; + +static ssize_t himax_proc_FW_debug_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + uint8_t loop_i = 0; + uint8_t tmp_data[64]; + char *temp_buf; + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + cmd_set[0] = 0x01; + + if (g_core_fp.fp_read_FW_status(cmd_set, tmp_data) == NO_ERR) { + ret += snprintf(temp_buf + ret, len - ret, "0x%02X%02X%02X%02X :\t", cmd_set[5], cmd_set[4], cmd_set[3], cmd_set[2]); + + for (loop_i = 0; loop_i < cmd_set[1]; loop_i++) + ret += snprintf(temp_buf + ret, len - ret, "%5d\t", tmp_data[loop_i]); + + ret += snprintf(temp_buf + ret, len - ret, "\n"); + } + + cmd_set[0] = 0x02; + + if (g_core_fp.fp_read_FW_status(cmd_set, tmp_data) == NO_ERR) { + for (loop_i = 0; loop_i < cmd_set[1]; loop_i = loop_i + 2) { + if ((loop_i % 16) == 0) + ret += snprintf(temp_buf + ret, len - ret, "0x%02X%02X%02X%02X :\t", + cmd_set[5], cmd_set[4], cmd_set[3] + (((cmd_set[2] + loop_i) >> 8) & 0xFF), (cmd_set[2] + loop_i) & 0xFF); + + ret += snprintf(temp_buf + ret, len - ret, "%5d\t", tmp_data[loop_i] + (tmp_data[loop_i + 1] << 8)); + + if ((loop_i % 16) == 14) + ret += snprintf(temp_buf + ret, len - ret, "\n"); + } + } + + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static const struct file_operations himax_proc_fw_debug_ops = { + .owner = THIS_MODULE, + .read = himax_proc_FW_debug_read, +}; + +static ssize_t himax_proc_DD_debug_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + uint8_t tmp_data[64]; + uint8_t loop_i = 0; + char *temp_buf; + + if (!HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + if (mutual_set_flag == 1) { + if (g_core_fp.fp_read_DD_status(cmd_set, tmp_data) == NO_ERR) { + for (loop_i = 0; loop_i < cmd_set[0]; loop_i++) { + if ((loop_i % 8) == 0) + ret += snprintf(temp_buf + ret, len - ret, "0x%02X : ", loop_i); + + ret += snprintf(temp_buf + ret, len - ret, "0x%02X ", tmp_data[loop_i]); + + if ((loop_i % 8) == 7) + ret += snprintf(temp_buf + ret, len - ret, "\n"); + } + } + } + + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_proc_DD_debug_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + uint8_t i = 0; + uint8_t cnt = 2; + unsigned long result = 0; + char buf_tmp[PROC_DD_BUF_SZ]; + char buf_tmp2[4]; + + if (len >= PROC_DD_BUF_SZ) { + I("%s: no command exceeds 20 chars.\n", 
__func__); + return -EFAULT; + } + + if (copy_from_user(buf_tmp, buff, len)) + return -EFAULT; + + memset(buf_tmp2, 0x0, sizeof(buf_tmp2)); + + if (buf_tmp[2] == 'x' && buf_tmp[6] == 'x' && buf_tmp[10] == 'x') { + mutual_set_flag = 1; + + for (i = 3; i < 12; i = i + 4) { + memcpy(buf_tmp2, buf_tmp + i, 2); + + if (!kstrtoul(buf_tmp2, 16, &result)) + cmd_set[cnt] = (uint8_t)result; + else + I("String to oul is fail in cnt = %d, buf_tmp2 = %s", cnt, buf_tmp2); + + cnt--; + } + + I("cmd_set[2] = %02X, cmd_set[1] = %02X, cmd_set[0] = %02X\n", cmd_set[2], cmd_set[1], cmd_set[0]); + } else + mutual_set_flag = 0; + + return len; +} + +static const struct file_operations himax_proc_dd_debug_ops = { + .owner = THIS_MODULE, + .read = himax_proc_DD_debug_read, + .write = himax_proc_DD_debug_write, +}; + +uint8_t getFlashCommand(void) +{ + return flash_command; +} + +static uint8_t getFlashDumpProgress(void) +{ + return flash_progress; +} + +static uint8_t getFlashDumpComplete(void) +{ + return flash_dump_complete; +} + +static uint8_t getFlashDumpFail(void) +{ + return flash_dump_fail; +} + +uint8_t getSysOperation(void) +{ + return sys_operation; +} + +static uint8_t getFlashReadStep(void) +{ + return flash_read_step; +} + +bool getFlashDumpGoing(void) +{ + return flash_dump_going; +} + +void setFlashBuffer(void) +{ + flash_buffer = kcalloc(Flash_Size, sizeof(uint8_t), GFP_KERNEL); + if (!flash_buffer) + E("%s: allocate memory failed!\n", __func__); +} + +void setSysOperation(uint8_t operation) +{ + sys_operation = operation; +} + +void setFlashDumpProgress(uint8_t progress) +{ + flash_progress = progress; + /* I("setFlashDumpProgress : progress = %d ,flash_progress = %d\n",progress,flash_progress); */ +} + +void setFlashDumpComplete(uint8_t status) +{ + flash_dump_complete = status; +} + +void setFlashDumpFail(uint8_t fail) +{ + flash_dump_fail = fail; +} + +static void setFlashCommand(uint8_t command) +{ + flash_command = command; +} + +static void setFlashReadStep(uint8_t step) +{ + flash_read_step = step; +} + +void setFlashDumpGoing(bool going) +{ + flash_dump_going = going; + debug_data->flash_dump_going = going; +} + +static ssize_t himax_proc_flash_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + ssize_t ret = 0; + int loop_i; + uint8_t local_flash_read_step = 0; + uint8_t local_flash_complete = 0; + uint8_t local_flash_progress = 0; + uint8_t local_flash_command = 0; + uint8_t local_flash_fail = 0; + char *temp_buf; + + local_flash_complete = getFlashDumpComplete(); + local_flash_progress = getFlashDumpProgress(); + local_flash_command = getFlashCommand(); + local_flash_fail = getFlashDumpFail(); + I("flash_progress = %d\n", local_flash_progress); + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + + if (local_flash_fail) { + ret += snprintf(temp_buf + ret, len - ret, "FlashStart:Fail\n"); + ret += snprintf(temp_buf + ret, len - ret, "FlashEnd"); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + return ret; + } + + if (!local_flash_complete) { + ret += snprintf(temp_buf + ret, len - ret, "FlashStart:Ongoing:0x%2.2x\n", flash_progress); + ret += snprintf(temp_buf + ret, len - ret, "FlashEnd"); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + 
I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + return ret; + } + + if (local_flash_command == 1 && local_flash_complete) { + ret += snprintf(temp_buf + ret, len - ret, "FlashStart:Complete\n"); + ret += snprintf(temp_buf + ret, len - ret, "FlashEnd"); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + return ret; + } + + if (local_flash_command == 3 && local_flash_complete) { + ret += snprintf(temp_buf + ret, len - ret, "FlashStart:\n"); + + for (loop_i = 0; loop_i < 128; loop_i++) { + ret += snprintf(temp_buf + ret, len - ret, "x%2.2x", flash_buffer[loop_i]); + + if ((loop_i % 16) == 15) + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + } + + ret += snprintf(temp_buf + ret, len - ret, "FlashEnd"); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + return ret; + } + + /* flash command == 0 , report the data */ + local_flash_read_step = getFlashReadStep(); + ret += snprintf(temp_buf + ret, len - ret, "FlashStart:%2.2x\n", local_flash_read_step); + + for (loop_i = 0; loop_i < 1024; loop_i++) { + ret += snprintf(temp_buf + ret, len - ret, "x%2.2X", flash_buffer[local_flash_read_step * 1024 + loop_i]); + + if ((loop_i % 16) == 15) + ret += snprintf(temp_buf + ret, len - ret, "\n"); + } + + ret += snprintf(temp_buf + ret, len - ret, "FlashEnd"); + ret += snprintf(temp_buf + ret, len - ret, "\n"); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_proc_flash_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + char buf_tmp[6]; + unsigned long result = 0; + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + memset(buf_tmp, 0x0, sizeof(buf_tmp)); + I("%s: buf = %s\n", __func__, buf); + + if (getSysOperation() == 1) { + E("%s: PROC is busy , return!\n", __func__); + return len; + } + + if (buf[0] == '0') { + setFlashCommand(0); + + if (buf[1] == ':' && buf[2] == 'x') { + memcpy(buf_tmp, buf + 3, 2); + I("%s: read_Step = %s\n", __func__, buf_tmp); + + if (!kstrtoul(buf_tmp, 16, &result)) { + I("%s: read_Step = %lu\n", __func__, result); + setFlashReadStep(result); + } + } + } else if (buf[0] == '1') { + /* 1_32,1_60,1_64,1_24,1_28 for flash size 32k,60k,64k,124k,128k */ + setSysOperation(1); + setFlashCommand(1); + setFlashDumpProgress(0); + setFlashDumpComplete(0); + setFlashDumpFail(0); + + if ((buf[1] == '_') && (buf[2] == '3') && (buf[3] == '2')) { + Flash_Size = FW_SIZE_32k; + } else if ((buf[1] == '_') && (buf[2] == '6')) { + if (buf[3] == '0') + Flash_Size = FW_SIZE_60k; + else if (buf[3] == '4') + Flash_Size = FW_SIZE_64k; + } else if ((buf[1] == '_') && (buf[2] == '2')) { + if (buf[3] == '4') + Flash_Size = FW_SIZE_124k; + else if (buf[3] == '8') + Flash_Size = FW_SIZE_128k; + } + queue_work(private_ts->flash_wq, &private_ts->flash_work); + } else if (buf[0] == '2') { + /* 2_32,2_60,2_64,2_24,2_28 for flash size 32k,60k,64k,124k,128k */ + setSysOperation(1); + setFlashCommand(2); + setFlashDumpProgress(0); + setFlashDumpComplete(0); + setFlashDumpFail(0); + + if ((buf[1] == '_') && (buf[2] == 
'3') && (buf[3] == '2')) + Flash_Size = FW_SIZE_32k; + else if ((buf[1] == '_') && (buf[2] == '6')) { + if (buf[3] == '0') + Flash_Size = FW_SIZE_60k; + else if (buf[3] == '4') + Flash_Size = FW_SIZE_64k; + } else if ((buf[1] == '_') && (buf[2] == '2')) { + if (buf[3] == '4') + Flash_Size = FW_SIZE_124k; + else if (buf[3] == '8') + Flash_Size = FW_SIZE_128k; + } + queue_work(private_ts->flash_wq, &private_ts->flash_work); + } + + return len; +} + +static const struct file_operations himax_proc_flash_ops = { + .owner = THIS_MODULE, + .read = himax_proc_flash_read, + .write = himax_proc_flash_write, +}; + +void himax_ts_flash_func(void) +{ + uint8_t local_flash_command = 0; + + himax_int_enable(0); + setFlashDumpGoing(true); + /* sector = getFlashDumpSector(); */ + /* page = getFlashDumpPage(); */ + local_flash_command = getFlashCommand(); + msleep(100); + I("%s: local_flash_command = %d enter.\n", __func__, local_flash_command); + + if ((local_flash_command == 1 || local_flash_command == 2) || (local_flash_command == 0x0F)) + g_core_fp.fp_flash_dump_func(local_flash_command, Flash_Size, flash_buffer); + + I("Complete~~~~~~~~~~~~~~~~~~~~~~~\n"); + + if (local_flash_command == 2) { + struct file *fn; + struct filename *vts_name; + + vts_name = getname_kernel(FLASH_DUMP_FILE); + fn = file_open_name(vts_name, O_CREAT | O_WRONLY, 0); + + if (!IS_ERR(fn)) { + I("%s create file and ready to write\n", __func__); + fn->f_op->write(fn, flash_buffer, Flash_Size * sizeof(uint8_t), &fn->f_pos); + filp_close(fn, NULL); + } + } + + himax_int_enable(1); + setFlashDumpGoing(false); + setFlashDumpComplete(1); + setSysOperation(0); + return; + /* Flash_Dump_i2c_transfer_error: */ + /* himax_int_enable(1); */ + /* setFlashDumpGoing(false); */ + /* setFlashDumpComplete(0); */ + /* setFlashDumpFail(1); */ + /* setSysOperation(0); */ + /* return; */ +} + + + +static ssize_t himax_sense_on_off_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + char buf[COMMON_BUF_SZ] = {0}; + + if (len >= COMMON_BUF_SZ) { + I("%s: no command exceeds 80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + if (buf[0] == '0') { + g_core_fp.fp_sense_off(); + I("Sense off\n"); + } else if (buf[0] == '1') { + if (buf[1] == 's') { + g_core_fp.fp_sense_on(0x00); + I("Sense on re-map on, run sram\n"); + } else { + g_core_fp.fp_sense_on(0x01); + I("Sense on re-map off, run flash\n"); + } + } else + I("Do nothing\n"); + + return len; +} + +static const struct file_operations himax_proc_sense_on_off_ops = { + .owner = THIS_MODULE, + .write = himax_sense_on_off_write, +}; + +#ifdef HX_ESD_RECOVERY +static ssize_t himax_esd_cnt_read(struct file *file, char __user *buf, size_t len, loff_t *pos) +{ + size_t ret = 0; + char *temp_buf; + + I("%s: enter, %d\n", __func__, __LINE__); + + if (HX_PROC_SEND_FLAG) { + HX_PROC_SEND_FLAG = 0; + return 0; + } + + temp_buf = kzalloc(len, GFP_KERNEL); + if (!temp_buf) { + E("%s: allocate memory failed!\n", __func__); + return 0; + } + ret += snprintf(temp_buf + ret, len - ret, "EB_cnt = %d, EC_cnt = %d, ED_cnt = %d\n", hx_EB_event_flag, hx_EC_event_flag, hx_ED_event_flag); + + if (copy_to_user(buf, temp_buf, len)) + I("%s,here:%d\n", __func__, __LINE__); + + kfree(temp_buf); + HX_PROC_SEND_FLAG = 1; + + return ret; +} + +static ssize_t himax_esd_cnt_write(struct file *file, const char __user *buff, size_t len, loff_t *pos) +{ + int i = 0; + char buf[DEBUG_BUF_SZ] = {0}; + + if (len >= DEBUG_BUF_SZ) { + I("%s: no command exceeds 
80 chars.\n", __func__); + return -EFAULT; + } + + if (copy_from_user(buf, buff, len)) + return -EFAULT; + + I("Clear ESD Flag\n"); + + if (buf[i] == '0') { + hx_EB_event_flag = 0; + hx_EC_event_flag = 0; + hx_ED_event_flag = 0; + } + + return len; +} + +static const struct file_operations himax_proc_esd_cnt_ops = { + .owner = THIS_MODULE, + .read = himax_esd_cnt_read, + .write = himax_esd_cnt_write, +}; +#endif + +static void himax_himax_data_init(void) +{ + debug_data->fp_ts_dbg_func = himax_ts_dbg_func; + debug_data->fp_set_diag_cmd = himax_set_diag_cmd; + debug_data->flash_dump_going = false; +} + +static void himax_ts_flash_work_func(struct work_struct *work) +{ + himax_ts_flash_func(); +} + +static void himax_ts_diag_work_func(struct work_struct *work) +{ + himax_ts_diag_func(); +} + +int himax_touch_proc_init(void) +{ + himax_proc_debug_level_file = proc_create(HIMAX_PROC_DEBUG_LEVEL_FILE, 0644, + himax_touch_proc_dir, &himax_proc_debug_level_ops); + if (himax_proc_debug_level_file == NULL) { + E(" %s: proc debug_level file create failed!\n", __func__); + goto fail_1; + } + + himax_proc_vendor_file = proc_create(HIMAX_PROC_VENDOR_FILE, 0444, + himax_touch_proc_dir, &himax_proc_vendor_ops); + if (himax_proc_vendor_file == NULL) { + E(" %s: proc vendor file create failed!\n", __func__); + goto fail_2; + } + + himax_proc_attn_file = proc_create(HIMAX_PROC_ATTN_FILE, 0444, + himax_touch_proc_dir, &himax_proc_attn_ops); + if (himax_proc_attn_file == NULL) { + E(" %s: proc attn file create failed!\n", __func__); + goto fail_3; + } + + himax_proc_int_en_file = proc_create(HIMAX_PROC_INT_EN_FILE, 0644, + himax_touch_proc_dir, &himax_proc_int_en_ops); + if (himax_proc_int_en_file == NULL) { + E(" %s: proc int en file create failed!\n", __func__); + goto fail_4; + } + + himax_proc_layout_file = proc_create(HIMAX_PROC_LAYOUT_FILE, 0644, + himax_touch_proc_dir, &himax_proc_layout_ops); + if (himax_proc_layout_file == NULL) { + E(" %s: proc layout file create failed!\n", __func__); + goto fail_5; + } + + himax_proc_reset_file = proc_create(HIMAX_PROC_RESET_FILE, 0200, + himax_touch_proc_dir, &himax_proc_reset_ops); + if (himax_proc_reset_file == NULL) { + E(" %s: proc reset file create failed!\n", __func__); + goto fail_6; + } + + himax_proc_diag_file = proc_create(HIMAX_PROC_DIAG_FILE, 0644, + himax_touch_proc_dir, &himax_proc_diag_ops); + if (himax_proc_diag_file == NULL) { + E(" %s: proc diag file create failed!\n", __func__); + goto fail_7; + } + + himax_proc_diag_arrange_file = proc_create(HIMAX_PROC_DIAG_ARR_FILE, 0644, + himax_touch_proc_dir, &himax_proc_diag_arrange_ops); + if (himax_proc_diag_arrange_file == NULL) { + E(" %s: proc diag file create failed!\n", __func__); + goto fail_7_1; + } + + himax_proc_register_file = proc_create(HIMAX_PROC_REGISTER_FILE, 0644, + himax_touch_proc_dir, &himax_proc_register_ops); + if (himax_proc_register_file == NULL) { + E(" %s: proc register file create failed!\n", __func__); + goto fail_8; + } + + himax_proc_debug_file = proc_create(HIMAX_PROC_DEBUG_FILE, 0644, + himax_touch_proc_dir, &himax_proc_debug_ops); + if (himax_proc_debug_file == NULL) { + E(" %s: proc debug file create failed!\n", __func__); + goto fail_9; + } + + himax_proc_fw_debug_file = proc_create(HIMAX_PROC_FW_DEBUG_FILE, 0644, + himax_touch_proc_dir, &himax_proc_fw_debug_ops); + if (himax_proc_fw_debug_file == NULL) { + E(" %s: proc fw debug file create failed!\n", __func__); + goto fail_9_1; + } + + himax_proc_dd_debug_file = proc_create(HIMAX_PROC_DD_DEBUG_FILE, 0644, + 
himax_touch_proc_dir, &himax_proc_dd_debug_ops); + if (himax_proc_dd_debug_file == NULL) { + E(" %s: proc DD debug file create failed!\n", __func__); + goto fail_9_2; + } + + himax_proc_flash_dump_file = proc_create(HIMAX_PROC_FLASH_DUMP_FILE, 0644, + himax_touch_proc_dir, &himax_proc_flash_ops); + if (himax_proc_flash_dump_file == NULL) { + E(" %s: proc flash dump file create failed!\n", __func__); + goto fail_10; + } + + himax_proc_SENSE_ON_OFF_file = proc_create(HIMAX_PROC_SENSE_ON_OFF_FILE, 0666, + himax_touch_proc_dir, &himax_proc_sense_on_off_ops); + if (himax_proc_SENSE_ON_OFF_file == NULL) { + E(" %s: proc SENSE_ON_OFF file create failed!\n", __func__); + goto fail_16; + } + +#ifdef HX_ESD_RECOVERY + himax_proc_ESD_cnt_file = proc_create(HIMAX_PROC_ESD_CNT_FILE, 0666, + himax_touch_proc_dir, &himax_proc_esd_cnt_ops); + + if (himax_proc_ESD_cnt_file == NULL) { + E(" %s: proc ESD cnt file create failed!\n", __func__); + goto fail_17; + } + +#endif + himax_proc_CRC_test_file = proc_create(HIMAX_PROC_CRC_TEST_FILE, 0666, + himax_touch_proc_dir, &himax_proc_CRC_test_ops); + + if (himax_proc_CRC_test_file == NULL) { + E(" %s: proc CRC test file create failed!\n", __func__); + goto fail_18; + } + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + himax_proc_ito_test_file = proc_create(HIMAX_PROC_ITO_TEST_FILE, 0777, + himax_touch_proc_dir, &himax_proc_ito_test_ops); + + if (himax_proc_ito_test_file == NULL) { + E(" %s: proc ITO test file create failed!\n", __func__); + goto fail_19; + } + +#endif + return 0; +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + remove_proc_entry(HIMAX_PROC_ITO_TEST_FILE, himax_touch_proc_dir); +fail_19: +#endif +fail_18: +#ifdef HX_ESD_RECOVERY + remove_proc_entry(HIMAX_PROC_ESD_CNT_FILE, himax_touch_proc_dir); +fail_17: +#endif + remove_proc_entry(HIMAX_PROC_SENSE_ON_OFF_FILE, himax_touch_proc_dir); +fail_16: + remove_proc_entry(HIMAX_PROC_FLASH_DUMP_FILE, himax_touch_proc_dir); +fail_10: + remove_proc_entry(HIMAX_PROC_DEBUG_FILE, himax_touch_proc_dir); +fail_9: + remove_proc_entry(HIMAX_PROC_FW_DEBUG_FILE, himax_touch_proc_dir); +fail_9_1: + remove_proc_entry(HIMAX_PROC_DD_DEBUG_FILE, himax_touch_proc_dir); +fail_9_2: + remove_proc_entry(HIMAX_PROC_REGISTER_FILE, himax_touch_proc_dir); +fail_8: + remove_proc_entry(HIMAX_PROC_DIAG_FILE, himax_touch_proc_dir); +fail_7: + remove_proc_entry(HIMAX_PROC_DIAG_ARR_FILE, himax_touch_proc_dir); +fail_7_1: + remove_proc_entry(HIMAX_PROC_RESET_FILE, himax_touch_proc_dir); +fail_6: + remove_proc_entry(HIMAX_PROC_LAYOUT_FILE, himax_touch_proc_dir); +fail_5: + remove_proc_entry(HIMAX_PROC_INT_EN_FILE, himax_touch_proc_dir); +fail_4: + remove_proc_entry(HIMAX_PROC_ATTN_FILE, himax_touch_proc_dir); +fail_3: + remove_proc_entry(HIMAX_PROC_VENDOR_FILE, himax_touch_proc_dir); +fail_2: + remove_proc_entry(HIMAX_PROC_DEBUG_LEVEL_FILE, himax_touch_proc_dir); +fail_1: + return -ENOMEM; +} + +void himax_touch_proc_deinit(void) +{ +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) + remove_proc_entry(HIMAX_PROC_ITO_TEST_FILE, himax_touch_proc_dir); +#endif + remove_proc_entry(HIMAX_PROC_CRC_TEST_FILE, himax_touch_proc_dir); +#ifdef HX_ESD_RECOVERY + remove_proc_entry(HIMAX_PROC_ESD_CNT_FILE, himax_touch_proc_dir); +#endif + remove_proc_entry(HIMAX_PROC_SENSE_ON_OFF_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_FLASH_DUMP_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_DEBUG_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_FW_DEBUG_FILE, himax_touch_proc_dir); + 
remove_proc_entry(HIMAX_PROC_DD_DEBUG_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_REGISTER_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_DIAG_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_RESET_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_LAYOUT_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_INT_EN_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_ATTN_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_VENDOR_FILE, himax_touch_proc_dir); + remove_proc_entry(HIMAX_PROC_DEBUG_LEVEL_FILE, himax_touch_proc_dir); +} + +int himax_debug_init(void) +{ + struct himax_ts_data *ts = private_ts; + int err = 0; + + I("%s:Enter\n", __func__); + + if (ts == NULL) { + E("%s: ts struct is NULL\n", __func__); + return -EPROBE_DEFER; + } + + debug_data = kzalloc(sizeof(*debug_data), GFP_KERNEL); + if (debug_data == NULL) { + E("%s: allocate memory failed!\n", __func__); + err = -ENOMEM; + goto err_alloc_debug_data_fail; + } + + himax_himax_data_init(); + + ts->flash_wq = create_singlethread_workqueue("himax_flash_wq"); + + if (!ts->flash_wq) { + E("%s: create flash workqueue failed\n", __func__); + err = -ENOMEM; + goto err_create_flash_dump_wq_failed; + } + + INIT_WORK(&ts->flash_work, himax_ts_flash_work_func); + setSysOperation(0); + setFlashBuffer(); + + ts->himax_diag_wq = create_singlethread_workqueue("himax_diag"); + + if (!ts->himax_diag_wq) { + E("%s: create diag workqueue failed\n", __func__); + err = -ENOMEM; + goto err_create_diag_wq_failed; + } + + INIT_DELAYED_WORK(&ts->himax_diag_delay_wrok, himax_ts_diag_work_func); + + setXChannel(ic_data->HX_RX_NUM); /* X channel */ + setYChannel(ic_data->HX_TX_NUM); /* Y channel */ + setMutualBuffer(); + setMutualNewBuffer(); + setMutualOldBuffer(); + + if (getMutualBuffer() == NULL) { + E("%s: mutual buffer allocate fail failed\n", __func__); + err = MEM_ALLOC_FAIL; + goto err_get_MutualBuffer_failed; + } +#ifdef HX_TP_PROC_2T2R + + if (Is_2T2R) { + setXChannel_2(ic_data->HX_RX_NUM_2); /* X channel */ + setYChannel_2(ic_data->HX_TX_NUM_2); /* Y channel */ + setMutualBuffer_2(); + + if (getMutualBuffer_2() == NULL) { + E("%s: mutual buffer 2 allocate fail failed\n", __func__); + err = MEM_ALLOC_FAIL; + goto err_get_MutualBuffer2_failed; + } + } +#endif + + himax_touch_proc_init(); + + return 0; + +err_get_MutualBuffer2_failed: +err_get_MutualBuffer_failed: + cancel_delayed_work_sync(&ts->himax_diag_delay_wrok); + destroy_workqueue(ts->himax_diag_wq); + +err_create_diag_wq_failed: + destroy_workqueue(ts->flash_wq); + +err_create_flash_dump_wq_failed: + kfree(debug_data); + +err_alloc_debug_data_fail: + + return err; +} + +int himax_debug_remove(void) +{ + struct himax_ts_data *ts = private_ts; + + himax_touch_proc_deinit(); + + cancel_delayed_work_sync(&ts->himax_diag_delay_wrok); + destroy_workqueue(ts->himax_diag_wq); + destroy_workqueue(ts->flash_wq); + + kfree(debug_data); + + return 0; +} + diff --git a/drivers/input/touchscreen/hxchipset/himax_debug.h b/drivers/input/touchscreen/hxchipset/himax_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..f1df7cbbb16f18689b019fbc3bf0327b7de04142 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_debug.h @@ -0,0 +1,60 @@ +/* + * Himax Android Driver Sample Code for debug nodes + * + * Copyright (C) 2018 Himax Corporation. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef H_HIMAX_DEBUG +#define H_HIMAX_DEBUG + +#include "himax_platform.h" +#include "himax_common.h" + +#define HIMAX_PROC_DEBUG_LEVEL_FILE "debug_level" +#define HIMAX_PROC_VENDOR_FILE "vendor" +#define HIMAX_PROC_ATTN_FILE "attn" +#define HIMAX_PROC_INT_EN_FILE "int_en" +#define HIMAX_PROC_LAYOUT_FILE "layout" +#define HIMAX_PROC_CRC_TEST_FILE "CRC_test" + +#ifdef HX_ESD_RECOVERY +extern u8 HX_ESD_RESET_ACTIVATE; +extern int hx_EB_event_flag; +extern int hx_EC_event_flag; +extern int hx_ED_event_flag; +#endif + +#ifdef HX_TP_PROC_2T2R + extern bool Is_2T2R; +#endif + +extern bool DSRAM_Flag; + +int himax_touch_proc_init(void); +void himax_touch_proc_deinit(void); +extern int himax_int_en_set(void); + +extern int himax_debug_init(void); +extern int himax_debug_remove(void); + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_ITO_TEST) +#define HIMAX_PROC_ITO_TEST_FILE "ITO_test" +static struct proc_dir_entry *himax_proc_ito_test_file; + +extern void ito_set_step_status(uint8_t status); +extern uint8_t ito_get_step_status(void); +extern void ito_set_result_status(uint8_t status); +extern uint8_t ito_get_result_status(void); +#endif + +#endif diff --git a/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c b/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c new file mode 100644 index 0000000000000000000000000000000000000000..307ea4842ade41461c94357e9606412827ad05b5 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.c @@ -0,0 +1,298 @@ +/* + * Himax Android Driver Sample Code for HX83112 chipset + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "himax_ic_HX83112.h" + + +extern unsigned char IC_TYPE; + + +static bool hx83112_sense_off(void) +{ + uint8_t cnt = 0; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + + do { + /* + *=========================================== + * I2C_password[7:0] set Enter safe mode : 0x31 ==> 0x27 + *=========================================== + */ + tmp_data[0] = pic_op->data_i2c_psw_lb[0]; + + if (himax_bus_write(pic_op->adr_i2c_psw_lb[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *=========================================== + * I2C_password[15:8] set Enter safe mode :0x32 ==> 0x95 + *=========================================== + */ + tmp_data[0] = pic_op->data_i2c_psw_ub[0]; + + if (himax_bus_write(pic_op->adr_i2c_psw_ub[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *=========================================== + * I2C_password[7:0] set Enter safe mode : 0x31 ==> 0x00 + *=========================================== + */ + tmp_data[0] = 0x00; + + if (himax_bus_write(pic_op->adr_i2c_psw_lb[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *========================================== + * I2C_password[7:0] set Enter safe mode : 0x31 ==> 0x27 + *=========================================== + */ + tmp_data[0] = pic_op->data_i2c_psw_lb[0]; + + if (himax_bus_write(pic_op->adr_i2c_psw_lb[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *========================================== + * I2C_password[15:8] set Enter safe mode :0x32 ==> 0x95 + *========================================== + */ + tmp_data[0] = pic_op->data_i2c_psw_ub[0]; + + if (himax_bus_write(pic_op->adr_i2c_psw_ub[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *===================== + * Check enter_save_mode + *===================== + */ + g_core_fp.fp_register_read(pic_op->addr_cs_central_state, FOUR_BYTE_ADDR_SZ, tmp_data, 0); + I("%s: Check enter_save_mode data[0]=%X\n", __func__, tmp_data[0]); + + if (tmp_data[0] == 0x0C) { + /* + *=================================== + * Reset TCON + *==================================== + */ + g_core_fp.fp_flash_write_burst(pic_op->addr_tcon_on_rst, pic_op->data_rst); + msleep(20); + tmp_data[3] = pic_op->data_rst[3]; + tmp_data[2] = pic_op->data_rst[2]; + tmp_data[1] = pic_op->data_rst[1]; + tmp_data[0] = pic_op->data_rst[0] | 0x01; + g_core_fp.fp_flash_write_burst(pic_op->addr_tcon_on_rst, tmp_data); + /* + *=================================== + * Reset ADC + *==================================== + */ + g_core_fp.fp_flash_write_burst(pic_op->addr_adc_on_rst, pic_op->data_rst); + msleep(20); + tmp_data[3] = pic_op->data_rst[3]; + tmp_data[2] = pic_op->data_rst[2]; + tmp_data[1] = pic_op->data_rst[1]; + tmp_data[0] = pic_op->data_rst[0] | 0x01; + g_core_fp.fp_flash_write_burst(pic_op->addr_adc_on_rst, tmp_data); + return true; + } + + msleep(20); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, false); +#endif + + } while (cnt++ < 15); + + return false; +} + +static void hx83112_func_re_init(void) +{ + g_core_fp.fp_sense_off = hx83112_sense_off; +} + +static void hx83112_reg_re_init(void) +{ +} + +static bool hx83112_chip_detect(void) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + bool ret_data = false; + int i = 0; + + himax_mcu_in_cmd_struct_init(); + 
himax_mcu_in_cmd_init();
+
+	hx83112_reg_re_init();
+	hx83112_func_re_init();
+
+	g_core_fp.fp_sense_off();
+
+	for (i = 0; i < 5; i++) {
+		g_core_fp.fp_register_read(pfw_op->addr_icid_addr, FOUR_BYTE_DATA_SZ, tmp_data, false);
+		I("%s:Read driver IC ID = %X, %X, %X\n", __func__, tmp_data[3], tmp_data[2], tmp_data[1]);
+
+		if ((tmp_data[3] == 0x83) && (tmp_data[2] == 0x11) && ((tmp_data[1] == 0x2a) || (tmp_data[1] == 0x2b))) {
+			if (tmp_data[1] == 0x2a)
+				strlcpy(private_ts->chip_name, HX_83112A_SERIES_PWON, 30);
+			else if (tmp_data[1] == 0x2b)
+				strlcpy(private_ts->chip_name, HX_83112B_SERIES_PWON, 30);
+
+			I("%s:IC name = %s\n", __func__, private_ts->chip_name);
+
+			I("Himax IC package %x%x%x in\n", tmp_data[3], tmp_data[2], tmp_data[1]);
+			ret_data = true;
+			break;
+		}
+
+		ret_data = false;
+		E("%s:Read driver ID register fail\n", __func__);
+
+	}
+
+	return ret_data;
+}
+
+static void hx83112_chip_init(void)
+{
+
+	private_ts->chip_cell_type = CHIP_IS_IN_CELL;
+	I("%s:IC cell type = %d\n", __func__, private_ts->chip_cell_type);
+	IC_CHECKSUM = HX_TP_BIN_CHECKSUM_CRC;
+	/* Himax: Set FW and CFG Flash Address */
+	FW_VER_MAJ_FLASH_ADDR = 49157; /* 0x00C005 */
+	FW_VER_MAJ_FLASH_LENG = 1;
+	FW_VER_MIN_FLASH_ADDR = 49158; /* 0x00C006 */
+	FW_VER_MIN_FLASH_LENG = 1;
+	CFG_VER_MAJ_FLASH_ADDR = 49408; /* 0x00C100 */
+	CFG_VER_MAJ_FLASH_LENG = 1;
+	CFG_VER_MIN_FLASH_ADDR = 49409; /* 0x00C101 */
+	CFG_VER_MIN_FLASH_LENG = 1;
+	CID_VER_MAJ_FLASH_ADDR = 49154; /* 0x00C002 */
+	CID_VER_MAJ_FLASH_LENG = 1;
+	CID_VER_MIN_FLASH_ADDR = 49155; /* 0x00C003 */
+	CID_VER_MIN_FLASH_LENG = 1;
+
+#ifdef HX_AUTO_UPDATE_FW
+	g_i_FW_VER = (i_CTPM_FW[FW_VER_MAJ_FLASH_ADDR] << 8) | i_CTPM_FW[FW_VER_MIN_FLASH_ADDR];
+	g_i_CFG_VER = (i_CTPM_FW[CFG_VER_MAJ_FLASH_ADDR] << 8) | i_CTPM_FW[CFG_VER_MIN_FLASH_ADDR];
+	g_i_CID_MAJ = i_CTPM_FW[CID_VER_MAJ_FLASH_ADDR];
+	g_i_CID_MIN = i_CTPM_FW[CID_VER_MIN_FLASH_ADDR];
+#endif
+}
+
+#ifdef CONFIG_CHIP_DTCFG
+static int himax_hx83112_probe(struct platform_device *pdev)
+{
+	I("%s:Enter\n", __func__);
+	g_core_fp.fp_chip_detect = hx83112_chip_detect;
+	g_core_fp.fp_chip_init = hx83112_chip_init;
+	return 0;
+}
+
+static int himax_hx83112_remove(struct platform_device *pdev)
+{
+	g_core_fp.fp_chip_detect = NULL;
+	g_core_fp.fp_chip_init = NULL;
+	return 0;
+}
+
+
+#ifdef CONFIG_OF
+static const struct of_device_id himax_hx83112_mttable[] = {
+	{ .compatible = "himax,hx83112"},
+	{ },
+};
+#else
+#define himax_hx83112_mttable NULL
+#endif
+
+static struct platform_driver himax_hx83112_driver = {
+	.probe = himax_hx83112_probe,
+	.remove = himax_hx83112_remove,
+	.driver = {
+		.name = "HIMAX_HX83112",
+		.owner = THIS_MODULE,
+		.of_match_table = himax_hx83112_mttable,
+	},
+};
+
+static int __init himax_hx83112_init(void)
+{
+	I("%s\n", __func__);
+	platform_driver_register(&himax_hx83112_driver);
+	return 0;
+}
+
+static void __exit himax_hx83112_exit(void)
+{
+	platform_driver_unregister(&himax_hx83112_driver);
+}
+
+#else
+static int himax_hx83112_probe(void)
+{
+	I("%s:Enter\n", __func__);
+
+	g_core_fp.fp_chip_detect = hx83112_chip_detect;
+	g_core_fp.fp_chip_init = hx83112_chip_init;
+
+	return 0;
+}
+
+static int himax_hx83112_remove(void)
+{
+	g_core_fp.fp_chip_detect = NULL;
+	g_core_fp.fp_chip_init = NULL;
+	return 0;
+}
+
+static int __init himax_hx83112_init(void)
+{
+	int ret = 0;
+
+	I("%s\n", __func__);
+	ret = himax_hx83112_probe();
+	return 0;
+}
+
+static void __exit himax_hx83112_exit(void)
+{
+	himax_hx83112_remove();
+}
+#endif
+
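+/*
+ * Module registration note: with CONFIG_CHIP_DTCFG the HX83112 detect/init
+ * hooks above are installed from a platform-driver probe matched on the
+ * "himax,hx83112" compatible string; without it, the probe()/remove()
+ * helpers in the #else branch are called directly from module init/exit.
+ */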
+module_init(himax_hx83112_init); +module_exit(himax_hx83112_exit); + +MODULE_DESCRIPTION("HIMAX HX83112 touch driver"); +MODULE_LICENSE("GPL v2"); + + diff --git a/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.h b/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.h new file mode 100644 index 0000000000000000000000000000000000000000..3576ec7c6cb9874edde1f3741536a25ba87fb654 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_ic_HX83112.h @@ -0,0 +1,22 @@ +/* + * Himax Android Driver Sample Code for HX83112 chipset + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include + +#include "himax_platform.h" +#include "himax_common.h" +#include "himax_ic_core.h" + diff --git a/drivers/input/touchscreen/hxchipset/himax_ic_core.h b/drivers/input/touchscreen/hxchipset/himax_ic_core.h new file mode 100644 index 0000000000000000000000000000000000000000..1986cbbe42c12ce7dc9c254c0973b35049843957 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_ic_core.h @@ -0,0 +1,716 @@ +/* + * Himax Android Driver Sample Code for IC Core + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include + +#include "himax_platform.h" +#include "himax_common.h" + +#define EIGHT_BYTE_DATA_SZ 8 +#define FOUR_BYTE_DATA_SZ 4 +#define FOUR_BYTE_ADDR_SZ 4 +#define FLASH_RW_MAX_LEN 256 +#define FLASH_WRITE_BURST_SZ 8 +#define PROGRAM_SZ 48 +#define MAX_I2C_TRANS_SZ 128 +#define HIMAX_REG_RETRY_TIMES 5 +#define FW_BIN_16K_SZ 0x4000 +#define HIMAX_TOUCH_DATA_SIZE 128 +#define MASK_BIT_0 0x01 +#define MASK_BIT_1 0x02 +#define MASK_BIT_2 0x04 + +#define FW_SECTOR_PER_BLOCK 8 +#define FW_PAGE_PER_SECTOR 64 +#define FW_PAGE_SEZE 128 +#define HX256B 0x100 +#define HX4K 0x1000 +#define HX_32K_SZ 0x8000 +#define HX_48K_SZ 0xC000 +#define HX64K 0x10000 +#define HX124K 0x1f000 +#define HX4000K 0x1000000 + +#define HX_NORMAL_MODE 1 +#define HX_SORTING_MODE 2 +#define HX_CHANGE_MODE_FAIL (-1) +#define HX_RW_REG_FAIL (-1) + +#define CORE_INIT +#define CORE_IC +#define CORE_FW +#define CORE_FLASH +#define CORE_SRAM +#define CORE_DRIVER + +#define HX_0F_DEBUG + +#ifdef HX_ESD_RECOVERY + extern u8 HX_ESD_RESET_ACTIVATE; +#endif + +#ifdef CORE_INIT + void himax_mcu_in_cmd_struct_init(void); + /*void himax_mcu_in_cmd_struct_free(void);*/ + void himax_in_parse_assign_cmd(uint32_t addr, uint8_t *cmd, int len); + void himax_mcu_in_cmd_init(void); + + void himax_mcu_on_cmd_struct_init(void); + /*static void himax_mcu_on_cmd_struct_free(void);*/ + void himax_on_parse_assign_cmd(uint32_t addr, uint8_t *cmd, int len); + void himax_mcu_on_cmd_init(void); +#endif + +#if defined(CORE_IC) + #define ic_adr_ahb_addr_byte_0 0x00 + #define ic_adr_ahb_rdata_byte_0 0x08 + #define ic_adr_ahb_access_direction 0x0c + #define ic_adr_conti 0x13 + #define ic_adr_incr4 0x0D + #define ic_adr_i2c_psw_lb 0x31 + #define ic_adr_i2c_psw_ub 0x32 + #define ic_cmd_ahb_access_direction_read 0x00 + #define ic_cmd_conti 0x31 + #define ic_cmd_incr4 0x10 + #define ic_cmd_i2c_psw_lb 0x27 + #define ic_cmd_i2c_psw_ub 0x95 + #define ic_adr_tcon_on_rst 0x80020020 + #define ic_addr_adc_on_rst 0x80020094 + #define ic_adr_psl 0x900000A0 + #define ic_adr_cs_central_state 0x900000A8 + #define ic_cmd_rst 0x00000000 + + #define on_ic_adr_ahb_addr_byte_0 0x00 + #define on_ic_adr_ahb_rdata_byte_0 0x08 + #define on_ic_adr_ahb_access_direction 0x0c + #define on_ic_adr_conti 0x13 + #define on_ic_adr_incr4 0x0D + #define on_ic_cmd_ahb_access_direction_read 0x00 + #define on_ic_cmd_conti 0x31 + #define on_ic_cmd_incr4 0x10 + #define on_ic_adr_mcu_ctrl 0x82 + #define on_ic_cmd_mcu_on 0x25 + #define on_ic_cmd_mcu_off 0xDA + #define on_ic_adr_sleep_ctrl 0x99 + #define on_ic_cmd_sleep_in 0x80 + #define on_ic_adr_tcon_ctrl 0x80020000 + #define on_ic_cmd_tcon_on 0x00000000 + #define on_ic_adr_wdg_ctrl 0x9000800C + #define on_ic_cmd_wdg_psw 0x0000AC53 + #define on_ic_adr_wdg_cnt_ctrl 0x90008010 + #define on_ic_cmd_wdg_cnt_clr 0x000035CA +#endif + +#if defined(CORE_FW) + #define fw_addr_system_reset 0x90000018 + #define fw_addr_safe_mode_release_pw 0x90000098 + #define fw_addr_ctrl_fw 0x9000005c + #define fw_addr_flag_reset_event 0x900000e4 + #define fw_addr_hsen_enable 0x10007F14 + #define fw_addr_smwp_enable 0x10007F10 + #define fw_usb_detect_addr 0x10007F38 + #define fw_addr_program_reload_from 0x00000000 + #define fw_addr_program_reload_to 0x08000000 + #define fw_addr_program_reload_page_write 0x0000fb00 + #define fw_addr_raw_out_sel 0x800204b4 + #define fw_addr_reload_status 0x80050000 + #define fw_addr_reload_crc32_result 0x80050018 + #define fw_addr_reload_addr_from 0x80050020 + #define fw_addr_reload_addr_cmd_beat 0x80050028 + #define 
fw_data_system_reset 0x00000055 + #define fw_data_safe_mode_release_pw_active 0x00000053 + #define fw_data_safe_mode_release_pw_reset 0x00000000 + #define fw_data_clear 0x00000000 + #define fw_data_program_reload_start 0x0A3C3000 + #define fw_data_program_reload_compare 0x04663000 + #define fw_data_program_reload_break 0x15E75678 + #define fw_addr_selftest_addr_en 0x10007F18 + #define fw_addr_selftest_result_addr 0x10007f24 + #define fw_data_selftest_request 0x00006AA6 + #define fw_addr_criteria_addr 0x10007f1c + #define fw_data_criteria_aa_top 0xff + #define fw_data_criteria_aa_bot 0x00 + #define fw_data_criteria_key_top 0xff + #define fw_data_criteria_key_bot 0x00 + #define fw_data_criteria_avg_top 0xff + #define fw_data_criteria_avg_bot 0x00 + #define fw_addr_set_frame_addr 0x10007294 + #define fw_data_set_frame 0x0000000A + #define fw_data_selftest_ack_hb 0xa6 + #define fw_data_selftest_ack_lb 0x6a + #define fw_data_selftest_pass 0xaa + #define fw_data_normal_cmd 0x00 + #define fw_data_normal_status 0x99 + #define fw_data_sorting_cmd 0xaa + #define fw_data_sorting_status 0xcc + #define fw_data_idle_dis_pwd 0x17 + #define fw_data_idle_en_pwd 0x1f + #define fw_addr_sorting_mode_en 0x10007f04 + #define fw_addr_fw_mode_status 0x10007088 + #define fw_addr_icid_addr 0x900000d0 + #define fw_addr_trigger_addr 0x10007089 + #define fw_addr_fw_ver_addr 0x10007004 + #define fw_addr_fw_cfg_addr 0x10007084 + #define fw_addr_fw_vendor_addr 0x10007000 + #define fw_addr_fw_state_addr 0x900000f8 + #define fw_addr_fw_dbg_msg_addr 0x10007f44 + #define fw_addr_chk_fw_status 0x900000a8 + #define fw_addr_dd_handshak_addr 0x900000fc + #define fw_addr_dd_data_addr 0x10007f80 + #define fw_data_dd_request 0xaa + #define fw_data_dd_ack 0xbb + #define fw_data_rawdata_ready_hb 0xa3 + #define fw_data_rawdata_ready_lb 0x3a + #define fw_addr_ahb_addr 0x11 + #define fw_data_ahb_dis 0x00 + #define fw_data_ahb_en 0x01 + #define fw_addr_event_addr 0x30 + #define fw_func_handshaking_pwd 0xA55AA55A + #define fw_func_handshaking_end 0x77887788 + + #define on_fw_addr_smwp_enable 0xA2 + #define on_fw_usb_detect_addr 0xA4 + #define on_fw_addr_program_reload_from 0x00000000 + #define on_fw_addr_raw_out_sel 0x98 + #define on_fw_addr_flash_checksum 0x80000044 + #define on_fw_data_flash_checksum 0x00000491 + #define on_fw_addr_crc_value 0x80000050 + #define on_fw_data_safe_mode_release_pw_active 0x00000053 + #define on_fw_data_safe_mode_release_pw_reset 0x00000000 + #define on_fw_addr_criteria_addr 0x9A + #define on_fw_data_selftest_pass 0xaa + #define on_fw_addr_reK_crtl 0x8000000C + #define on_fw_data_reK_en 0x02 + #define on_fw_data_reK_dis 0xFD + #define on_fw_data_rst_init 0xF0 + #define on_fw_data_dc_set 0x02 + #define on_fw_data_bank_set 0x03 + #define on_fw_addr_selftest_addr_en 0x98 + #define on_fw_addr_selftest_result_addr 0x9B + #define on_fw_data_selftest_request 0x06 + #define on_fw_data_thx_avg_mul_dc_lsb 0x22 + #define on_fw_data_thx_avg_mul_dc_msb 0x0B + #define on_fw_data_thx_mul_dc_up_low_bud 0x64 + #define on_fw_data_thx_avg_slf_dc_lsb 0x14 + #define on_fw_data_thx_avg_slf_dc_msb 0x05 + #define on_fw_data_thx_slf_dc_up_low_bud 0x64 + #define on_fw_data_thx_slf_bank_up 0x40 + #define on_fw_data_thx_slf_bank_low 0x00 + #define on_fw_data_idle_dis_pwd 0x40 + #define on_fw_data_idle_en_pwd 0x00 + #define on_fw_addr_fw_mode_status 0x99 + #define on_fw_addr_icid_addr 0x900000d0 + #define on_fw_addr_trigger_addr 0x10007089 + #define on_fw_addr_fw_ver_start 0x90 + #define on_fw_data_rawdata_ready_hb 0xa3 + #define 
on_fw_data_rawdata_ready_lb 0x3a + #define on_fw_addr_ahb_addr 0x11 + #define on_fw_data_ahb_dis 0x00 + #define on_fw_data_ahb_en 0x01 + #define on_fw_addr_event_addr 0x30 +#endif + +#if defined(CORE_FLASH) + #define flash_addr_ctrl_base 0x80000000 + #define flash_addr_spi200_trans_fmt (flash_addr_ctrl_base + 0x10) + #define flash_addr_spi200_trans_ctrl (flash_addr_ctrl_base + 0x20) + #define flash_addr_spi200_cmd (flash_addr_ctrl_base + 0x24) + #define flash_addr_spi200_addr (flash_addr_ctrl_base + 0x28) + #define flash_addr_spi200_data (flash_addr_ctrl_base + 0x2c) + #define flash_addr_spi200_bt_num (flash_addr_ctrl_base + 0xe8) + #define flash_data_spi200_trans_fmt 0x00020780 + #define flash_data_spi200_trans_ctrl_1 0x42000003 + #define flash_data_spi200_trans_ctrl_2 0x47000000 + #define flash_data_spi200_trans_ctrl_3 0x67000000 + #define flash_data_spi200_trans_ctrl_4 0x610ff000 + #define flash_data_spi200_trans_ctrl_5 0x694002ff + #define flash_data_spi200_cmd_1 0x00000005 + #define flash_data_spi200_cmd_2 0x00000006 + #define flash_data_spi200_cmd_3 0x000000C7 + #define flash_data_spi200_cmd_4 0x00000052 + #define flash_data_spi200_cmd_5 0x00000020 + #define flash_data_spi200_cmd_6 0x00000002 + #define flash_data_spi200_cmd_7 0x0000003b + #define flash_data_spi200_addr 0x00000000 + + #define on_flash_addr_ctrl_base 0x80000000 + #define on_flash_addr_ctrl_auto 0x80000004 + #define on_flash_data_main_erase 0x0000A50D + #define on_flash_data_auto 0xA5 + #define on_flash_data_main_read 0x03 + #define on_flash_data_page_write 0x05 + #define on_flash_data_spp_read 0x10 + #define on_flash_data_sfr_read 0x14 + #define on_flash_addr_ahb_ctrl 0x80000020 + #define on_flash_data_ahb_squit 0x00000001 + #define on_flash_addr_unlock_0 0x00000000 + #define on_flash_addr_unlock_4 0x00000004 + #define on_flash_addr_unlock_8 0x00000008 + #define on_flash_addr_unlock_c 0x0000000C + #define on_flash_data_cmd0 0x28178EA0 + #define on_flash_data_cmd1 0x0A0E03FF + #define on_flash_data_cmd2 0x8C203D0C + #define on_flash_data_cmd3 0x00300263 + #define on_flash_data_lock 0x03400000 +#endif + +#if defined(CORE_SRAM) + #define sram_adr_mkey 0x100070E8 + #define sram_adr_rawdata_addr 0x10000000 + #define sram_adr_rawdata_end 0x00000000 + #define sram_cmd_conti 0x44332211 + #define sram_cmd_fin 0x00000000 + #define sram_passwrd_start 0x5AA5 + #define sram_passwrd_end 0xA55A + + #define on_sram_adr_rawdata_addr 0x080002E0 + #define on_sram_adr_rawdata_end 0x00000000 + #define on_sram_cmd_conti 0x44332211 + #define on_sram_cmd_fin 0x00000000 + #define on_sram_passwrd_start 0x5AA5 + #define on_sram_passwrd_end 0xA55A +#endif + +#if defined(CORE_DRIVER) + #define driver_addr_fw_define_flash_reload 0x10007f00 + #define driver_addr_fw_define_2nd_flash_reload 0x100072c0 + #define driver_data_fw_define_flash_reload_dis 0x0000a55a + #define driver_data_fw_define_flash_reload_en 0x00000000 + #define driver_addr_fw_define_int_is_edge 0x10007089 + #define driver_addr_fw_define_rxnum_txnum_maxpt 0x100070f4 + #define driver_data_fw_define_rxnum_txnum_maxpt_sorting 0x00000008 + #define driver_data_fw_define_rxnum_txnum_maxpt_normal 0x00000014 + #define driver_addr_fw_define_xy_res_enable 0x100070fa + #define driver_addr_fw_define_x_y_res 0x100070fc + + #define on_driver_addr_fw_define_int_is_edge 0x10007089 + #define on_driver_addr_fw_rx_tx_maxpt_num 0x0800001C + #define on_driver_addr_fw_xy_rev_int_edge 0x0800000C + #define on_driver_addr_fw_define_x_y_res 0x08000030 +#endif + +#if defined(HX_ZERO_FLASH) + #define 
zf_addr_dis_flash_reload 0x10007f00 + #define zf_data_dis_flash_reload 0x00009AA9 + #define zf_addr_system_reset 0x90000018 + #define zf_data_system_reset 0x00000055 + #define zf_data_sram_start_addr 0x08000000 + #define zf_data_sram_clean 0x10000000 + #define zf_data_cfg_info 0x10007000 + #define zf_data_fw_cfg 0x10007084 + #define zf_data_adc_cfg_1 0x10007800 + #define zf_data_adc_cfg_2 0x10007978 + #define zf_data_adc_cfg_3 0x10007AF0 +#endif + +struct ic_operation { + uint8_t addr_ahb_addr_byte_0[1]; + uint8_t addr_ahb_rdata_byte_0[1]; + uint8_t addr_ahb_access_direction[1]; + uint8_t addr_conti[1]; + uint8_t addr_incr4[1]; + uint8_t adr_i2c_psw_lb[1]; + uint8_t adr_i2c_psw_ub[1]; + uint8_t data_ahb_access_direction_read[1]; + uint8_t data_conti[1]; + uint8_t data_incr4[1]; + uint8_t data_i2c_psw_lb[1]; + uint8_t data_i2c_psw_ub[1]; + uint8_t addr_tcon_on_rst[4]; + uint8_t addr_adc_on_rst[4]; + uint8_t addr_psl[4]; + uint8_t addr_cs_central_state[4]; + uint8_t data_rst[4]; +}; + +struct fw_operation { + uint8_t addr_system_reset[4]; + uint8_t addr_safe_mode_release_pw[4]; + uint8_t addr_ctrl_fw_isr[4]; + uint8_t addr_flag_reset_event[4]; + uint8_t addr_hsen_enable[4]; + uint8_t addr_smwp_enable[4]; + uint8_t addr_program_reload_from[4]; + uint8_t addr_program_reload_to[4]; + uint8_t addr_program_reload_page_write[4]; + uint8_t addr_raw_out_sel[4]; + uint8_t addr_reload_status[4]; + uint8_t addr_reload_crc32_result[4]; + uint8_t addr_reload_addr_from[4]; + uint8_t addr_reload_addr_cmd_beat[4]; + uint8_t addr_selftest_addr_en[4]; + uint8_t addr_criteria_addr[4]; + uint8_t addr_set_frame_addr[4]; + uint8_t addr_selftest_result_addr[4]; + uint8_t addr_sorting_mode_en[4]; + uint8_t addr_fw_mode_status[4]; + uint8_t addr_icid_addr[4]; + uint8_t addr_trigger_addr[4]; + uint8_t addr_fw_ver_addr[4]; + uint8_t addr_fw_cfg_addr[4]; + uint8_t addr_fw_vendor_addr[4]; + uint8_t addr_fw_state_addr[4]; + uint8_t addr_fw_dbg_msg_addr[4]; + uint8_t addr_chk_fw_status[4]; + uint8_t addr_dd_handshak_addr[4]; + uint8_t addr_dd_data_addr[4]; + uint8_t data_system_reset[4]; + uint8_t data_safe_mode_release_pw_active[4]; + uint8_t data_safe_mode_release_pw_reset[4]; + uint8_t data_clear[4]; + uint8_t data_program_reload_start[4]; + uint8_t data_program_reload_compare[4]; + uint8_t data_program_reload_break[4]; + uint8_t data_selftest_request[4]; + uint8_t data_criteria_aa_top[1]; + uint8_t data_criteria_aa_bot[1]; + uint8_t data_criteria_key_top[1]; + uint8_t data_criteria_key_bot[1]; + uint8_t data_criteria_avg_top[1]; + uint8_t data_criteria_avg_bot[1]; + uint8_t data_set_frame[4]; + uint8_t data_selftest_ack_hb[1]; + uint8_t data_selftest_ack_lb[1]; + uint8_t data_selftest_pass[1]; + uint8_t data_normal_cmd[1]; + uint8_t data_normal_status[1]; + uint8_t data_sorting_cmd[1]; + uint8_t data_sorting_status[1]; + uint8_t data_dd_request[1]; + uint8_t data_dd_ack[1]; + uint8_t data_idle_dis_pwd[1]; + uint8_t data_idle_en_pwd[1]; + uint8_t data_rawdata_ready_hb[1]; + uint8_t data_rawdata_ready_lb[1]; + uint8_t addr_ahb_addr[1]; + uint8_t data_ahb_dis[1]; + uint8_t data_ahb_en[1]; + uint8_t addr_event_addr[1]; + uint8_t addr_usb_detect[4]; +}; + +struct flash_operation { + uint8_t addr_spi200_trans_fmt[4]; + uint8_t addr_spi200_trans_ctrl[4]; + uint8_t addr_spi200_cmd[4]; + uint8_t addr_spi200_addr[4]; + uint8_t addr_spi200_data[4]; + uint8_t addr_spi200_bt_num[4]; + + uint8_t data_spi200_trans_fmt[4]; + uint8_t data_spi200_trans_ctrl_1[4]; + uint8_t data_spi200_trans_ctrl_2[4]; + uint8_t 
data_spi200_trans_ctrl_3[4]; + uint8_t data_spi200_trans_ctrl_4[4]; + uint8_t data_spi200_trans_ctrl_5[4]; + uint8_t data_spi200_cmd_1[4]; + uint8_t data_spi200_cmd_2[4]; + uint8_t data_spi200_cmd_3[4]; + uint8_t data_spi200_cmd_4[4]; + uint8_t data_spi200_cmd_5[4]; + uint8_t data_spi200_cmd_6[4]; + uint8_t data_spi200_cmd_7[4]; + uint8_t data_spi200_addr[4]; +}; + +struct sram_operation { + uint8_t addr_mkey[4]; + uint8_t addr_rawdata_addr[4]; + uint8_t addr_rawdata_end[4]; + uint8_t data_conti[4]; + uint8_t data_fin[4]; + uint8_t passwrd_start[2]; + uint8_t passwrd_end[2]; +}; + +struct driver_operation { + uint8_t addr_fw_define_flash_reload[4]; + uint8_t addr_fw_define_2nd_flash_reload[4]; + uint8_t addr_fw_define_int_is_edge[4]; + uint8_t addr_fw_define_rxnum_txnum_maxpt[4]; + uint8_t addr_fw_define_xy_res_enable[4]; + uint8_t addr_fw_define_x_y_res[4]; + uint8_t data_fw_define_flash_reload_dis[4]; + uint8_t data_fw_define_flash_reload_en[4]; + uint8_t data_fw_define_rxnum_txnum_maxpt_sorting[4]; + uint8_t data_fw_define_rxnum_txnum_maxpt_normal[4]; +}; + +struct zf_operation { + uint8_t addr_dis_flash_reload[4]; + uint8_t data_dis_flash_reload[4]; + uint8_t addr_system_reset[4]; + uint8_t data_system_reset[4]; + uint8_t data_sram_start_addr[4]; + uint8_t data_sram_clean[4]; + uint8_t data_cfg_info[4]; + uint8_t data_fw_cfg[4]; + uint8_t data_adc_cfg_1[4]; + uint8_t data_adc_cfg_2[4]; + uint8_t data_adc_cfg_3[4]; +}; + +struct himax_core_command_operation { + struct ic_operation *ic_op; + struct fw_operation *fw_op; + struct flash_operation *flash_op; + struct sram_operation *sram_op; + struct driver_operation *driver_op; + struct zf_operation *zf_op; +}; + +struct on_ic_operation { + uint8_t addr_ahb_addr_byte_0[1]; + uint8_t addr_ahb_rdata_byte_0[1]; + uint8_t addr_ahb_access_direction[1]; + uint8_t addr_conti[1]; + uint8_t addr_incr4[1]; + uint8_t adr_mcu_ctrl[1]; + uint8_t data_ahb_access_direction_read[1]; + uint8_t data_conti[1]; + uint8_t data_incr4[1]; + uint8_t cmd_mcu_on[1]; + uint8_t cmd_mcu_off[1]; + uint8_t adr_sleep_ctrl[1]; + uint8_t cmd_sleep_in[1]; + uint8_t adr_tcon_ctrl[4]; + uint8_t cmd_tcon_on[4]; + uint8_t adr_wdg_ctrl[4]; + uint8_t cmd_wdg_psw[4]; + uint8_t adr_wdg_cnt_ctrl[4]; + uint8_t cmd_wdg_cnt_clr[4]; +}; + +struct on_fw_operation { + uint8_t addr_smwp_enable[1]; + uint8_t addr_program_reload_from[4]; + uint8_t addr_raw_out_sel[1]; + uint8_t addr_flash_checksum[4]; + uint8_t data_flash_checksum[4]; + uint8_t addr_crc_value[4]; + uint8_t addr_reload_status[4]; + uint8_t addr_reload_crc32_result[4]; + uint8_t addr_reload_addr_from[4]; + uint8_t addr_reload_addr_cmd_beat[4]; + uint8_t addr_set_frame_addr[4]; + uint8_t addr_fw_mode_status[1]; + uint8_t addr_icid_addr[4]; + uint8_t addr_trigger_addr[4]; + uint8_t addr_fw_ver_start[1]; + uint8_t data_safe_mode_release_pw_active[4]; + uint8_t data_safe_mode_release_pw_reset[4]; + uint8_t data_clear[4]; + uint8_t addr_criteria_addr[1]; + uint8_t data_selftest_pass[1]; + uint8_t addr_reK_crtl[4]; + uint8_t data_reK_en[1]; + uint8_t data_reK_dis[1]; + uint8_t data_rst_init[1]; + uint8_t data_dc_set[1]; + uint8_t data_bank_set[1]; + uint8_t addr_selftest_addr_en[1]; + uint8_t addr_selftest_result_addr[1]; + uint8_t data_selftest_request[1]; + uint8_t data_thx_avg_mul_dc_lsb[1]; + uint8_t data_thx_avg_mul_dc_msb[1]; + uint8_t data_thx_mul_dc_up_low_bud[1]; + uint8_t data_thx_avg_slf_dc_lsb[1]; + uint8_t data_thx_avg_slf_dc_msb[1]; + uint8_t data_thx_slf_dc_up_low_bud[1]; + uint8_t data_thx_slf_bank_up[1]; + uint8_t 
data_thx_slf_bank_low[1]; + uint8_t data_idle_dis_pwd[1]; + uint8_t data_idle_en_pwd[1]; + uint8_t data_rawdata_ready_hb[1]; + uint8_t data_rawdata_ready_lb[1]; + uint8_t addr_ahb_addr[1]; + uint8_t data_ahb_dis[1]; + uint8_t data_ahb_en[1]; + uint8_t addr_event_addr[1]; + uint8_t addr_usb_detect[1]; +}; + +struct on_flash_operation { + uint8_t addr_ctrl_base[4]; + uint8_t addr_ctrl_auto[4]; + uint8_t data_main_erase[4]; + uint8_t data_auto[1]; + uint8_t data_main_read[1]; + uint8_t data_page_write[1]; + uint8_t data_sfr_read[1]; + uint8_t data_spp_read[1]; + uint8_t addr_ahb_ctrl[4]; + uint8_t data_ahb_squit[4]; + + uint8_t addr_unlock_0[4]; + uint8_t addr_unlock_4[4]; + uint8_t addr_unlock_8[4]; + uint8_t addr_unlock_c[4]; + uint8_t data_cmd0[4]; + uint8_t data_cmd1[4]; + uint8_t data_cmd2[4]; + uint8_t data_cmd3[4]; + uint8_t data_lock[4]; +}; + +struct on_sram_operation { + uint8_t addr_rawdata_addr[4]; + uint8_t addr_rawdata_end[4]; + uint8_t data_conti[4]; + uint8_t data_fin[4]; + uint8_t passwrd_start[2]; + uint8_t passwrd_end[2]; +}; + +struct on_driver_operation { + uint8_t addr_fw_define_int_is_edge[4]; + uint8_t addr_fw_rx_tx_maxpt_num[4]; + uint8_t addr_fw_xy_rev_int_edge[4]; + uint8_t addr_fw_define_x_y_res[4]; + uint8_t data_fw_define_rxnum_txnum_maxpt_sorting[4]; + uint8_t data_fw_define_rxnum_txnum_maxpt_normal[4]; +}; + +struct himax_on_core_command_operation { + struct on_ic_operation *ic_op; + struct on_fw_operation *fw_op; + struct on_flash_operation *flash_op; + struct on_sram_operation *sram_op; + struct on_driver_operation *driver_op; +}; + +struct himax_core_fp { +#ifdef CORE_IC + void (*fp_burst_enable)(uint8_t auto_add_4_byte); + int (*fp_register_read)(uint8_t *read_addr, uint32_t read_length, uint8_t *read_data, uint8_t cfg_flag); + int (*fp_flash_write_burst)(uint8_t *reg_byte, uint8_t *write_data); + void (*fp_flash_write_burst_length)(uint8_t *reg_byte, uint8_t *write_data, uint32_t length); + void (*fp_register_write)(uint8_t *write_addr, uint32_t write_length, uint8_t *write_data, uint8_t cfg_flag); + void (*fp_interface_on)(void); + void (*fp_sense_on)(uint8_t FlashMode); + void (*fp_tcon_on)(void); + bool (*fp_watch_dog_off)(void); + bool (*fp_sense_off)(void); + void (*fp_sleep_in)(void); + bool (*fp_wait_wip)(int Timing); + void (*fp_init_psl)(void); + void (*fp_resume_ic_action)(void); + void (*fp_suspend_ic_action)(void); + void (*fp_power_on_init)(void); +#endif + +#ifdef CORE_FW + void (*fp_parse_raw_data)(struct himax_report_data *hx_touch_data, int mul_num, int self_num, uint8_t diag_cmd, int16_t *mutual_data, int16_t *self_data); + void (*fp_system_reset)(void); + bool (*fp_Calculate_CRC_with_AP)(unsigned char *FW_content, int CRC_from_FW, int mode); + uint32_t (*fp_check_CRC)(uint8_t *start_addr, int reload_length); + void (*fp_set_reload_cmd)(uint8_t *write_data, int idx, uint32_t cmd_from, uint32_t cmd_to, uint32_t cmd_beat); + bool (*fp_program_reload)(void); + void (*fp_set_SMWP_enable)(uint8_t SMWP_enable, bool suspended); + void (*fp_set_HSEN_enable)(uint8_t HSEN_enable, bool suspended); + void (*fp_diag_register_set)(uint8_t diag_command, uint8_t storage_type); +#ifdef HX_TP_SELF_TEST_DRIVER + void (*fp_control_reK)(bool enable); +#endif + int (*fp_chip_self_test)(void); + void (*fp_idle_mode)(int disable); + void (*fp_reload_disable)(int disable); + bool (*fp_check_chip_version)(void); + int (*fp_read_ic_trigger_type)(void); + int (*fp_read_i2c_status)(void); + void (*fp_read_FW_ver)(void); + bool (*fp_read_event_stack)(uint8_t *buf, 
uint8_t length); + void (*fp_return_event_stack)(void); + bool (*fp_calculateChecksum)(bool change_iref); + int (*fp_read_FW_status)(uint8_t *state_addr, uint8_t *tmp_addr); + void (*fp_irq_switch)(int switch_on); + int (*fp_assign_sorting_mode)(uint8_t *tmp_data); + int (*fp_check_sorting_mode)(uint8_t *tmp_data); + int (*fp_switch_mode)(int mode); + uint8_t (*fp_read_DD_status)(uint8_t *cmd_set, uint8_t *tmp_data); +#endif + +#ifdef CORE_FLASH + void (*fp_chip_erase)(void); + bool (*fp_block_erase)(int start_addr, int length); + bool (*fp_sector_erase)(int start_addr); + void (*fp_flash_programming)(uint8_t *FW_content, int FW_Size); + void (*fp_flash_page_write)(uint8_t *write_addr, int length, uint8_t *write_data); + int (*fp_fts_ctpm_fw_upgrade_with_sys_fs_32k)(unsigned char *fw, int len, bool change_iref); + int (*fp_fts_ctpm_fw_upgrade_with_sys_fs_60k)(unsigned char *fw, int len, bool change_iref); + int (*fp_fts_ctpm_fw_upgrade_with_sys_fs_64k)(unsigned char *fw, int len, bool change_iref); + int (*fp_fts_ctpm_fw_upgrade_with_sys_fs_124k)(unsigned char *fw, int len, bool change_iref); + int (*fp_fts_ctpm_fw_upgrade_with_sys_fs_128k)(unsigned char *fw, int len, bool change_iref); + void (*fp_flash_dump_func)(uint8_t local_flash_command, int Flash_Size, uint8_t *flash_buffer); + bool (*fp_flash_lastdata_check)(void); + bool (*fp_ahb_squit)(void); + void (*fp_flash_read)(uint8_t *r_data, int start_addr, int length); + bool (*fp_sfr_rw)(uint8_t *addr, int length, uint8_t *data, uint8_t rw_ctrl); + bool (*fp_lock_flash)(void); + bool (*fp_unlock_flash)(void); + void (*fp_init_auto_func)(void); +#endif + +#ifdef CORE_SRAM + void (*fp_sram_write)(uint8_t *FW_content); + bool (*fp_sram_verify)(uint8_t *FW_File, int FW_Size); + void (*fp_get_DSRAM_data)(uint8_t *info_data, bool DSRAM_Flag); +#endif + +#ifdef CORE_DRIVER + bool (*fp_chip_detect)(void); + void (*fp_chip_init)(void); + void (*fp_pin_reset)(void); + void (*fp_touch_information)(void); + void (*fp_reload_config)(void); + int (*fp_get_touch_data_size)(void); + void (*fp_usb_detect_set)(uint8_t *cable_config); + int (*fp_hand_shaking)(void); + int (*fp_determin_diag_rawdata)(int diag_command); + int (*fp_determin_diag_storage)(int diag_command); + int (*fp_cal_data_len)(int raw_cnt_rmd, int HX_MAX_PT, int raw_cnt_max); + bool (*fp_diag_check_sum)(struct himax_report_data *hx_touch_data); + void (*fp_diag_parse_raw_data)(struct himax_report_data *hx_touch_data, int mul_num, int self_num, uint8_t diag_cmd, int32_t *mutual_data, int32_t *self_data); + void (*fp_ic_reset)(uint8_t loadconfig, uint8_t int_off); + int (*fp_ic_esd_recovery)(int hx_esd_event, int hx_zero_event, int length); + void (*fp_esd_ic_reset)(void); + void (*fp_resend_cmd_func)(bool suspended); +#endif +#ifdef HX_ZERO_FLASH + void (*fp_sys_reset)(void); + void (*fp_clean_sram_0f)(uint8_t *addr, int write_len, int type); + void (*fp_write_sram_0f)(const struct firmware *fw_entry, uint8_t *addr, int start_index, uint32_t write_len); + void (*fp_firmware_update_0f)(const struct firmware *fw_entry); + void (*fp_0f_operation)(struct work_struct *work); +#ifdef HX_0F_DEBUG + void (*fp_read_sram_0f)(const struct firmware *fw_entry, uint8_t *addr, int start_index, int read_len); + void (*fp_read_all_sram)(uint8_t *addr, int read_len); + void (*fp_firmware_read_0f)(const struct firmware *fw_entry, int type); + void (*fp_0f_operation_check)(int type); +#endif +#endif +}; + +#ifdef HX_ESD_RECOVERY +extern int g_zero_event_count; +#endif + +extern struct ic_operation *pic_op; 
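+/*
+ * These exported op tables are defined in himax_ic_incell_core.c; they are
+ * expected to carry the per-IC register addresses and command values
+ * declared above for use by the g_core_fp function pointers.
+ */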
+extern struct fw_operation *pfw_op; +#ifdef HX_ZERO_FLASH +extern struct zf_operation *pzf_op; +#endif + diff --git a/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c b/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c new file mode 100644 index 0000000000000000000000000000000000000000..a95fb26a3bc25d30d561121bc5eae6d29cfb6215 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_ic_incell_core.c @@ -0,0 +1,2727 @@ +/* + * Himax Android Driver Sample Code for IC Core + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "himax_ic_core.h" + +struct himax_core_command_operation *g_core_cmd_op; +struct ic_operation *pic_op; +struct fw_operation *pfw_op; +struct flash_operation *pflash_op; +struct sram_operation *psram_op; +struct driver_operation *pdriver_op; +#ifdef HX_ZERO_FLASH +struct zf_operation *pzf_op; +#endif + +#ifdef CORE_IC +/* IC side start */ +static void himax_mcu_burst_enable(uint8_t auto_add_4_byte) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + /* I("%s,Entering\n",__func__); */ + tmp_data[0] = pic_op->data_conti[0]; + + if (himax_bus_write(pic_op->addr_conti[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return; + } + + tmp_data[0] = (pic_op->data_incr4[0] | auto_add_4_byte); + + if (himax_bus_write(pic_op->addr_incr4[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return; + } +} + +static int himax_mcu_register_read(uint8_t *read_addr, uint32_t read_length, uint8_t *read_data, uint8_t cfg_flag) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + int i = 0; + int address = 0; + + /* I("%s,Entering\n",__func__); */ + + if (cfg_flag == false) { + if (read_length > FLASH_RW_MAX_LEN) { + E("%s: read len over %d!\n", __func__, FLASH_RW_MAX_LEN); + return LENGTH_FAIL; + } + + if (read_length > FOUR_BYTE_DATA_SZ) + g_core_fp.fp_burst_enable(1); + else + g_core_fp.fp_burst_enable(0); + + address = (read_addr[3] << 24) + (read_addr[2] << 16) + (read_addr[1] << 8) + read_addr[0]; + i = address; + tmp_data[0] = (uint8_t)i; + tmp_data[1] = (uint8_t)(i >> 8); + tmp_data[2] = (uint8_t)(i >> 16); + tmp_data[3] = (uint8_t)(i >> 24); + + if (himax_bus_write(pic_op->addr_ahb_addr_byte_0[0], tmp_data, FOUR_BYTE_DATA_SZ, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return I2C_FAIL; + } + + tmp_data[0] = pic_op->data_ahb_access_direction_read[0]; + + if (himax_bus_write(pic_op->addr_ahb_access_direction[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return I2C_FAIL; + } + + if (himax_bus_read(pic_op->addr_ahb_rdata_byte_0[0], read_data, read_length, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return I2C_FAIL; + } + + if (read_length > FOUR_BYTE_DATA_SZ) + g_core_fp.fp_burst_enable(0); + + } else { + if (himax_bus_read(read_addr[0], read_data, read_length, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return I2C_FAIL; + } + } + return NO_ERR; +} + +static int himax_mcu_flash_write_burst(uint8_t 
*reg_byte, uint8_t *write_data) +{ + uint8_t data_byte[FLASH_WRITE_BURST_SZ]; + int i = 0, j = 0; + int data_byte_sz = sizeof(data_byte); + + for (i = 0; i < FOUR_BYTE_ADDR_SZ; i++) + data_byte[i] = reg_byte[i]; + + for (j = FOUR_BYTE_ADDR_SZ; j < data_byte_sz; j++) + data_byte[j] = write_data[j - FOUR_BYTE_ADDR_SZ]; + + if (himax_bus_write(pic_op->addr_ahb_addr_byte_0[0], data_byte, data_byte_sz, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return I2C_FAIL; + } + return NO_ERR; +} + +static void himax_mcu_flash_write_burst_length(uint8_t *reg_byte, uint8_t *write_data, uint32_t length) +{ + uint8_t *data_byte; + int i = 0, j = 0; + + /* + * if (length + FOUR_BYTE_ADDR_SZ > FLASH_RW_MAX_LEN) { + * E("%s: write len over %d!\n", __func__, FLASH_RW_MAX_LEN); + * return; + * } + */ + + data_byte = kcalloc((length + 4), sizeof(uint8_t), GFP_KERNEL); + if (!data_byte) { + E("%s: allocate memory failed!\n", __func__); + return; + } + + for (i = 0; i < FOUR_BYTE_ADDR_SZ; i++) + data_byte[i] = reg_byte[i]; + + for (j = FOUR_BYTE_ADDR_SZ; j < length + FOUR_BYTE_ADDR_SZ; j++) + data_byte[j] = write_data[j - FOUR_BYTE_ADDR_SZ]; + + if (himax_bus_write(pic_op->addr_ahb_addr_byte_0[0], data_byte, length + FOUR_BYTE_ADDR_SZ, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + kfree(data_byte); + return; + } + kfree(data_byte); +} + +static void himax_mcu_register_write(uint8_t *write_addr, uint32_t write_length, uint8_t *write_data, uint8_t cfg_flag) +{ + int i, address; + + /* I("%s,Entering\n", __func__); */ + if (cfg_flag == false) { + address = (write_addr[3] << 24) + (write_addr[2] << 16) + (write_addr[1] << 8) + write_addr[0]; + + for (i = address; i < address + write_length; i++) { + if (write_length > FOUR_BYTE_DATA_SZ) + g_core_fp.fp_burst_enable(1); + else + g_core_fp.fp_burst_enable(0); + + g_core_fp.fp_flash_write_burst_length(write_addr, write_data, write_length); + } + } else if (cfg_flag == true) { + if (himax_bus_write(write_addr[0], write_data, write_length, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return; + } + } else { + E("%s: cfg_flag = %d, value is wrong!\n", __func__, cfg_flag); + return; + } +} + +static int himax_write_read_reg(uint8_t *tmp_addr, uint8_t *tmp_data, uint8_t hb, uint8_t lb) +{ + int cnt = 0; + + do { + g_core_fp.fp_flash_write_burst(tmp_addr, tmp_data); + msleep(20); + g_core_fp.fp_register_read(tmp_addr, 4, tmp_data, 0); + /* + * I("%s:Now tmp_data[0]=0x%02X,[1]=0x%02X,[2]=0x%02X,[3]=0x%02X\n", + * __func__, tmp_data[0], tmp_data[1], tmp_data[2], tmp_data[3]); + */ + } while ((tmp_data[1] != hb && tmp_data[0] != lb) && cnt++ < 100); + + if (cnt == 99) + return HX_RW_REG_FAIL; + + I("Now register 0x%08X : high byte=0x%02X,low byte=0x%02X\n", tmp_addr[3], tmp_data[1], tmp_data[0]); + return NO_ERR; +} + +static void himax_mcu_interface_on(void) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t tmp_data2[FOUR_BYTE_DATA_SZ]; + int cnt = 0; + + /* Read a dummy register to wake up I2C.*/ + if (himax_bus_read(pic_op->addr_ahb_rdata_byte_0[0], tmp_data, FOUR_BYTE_DATA_SZ, HIMAX_I2C_RETRY_TIMES) < 0) {/* to knock I2C*/ + E("%s: i2c access fail!\n", __func__); + return; + } + + do { + /* + *=========================================== + * Enable continuous burst mode : 0x13 ==> 0x31 + *=========================================== + */ + tmp_data[0] = pic_op->data_conti[0]; + + if (himax_bus_write(pic_op->addr_conti[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", 
__func__); + return; + } + + /* + *=========================================== + * AHB address auto +4 : 0x0D ==> 0x11 + * Do not AHB address auto +4 : 0x0D ==> 0x10 + *=========================================== + */ + tmp_data[0] = pic_op->data_incr4[0]; + + if (himax_bus_write(pic_op->addr_incr4[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return; + } + + /* Check cmd */ + if (himax_bus_read(pic_op->addr_conti[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail: addr_conti!\n", __func__); + return; + } + if (himax_bus_read(pic_op->addr_incr4[0], tmp_data2, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail: addr_incr4!\n", __func__); + return; + } + if (tmp_data[0] == pic_op->data_conti[0] && tmp_data2[0] == pic_op->data_incr4[0]) + break; + + msleep(20); + } while (++cnt < 10); + + if (cnt > 0) + I("%s:Polling burst mode: %d times", __func__, cnt); + +} + +static bool himax_mcu_wait_wip(int Timing) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + int retry_cnt = 0; + + /* + *===================================== + * SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_fmt, pflash_op->data_spi200_trans_fmt); + tmp_data[0] = 0x01; + + do { + /* + *===================================== + * SPI Transfer Control : 0x8000_0020 ==> 0x4200_0003 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_ctrl, pflash_op->data_spi200_trans_ctrl_1); + /* + *===================================== + * SPI Command : 0x8000_0024 ==> 0x0000_0005 + * read 0x8000_002C for 0x01, means wait success + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_1); + tmp_data[0] = tmp_data[1] = tmp_data[2] = tmp_data[3] = 0xFF; + g_core_fp.fp_register_read(pflash_op->addr_spi200_data, 4, tmp_data, 0); + + if ((tmp_data[0] & 0x01) == 0x00) + return true; + + retry_cnt++; + + if (tmp_data[0] != 0x00 || tmp_data[1] != 0x00 || tmp_data[2] != 0x00 || tmp_data[3] != 0x00) + I("%s:Wait wip retry_cnt:%d, buffer[0]=%d, buffer[1]=%d, buffer[2]=%d, buffer[3]=%d\n", + __func__, retry_cnt, tmp_data[0], tmp_data[1], tmp_data[2], tmp_data[3]); + + if (retry_cnt > 100) { + E("%s: Wait wip error!\n", __func__); + return false; + } + + msleep(Timing); + } while ((tmp_data[0] & 0x01) == 0x01); + + return true; +} + +static void himax_mcu_sense_on(uint8_t FlashMode) +{ + /* uint8_t tmp_addr[FOUR_BYTE_ADDR_SZ]; */ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + int retry = 0; + + I("Enter %s\n", __func__); + g_core_fp.fp_interface_on(); + g_core_fp.fp_register_write(pfw_op->addr_ctrl_fw_isr, + sizeof(pfw_op->data_clear), pfw_op->data_clear, false); + msleep(20); + + if (!FlashMode) { +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, false); +#else + /* ===AHBI2C_SystemReset========== */ + g_core_fp.fp_system_reset(); +#endif + } else { + do { + /* + * tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x98; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x53; + */ + g_core_fp.fp_register_write(pfw_op->addr_safe_mode_release_pw, + sizeof(pfw_op->data_safe_mode_release_pw_active), pfw_op->data_safe_mode_release_pw_active, false); + /* tmp_addr[0] = 0xE4; */ + g_core_fp.fp_register_read(pfw_op->addr_flag_reset_event, FOUR_BYTE_DATA_SZ, tmp_data, 0); + I("%s:Read status from IC = %X,%X\n", 
__func__, tmp_data[0], tmp_data[1]); + } while ((tmp_data[1] != 0x01 || tmp_data[0] != 0x00) && retry++ < 5); + + if (retry >= 5) { + E("%s: Fail:\n", __func__); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, false); +#else + /* ===AHBI2C_System Reset========== */ + g_core_fp.fp_system_reset(); +#endif + } else { + I("%s:OK and Read status from IC = %X,%X\n", __func__, tmp_data[0], tmp_data[1]); + /* reset code */ + tmp_data[0] = 0x00; + + if (himax_bus_write(pic_op->adr_i2c_psw_lb[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) + E("%s: i2c access fail!\n", __func__); + + if (himax_bus_write(pic_op->adr_i2c_psw_ub[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) + E("%s: i2c access fail!\n", __func__); + + /* + * tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x98; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00; + */ + g_core_fp.fp_register_write(pfw_op->addr_safe_mode_release_pw, + sizeof(pfw_op->data_safe_mode_release_pw_reset), pfw_op->data_safe_mode_release_pw_reset, false); + } + } +} + +static bool himax_mcu_sense_off(void) +{ + uint8_t cnt = 0; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + + do { + /* + *=========================================== + * I2C_password[7:0] set Enter safe mode : 0x31 ==> 0x27 + *=========================================== + */ + tmp_data[0] = pic_op->data_i2c_psw_lb[0]; + + if (himax_bus_write(pic_op->adr_i2c_psw_lb[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *=========================================== + * I2C_password[15:8] set Enter safe mode :0x32 ==> 0x95 + *=========================================== + */ + tmp_data[0] = pic_op->data_i2c_psw_ub[0]; + + if (himax_bus_write(pic_op->adr_i2c_psw_ub[0], tmp_data, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return false; + } + + /* + *====================== + * Check enter_save_mode + *====================== + */ + g_core_fp.fp_register_read(pic_op->addr_cs_central_state, FOUR_BYTE_ADDR_SZ, tmp_data, 0); + I("%s: Check enter_save_mode data[0]=%X\n", __func__, tmp_data[0]); + + if (tmp_data[0] == 0x0C) { + /* + *===================================== + * Reset TCON + *===================================== + */ + g_core_fp.fp_flash_write_burst(pic_op->addr_tcon_on_rst, pic_op->data_rst); + msleep(20); + tmp_data[3] = pic_op->data_rst[3]; + tmp_data[2] = pic_op->data_rst[2]; + tmp_data[1] = pic_op->data_rst[1]; + tmp_data[0] = pic_op->data_rst[0] | 0x01; + g_core_fp.fp_flash_write_burst(pic_op->addr_tcon_on_rst, tmp_data); + /* + *===================================== + * Reset ADC + *===================================== + */ + g_core_fp.fp_flash_write_burst(pic_op->addr_adc_on_rst, pic_op->data_rst); + msleep(20); + tmp_data[3] = pic_op->data_rst[3]; + tmp_data[2] = pic_op->data_rst[2]; + tmp_data[1] = pic_op->data_rst[1]; + tmp_data[0] = pic_op->data_rst[0] | 0x01; + g_core_fp.fp_flash_write_burst(pic_op->addr_adc_on_rst, tmp_data); + return true; + } + + msleep(20); +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, false); +#endif + + } while (cnt++ < 15); + + return false; +} + +static void himax_mcu_init_psl(void) /*power saving level*/ +{ + /* + *============================================================== + * SCU_Power_State_PW : 0x9000_00A0 ==> 0x0000_0000 (Reset PSL) + *============================================================== + */ + g_core_fp.fp_register_write(pic_op->addr_psl, sizeof(pic_op->data_rst), pic_op->data_rst, 
false); + I("%s: power saving level reset OK!\n", __func__); +} + +static void himax_mcu_resume_ic_action(void) +{ + /* Nothing to do */ +} + +static void himax_mcu_suspend_ic_action(void) +{ + /* Nothing to do */ +} + +static void himax_mcu_power_on_init(void) +{ + I("%s:\n", __func__); + g_core_fp.fp_touch_information(); + /* RawOut select initial */ + g_core_fp.fp_register_write(pfw_op->addr_raw_out_sel, sizeof(pfw_op->data_clear), pfw_op->data_clear, false); + /* DSRAM func initial */ + g_core_fp.fp_assign_sorting_mode(pfw_op->data_clear); + g_core_fp.fp_sense_on(0x00); +} + +/* IC side end */ +#endif + +#ifdef CORE_FW +/* FW side start */ +static void diag_mcu_parse_raw_data(struct himax_report_data *hx_touch_data, int mul_num, int self_num, uint8_t diag_cmd, int32_t *mutual_data, int32_t *self_data) +{ + int RawDataLen_word; + int index = 0; + int temp1, temp2, i; + + if (hx_touch_data->hx_rawdata_buf[0] == pfw_op->data_rawdata_ready_lb[0] + && hx_touch_data->hx_rawdata_buf[1] == pfw_op->data_rawdata_ready_hb[0] + && hx_touch_data->hx_rawdata_buf[2] > 0 + && hx_touch_data->hx_rawdata_buf[3] == diag_cmd) { + RawDataLen_word = hx_touch_data->rawdata_size / 2; + index = (hx_touch_data->hx_rawdata_buf[2] - 1) * RawDataLen_word; + + /* + * I("Header[%d]: %x, %x, %x, %x, mutual: %d, self: %d\n", index, buf[56], buf[57], buf[58], buf[59], mul_num, self_num); + * I("RawDataLen=%d , RawDataLen_word=%d , hx_touch_info_size=%d\n", RawDataLen, RawDataLen_word, hx_touch_info_size); + */ + for (i = 0; i < RawDataLen_word; i++) { + temp1 = index + i; + + if (temp1 < mul_num) /* mutual */ + mutual_data[index + i] = ((int8_t)hx_touch_data->hx_rawdata_buf[i * 2 + 4 + 1]) * 256 + hx_touch_data->hx_rawdata_buf[i * 2 + 4]; + + else { /* self */ + temp1 = i + index; + temp2 = self_num + mul_num; + + if (temp1 >= temp2) + break; + + self_data[i + index - mul_num] = (((int8_t)hx_touch_data->hx_rawdata_buf[i * 2 + 4 + 1]) << 8) + + hx_touch_data->hx_rawdata_buf[i * 2 + 4]; + } + } + } +} + +static void himax_mcu_system_reset(void) +{ + /* + * tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x18; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x55; + */ + g_core_fp.fp_register_write(pfw_op->addr_system_reset, sizeof(pfw_op->data_system_reset), pfw_op->data_system_reset, false); +} + +static bool himax_mcu_Calculate_CRC_with_AP(unsigned char *FW_content, int CRC_from_FW, int mode) +{ + return true; +} + +static uint32_t himax_mcu_check_CRC(uint8_t *start_addr, int reload_length) +{ + uint32_t result = 0; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + int cnt = 0, ret = 0; + int length = reload_length / FOUR_BYTE_DATA_SZ; + /* + * CRC4 // 0x8005_0020 <= from, 0x8005_0028 <= 0x0099_length + * tmp_addr[3] = 0x80; tmp_addr[2] = 0x05; tmp_addr[1] = 0x00; tmp_addr[0] = 0x20; + */ + ret = g_core_fp.fp_flash_write_burst(pfw_op->addr_reload_addr_from, start_addr); + if (ret < NO_ERR) { + E("%s: i2c access fail!\n", __func__); + return HW_CRC_FAIL; + } + /* tmp_addr[3] = 0x80; tmp_addr[2] = 0x05; tmp_addr[1] = 0x00; tmp_addr[0] = 0x28; */ + tmp_data[3] = 0x00; tmp_data[2] = 0x99; tmp_data[1] = (length >> 8); tmp_data[0] = length; + ret = g_core_fp.fp_flash_write_burst(pfw_op->addr_reload_addr_cmd_beat, tmp_data); + if (ret < NO_ERR) { + E("%s: i2c access fail!\n", __func__); + return HW_CRC_FAIL; + } + cnt = 0; + + do { + /* tmp_addr[3] = 0x80; tmp_addr[2] = 0x05; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00; */ + ret = g_core_fp.fp_register_read(pfw_op->addr_reload_status, 
FOUR_BYTE_DATA_SZ, tmp_data, 0); + if (ret < NO_ERR) { + E("%s: i2c access fail!\n", __func__); + return HW_CRC_FAIL; + } + + if ((tmp_data[0] & 0x01) != 0x01) { + /* tmp_addr[3] = 0x80; tmp_addr[2] = 0x05; tmp_addr[1] = 0x00; tmp_addr[0] = 0x18; */ + ret = g_core_fp.fp_register_read(pfw_op->addr_reload_crc32_result, FOUR_BYTE_DATA_SZ, tmp_data, 0); + if (ret < NO_ERR) { + E("%s: i2c access fail!\n", __func__); + return HW_CRC_FAIL; + } + I("%s: tmp_data[3]=%X, tmp_data[2]=%X, tmp_data[1]=%X, tmp_data[0]=%X\n", __func__, tmp_data[3], tmp_data[2], tmp_data[1], tmp_data[0]); + result = ((tmp_data[3] << 24) + (tmp_data[2] << 16) + (tmp_data[1] << 8) + tmp_data[0]); + break; + } + } while (cnt++ < 100); + + return result; +} + +static void himax_mcu_set_reload_cmd(uint8_t *write_data, int idx, uint32_t cmd_from, uint32_t cmd_to, uint32_t cmd_beat) +{ + int index = idx * 12; + int i; + + for (i = 3; i >= 0; i--) { + write_data[index + i] = (cmd_from >> (8 * i)); + write_data[index + 4 + i] = (cmd_to >> (8 * i)); + write_data[index + 8 + i] = (cmd_beat >> (8 * i)); + } +} + +static bool himax_mcu_program_reload(void) +{ + return true; +} + +static void himax_mcu_set_SMWP_enable(uint8_t SMWP_enable, bool suspended) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t back_data[FOUR_BYTE_DATA_SZ]; + uint8_t retry_cnt = 0; + + do { + if (SMWP_enable) { + himax_in_parse_assign_cmd(fw_func_handshaking_pwd, tmp_data, 4); + g_core_fp.fp_flash_write_burst(pfw_op->addr_smwp_enable, tmp_data); + himax_in_parse_assign_cmd(fw_func_handshaking_pwd, back_data, 4); + } else { + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, tmp_data, 4); + g_core_fp.fp_flash_write_burst(pfw_op->addr_smwp_enable, tmp_data); + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, back_data, 4); + } + + g_core_fp.fp_register_read(pfw_op->addr_smwp_enable, FOUR_BYTE_DATA_SZ, tmp_data, 0); + /* I("%s: tmp_data[0]=%d, SMWP_enable=%d, retry_cnt=%d\n", __func__, tmp_data[0],SMWP_enable,retry_cnt); */ + retry_cnt++; + } while ((tmp_data[3] != back_data[3] || tmp_data[2] != back_data[2] || tmp_data[1] != back_data[1] || tmp_data[0] != back_data[0]) && retry_cnt < HIMAX_REG_RETRY_TIMES); +} + +static void himax_mcu_set_HSEN_enable(uint8_t HSEN_enable, bool suspended) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t back_data[FOUR_BYTE_DATA_SZ]; + uint8_t retry_cnt = 0; + + do { + if (HSEN_enable) { + himax_in_parse_assign_cmd(fw_func_handshaking_pwd, tmp_data, 4); + g_core_fp.fp_flash_write_burst(pfw_op->addr_hsen_enable, tmp_data); + himax_in_parse_assign_cmd(fw_func_handshaking_pwd, back_data, 4); + } else { + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, tmp_data, 4); + g_core_fp.fp_flash_write_burst(pfw_op->addr_hsen_enable, tmp_data); + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, back_data, 4); + } + + g_core_fp.fp_register_read(pfw_op->addr_hsen_enable, FOUR_BYTE_DATA_SZ, tmp_data, 0); + /* I("%s: tmp_data[0]=%d, HSEN_enable=%d, retry_cnt=%d\n", __func__, tmp_data[0],HSEN_enable,retry_cnt); */ + retry_cnt++; + } while ((tmp_data[3] != back_data[3] || tmp_data[2] != back_data[2] || tmp_data[1] != back_data[1] || tmp_data[0] != back_data[0]) && retry_cnt < HIMAX_REG_RETRY_TIMES); +} + +static void himax_mcu_usb_detect_set(uint8_t *cable_config) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t back_data[FOUR_BYTE_DATA_SZ]; + uint8_t retry_cnt = 0; + + do { + if (cable_config[1] == 0x01) { + himax_in_parse_assign_cmd(fw_func_handshaking_pwd, tmp_data, 4); + 
g_core_fp.fp_flash_write_burst(pfw_op->addr_usb_detect, tmp_data); + himax_in_parse_assign_cmd(fw_func_handshaking_pwd, back_data, 4); + I("%s: USB detect status IN!\n", __func__); + } else { + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, tmp_data, 4); + g_core_fp.fp_flash_write_burst(pfw_op->addr_usb_detect, tmp_data); + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, back_data, 4); + I("%s: USB detect status OUT!\n", __func__); + } + + g_core_fp.fp_register_read(pfw_op->addr_usb_detect, FOUR_BYTE_DATA_SZ, tmp_data, 0); + /* I("%s: tmp_data[0]=%d, USB detect=%d, retry_cnt=%d\n", __func__, tmp_data[0],cable_config[1] ,retry_cnt); */ + retry_cnt++; + } while ((tmp_data[3] != back_data[3] || tmp_data[2] != back_data[2] || tmp_data[1] != back_data[1] || tmp_data[0] != back_data[0]) && retry_cnt < HIMAX_REG_RETRY_TIMES); +} + +static void himax_mcu_diag_register_set(uint8_t diag_command, uint8_t storage_type) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + + if (diag_command > 0 && storage_type % 8 > 0) + tmp_data[0] = diag_command + 0x08; + else + tmp_data[0] = diag_command; + I("diag_command = %d, tmp_data[0] = %X\n", diag_command, tmp_data[0]); + g_core_fp.fp_interface_on(); + tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; + g_core_fp.fp_flash_write_burst(pfw_op->addr_raw_out_sel, tmp_data); + g_core_fp.fp_register_read(pfw_op->addr_raw_out_sel, FOUR_BYTE_DATA_SZ, tmp_data, 0); + I("%s: tmp_data[3]=0x%02X,tmp_data[2]=0x%02X,tmp_data[1]=0x%02X,tmp_data[0]=0x%02X!\n", + __func__, tmp_data[3], tmp_data[2], tmp_data[1], tmp_data[0]); +} + +static int himax_mcu_chip_self_test(void) +{ + uint8_t tmp_data[FLASH_WRITE_BURST_SZ]; + uint8_t self_test_info[20]; + int pf_value = 0x00; + uint8_t test_result_id = 0; + int i; + + memset(tmp_data, 0x00, sizeof(tmp_data)); + g_core_fp.fp_interface_on(); + g_core_fp.fp_sense_off(); + g_core_fp.fp_burst_enable(1); + /* 0x10007f18 -> 0x00006AA6 */ + g_core_fp.fp_flash_write_burst(pfw_op->addr_selftest_addr_en, pfw_op->data_selftest_request); + /* Set criteria 0x10007F1C [0,1]=aa/up,down=, [2-3]=key/up,down, [4-5]=avg/up,down */ + tmp_data[0] = pfw_op->data_criteria_aa_top[0]; + tmp_data[1] = pfw_op->data_criteria_aa_bot[0]; + tmp_data[2] = pfw_op->data_criteria_key_top[0]; + tmp_data[3] = pfw_op->data_criteria_key_bot[0]; + tmp_data[4] = pfw_op->data_criteria_avg_top[0]; + tmp_data[5] = pfw_op->data_criteria_avg_bot[0]; + tmp_data[6] = 0x00; + tmp_data[7] = 0x00; + g_core_fp.fp_flash_write_burst_length(pfw_op->addr_criteria_addr, tmp_data, FLASH_WRITE_BURST_SZ); + /* 0x10007294 -> 0x0000190 //SET IIR_MAX FRAMES */ + g_core_fp.fp_flash_write_burst(pfw_op->addr_set_frame_addr, pfw_op->data_set_frame); + /* Disable IDLE Mode */ + g_core_fp.fp_idle_mode(1); + /* 0x10007f00 -> 0x0000A55A //Disable Flash Reload */ + g_core_fp.fp_reload_disable(1); + /* start selftest // leave safe mode */ + g_core_fp.fp_sense_on(0x01); + + /* Hand shaking -> 0x10007f18 waiting 0xA66A */ + for (i = 0; i < 1000; i++) { + g_core_fp.fp_register_read(pfw_op->addr_selftest_addr_en, 4, tmp_data, 0); + I("%s: tmp_data[0] = 0x%02X,tmp_data[1] = 0x%02X,tmp_data[2] = 0x%02X,tmp_data[3] = 0x%02X, cnt=%d\n", + __func__, tmp_data[0], tmp_data[1], tmp_data[2], tmp_data[3], i); + msleep(20); + + if (tmp_data[1] == pfw_op->data_selftest_ack_hb[0] && tmp_data[0] == pfw_op->data_selftest_ack_lb[0]) { + I("%s Data ready goto moving data\n", __func__); + break; + } + } + + g_core_fp.fp_sense_off(); + msleep(20); + /* + *===================================== + * 
Read test result ID : 0x10007f24 ==> bit[2][1][0] = [key][AA][avg] => 0xF = PASS + *===================================== + */ + g_core_fp.fp_register_read(pfw_op->addr_selftest_result_addr, 20, self_test_info, 0); + test_result_id = self_test_info[0]; + I("%s: check test result, test_result_id=%x, test_result=%x\n", __func__ + , test_result_id, self_test_info[0]); + I("raw top 1 = %d\n", self_test_info[3] * 256 + self_test_info[2]); + I("raw top 2 = %d\n", self_test_info[5] * 256 + self_test_info[4]); + I("raw top 3 = %d\n", self_test_info[7] * 256 + self_test_info[6]); + I("raw last 1 = %d\n", self_test_info[9] * 256 + self_test_info[8]); + I("raw last 2 = %d\n", self_test_info[11] * 256 + self_test_info[10]); + I("raw last 3 = %d\n", self_test_info[13] * 256 + self_test_info[12]); + I("raw key 1 = %d\n", self_test_info[15] * 256 + self_test_info[14]); + I("raw key 2 = %d\n", self_test_info[17] * 256 + self_test_info[16]); + I("raw key 3 = %d\n", self_test_info[19] * 256 + self_test_info[18]); + + if (test_result_id == pfw_op->data_selftest_pass[0]) { + I("[Himax]: self-test pass\n"); + pf_value = 0x0; + } else { + E("[Himax]: self-test fail\n"); + /* + * E("[Himax]: bank_avg = %d, bank_max = %d,%d,%d, bank_min = %d,%d,%d, key = %d,%d,%d\n", + * tmp_data[1],tmp_data[2],tmp_data[3],tmp_data[4],tmp_data[5],tmp_data[6],tmp_data[7], + * tmp_data[8],tmp_data[9],tmp_data[10]); + */ + pf_value = 0x1; + } + + /* Enable IDLE Mode */ + g_core_fp.fp_idle_mode(0); + /* 0x10007f00 -> 0x00000000 //Enable Flash Reload //recovery */ + g_core_fp.fp_reload_disable(0); + g_core_fp.fp_sense_on(0x00); + msleep(120); + return pf_value; +} + +static void himax_mcu_idle_mode(int disable) +{ + int retry = 20; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t switch_cmd = 0x00; + + I("%s:entering\n", __func__); + + do { + I("%s,now %d times\n!", __func__, retry); + g_core_fp.fp_register_read(pfw_op->addr_fw_mode_status, FOUR_BYTE_DATA_SZ, tmp_data, 0); + + if (disable) + switch_cmd = pfw_op->data_idle_dis_pwd[0]; + else + switch_cmd = pfw_op->data_idle_en_pwd[0]; + + tmp_data[0] = switch_cmd; + g_core_fp.fp_flash_write_burst(pfw_op->addr_fw_mode_status, tmp_data); + g_core_fp.fp_register_read(pfw_op->addr_fw_mode_status, FOUR_BYTE_DATA_SZ, tmp_data, 0); + I("%s:After turn ON/OFF IDLE Mode [0] = 0x%02X,[1] = 0x%02X,[2] = 0x%02X,[3] = 0x%02X\n", + __func__, tmp_data[0], tmp_data[1], tmp_data[2], tmp_data[3]); + retry--; + msleep(20); + } while ((tmp_data[0] != switch_cmd) && retry > 0); + + I("%s: setting OK!\n", __func__); +} + +static void himax_mcu_reload_disable(int disable) +{ + I("%s:entering\n", __func__); + + if (disable) /* reload disable */ + g_core_fp.fp_flash_write_burst(pdriver_op->addr_fw_define_flash_reload, pdriver_op->data_fw_define_flash_reload_dis); + else /* reload enable */ + g_core_fp.fp_flash_write_burst(pdriver_op->addr_fw_define_flash_reload, pdriver_op->data_fw_define_flash_reload_en); + + I("%s: setting OK!\n", __func__); +} + +static bool himax_mcu_check_chip_version(void) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t ret_data = false; + int i = 0; + + for (i = 0; i < 5; i++) { + g_core_fp.fp_register_read(pfw_op->addr_icid_addr, FOUR_BYTE_DATA_SZ, tmp_data, 0); + I("%s:Read driver IC ID = %X,%X,%X\n", __func__, tmp_data[3], tmp_data[2], tmp_data[1]); + + if ((tmp_data[3] == 0x83) && (tmp_data[2] == 0x10) && (tmp_data[1] == 0x2a)) { + strlcpy(private_ts->chip_name, HX_83102A_SERIES_PWON, 30); + ret_data = true; + break; + } + + ret_data = false; + E("%s:Read driver ID register 
Fail:\n", __func__); + } + + return ret_data; +} + +static int himax_mcu_read_ic_trigger_type(void) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + int trigger_type = false; + + g_core_fp.fp_register_read(pfw_op->addr_trigger_addr, FOUR_BYTE_DATA_SZ, tmp_data, 0); + + if ((tmp_data[0] & 0x01) == 1) + trigger_type = true; + + return trigger_type; +} + +static int himax_mcu_read_i2c_status(void) +{ + return i2c_error_count; +} + +static void himax_mcu_read_FW_ver(void) +{ + uint8_t data[FOUR_BYTE_DATA_SZ]; + uint8_t data_2[FOUR_BYTE_DATA_SZ]; + int retry = 200; + int reload_status = 0; + + g_core_fp.fp_sense_on(0x00); + + while (reload_status == 0) { + /* cmd[3] = 0x10; cmd[2] = 0x00; cmd[1] = 0x7f; cmd[0] = 0x00; */ + g_core_fp.fp_register_read(pdriver_op->addr_fw_define_flash_reload, FOUR_BYTE_DATA_SZ, data, 0); + g_core_fp.fp_register_read(pdriver_op->addr_fw_define_2nd_flash_reload, FOUR_BYTE_DATA_SZ, data_2, 0); + + if ((data[1] == 0x3A && data[0] == 0xA3) + || (data_2[1] == 0x72 && data_2[0] == 0xC0)) { + I("reload OK!\n"); + reload_status = 1; + break; + } else if (retry == 0) { + E("reload 20 times! fail\n"); + ic_data->vendor_panel_ver = 0; + ic_data->vendor_fw_ver = 0; + ic_data->vendor_config_ver = 0; + ic_data->vendor_touch_cfg_ver = 0; + ic_data->vendor_display_cfg_ver = 0; + ic_data->vendor_cid_maj_ver = 0; + ic_data->vendor_cid_min_ver = 0; + return; + } + + retry--; + msleep(20); + if (retry % 10 == 0) + I("reload fail ,delay 10ms retry=%d\n", retry); + + } + + I("%s : data[0]=0x%2.2X,data[1]=0x%2.2X,data_2[0]=0x%2.2X,data_2[1]=0x%2.2X\n", __func__, data[0], data[1], data_2[0], data_2[1]); + I("reload_status=%d\n", reload_status); + /* + *===================================== + * Read FW version : 0x1000_7004 but 05,06 are the real addr for FW Version + *===================================== + */ + g_core_fp.fp_sense_off(); + g_core_fp.fp_register_read(pfw_op->addr_fw_ver_addr, FOUR_BYTE_DATA_SZ, data, 0); + ic_data->vendor_panel_ver = data[0]; + ic_data->vendor_fw_ver = data[1] << 8 | data[2]; + I("PANEL_VER : %X\n", ic_data->vendor_panel_ver); + I("FW_VER : %X\n", ic_data->vendor_fw_ver); + g_core_fp.fp_register_read(pfw_op->addr_fw_cfg_addr, FOUR_BYTE_DATA_SZ, data, 0); + ic_data->vendor_config_ver = data[2] << 8 | data[3]; + /* I("CFG_VER : %X\n",ic_data->vendor_config_ver); */ + ic_data->vendor_touch_cfg_ver = data[2]; + I("TOUCH_VER : %X\n", ic_data->vendor_touch_cfg_ver); + ic_data->vendor_display_cfg_ver = data[3]; + I("DISPLAY_VER : %X\n", ic_data->vendor_display_cfg_ver); + g_core_fp.fp_register_read(pfw_op->addr_fw_vendor_addr, FOUR_BYTE_DATA_SZ, data, 0); + ic_data->vendor_cid_maj_ver = data[2]; + ic_data->vendor_cid_min_ver = data[3]; + I("CID_VER : %X\n", (ic_data->vendor_cid_maj_ver << 8 | ic_data->vendor_cid_min_ver)); +} + +static bool himax_mcu_read_event_stack(uint8_t *buf, uint8_t length) +{ + uint8_t cmd[FOUR_BYTE_DATA_SZ]; + /* AHB_I2C Burst Read Off */ + cmd[0] = pfw_op->data_ahb_dis[0]; + + if (himax_bus_write(pfw_op->addr_ahb_addr[0], cmd, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return 0; + } + + if (himax_bus_read(pfw_op->addr_event_addr[0], buf, length, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return 0; + } + /* AHB_I2C Burst Read On */ + cmd[0] = pfw_op->data_ahb_en[0]; + + if (himax_bus_write(pfw_op->addr_ahb_addr[0], cmd, 1, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return 0; + } + + return 1; +} + +static void himax_mcu_return_event_stack(void) +{ + 
int retry = 20, i; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + + I("%s:entering\n", __func__); + + do { + I("now %d times\n!", retry); + + for (i = 0; i < FOUR_BYTE_DATA_SZ; i++) + tmp_data[i] = psram_op->addr_rawdata_end[i]; + + g_core_fp.fp_flash_write_burst(psram_op->addr_rawdata_addr, tmp_data); + g_core_fp.fp_register_read(psram_op->addr_rawdata_addr, FOUR_BYTE_DATA_SZ, tmp_data, 0); + retry--; + msleep(20); + } while ((tmp_data[1] != psram_op->addr_rawdata_end[1] && tmp_data[0] != psram_op->addr_rawdata_end[0]) && retry > 0); + + I("%s: End of setting!\n", __func__); +} + +static bool himax_mcu_calculateChecksum(bool change_iref) +{ + uint8_t CRC_result = 0, i; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + + for (i = 0; i < FOUR_BYTE_DATA_SZ; i++) + tmp_data[i] = psram_op->addr_rawdata_end[i]; + + CRC_result = g_core_fp.fp_check_CRC(tmp_data, FW_SIZE_64k); + msleep(50); + + return (CRC_result == 0) ? true : false; +} + +static int himax_mcu_read_FW_status(uint8_t *state_addr, uint8_t *tmp_addr) +{ + uint8_t i; + uint8_t req_size = 0; + uint8_t status_addr[FOUR_BYTE_DATA_SZ]; /* 0x10007F44 */ + uint8_t cmd_addr[FOUR_BYTE_DATA_SZ]; /* 0x900000F8 */ + + if (state_addr[0] == 0x01) { + state_addr[1] = 0x04; + + for (i = 0; i < FOUR_BYTE_DATA_SZ; i++) { + /* 0x10007F44 */ + state_addr[i + 2] = pfw_op->addr_fw_dbg_msg_addr[i]; + status_addr[i] = pfw_op->addr_fw_dbg_msg_addr[i]; + } + + req_size = 0x04; + g_core_fp.fp_register_read(status_addr, req_size, tmp_addr, 0); + } else if (state_addr[0] == 0x02) { + state_addr[1] = 0x30; + + for (i = 0; i < FOUR_BYTE_DATA_SZ; i++) { + /* 0x10007F44 */ + state_addr[i + 2] = pfw_op->addr_fw_dbg_msg_addr[i]; + cmd_addr[i] = pfw_op->addr_fw_dbg_msg_addr[i]; + } + + req_size = 0x30; + g_core_fp.fp_register_read(cmd_addr, req_size, tmp_addr, 0); + } + + return NO_ERR; +} + +static void himax_mcu_irq_switch(int switch_on) +{ + if (switch_on) { + if (private_ts->use_irq) + himax_int_enable(switch_on); + else + hrtimer_start(&private_ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); + } else { + if (private_ts->use_irq) + himax_int_enable(switch_on); + else { + hrtimer_cancel(&private_ts->timer); + cancel_work_sync(&private_ts->work); + } + } +} + +static int himax_mcu_assign_sorting_mode(uint8_t *tmp_data) +{ + + I("%s:Now tmp_data[3]=0x%02X,tmp_data[2]=0x%02X,tmp_data[1]=0x%02X,tmp_data[0]=0x%02X\n", + __func__, tmp_data[3], tmp_data[2], tmp_data[1], tmp_data[0]); + g_core_fp.fp_flash_write_burst(pfw_op->addr_sorting_mode_en, tmp_data); + + return NO_ERR; +} + +static int himax_mcu_check_sorting_mode(uint8_t *tmp_data) +{ + + g_core_fp.fp_register_read(pfw_op->addr_sorting_mode_en, FOUR_BYTE_DATA_SZ, tmp_data, 0); + I("%s: tmp_data[0]=%x,tmp_data[1]=%x\n", __func__, tmp_data[0], tmp_data[1]); + + return NO_ERR; +} + +static int himax_mcu_switch_mode(int mode) +{ + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t mode_write_cmd; + uint8_t mode_read_cmd; + int result = -1; + int retry = 200; + + I("%s: Entering\n", __func__); + + if (mode == 0) { + /* normal mode */ + mode_write_cmd = pfw_op->data_normal_cmd[0]; + mode_read_cmd = pfw_op->data_normal_status[0]; + } else { + /* sorting mode */ + mode_write_cmd = pfw_op->data_sorting_cmd[0]; + mode_read_cmd = pfw_op->data_sorting_status[0]; + } + + g_core_fp.fp_sense_off(); + /* g_core_fp.fp_interface_on();*/ + /* clean up FW status */ + + /* + * tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00; + */ + 
g_core_fp.fp_flash_write_burst(psram_op->addr_rawdata_addr, psram_op->addr_rawdata_end); + /* tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x7F; tmp_addr[0] = 0x04;*/ + tmp_data[3] = 0x00; + tmp_data[2] = 0x00; + tmp_data[1] = mode_write_cmd; + tmp_data[0] = mode_write_cmd; + g_core_fp.fp_assign_sorting_mode(tmp_data); + g_core_fp.fp_idle_mode(1); + g_core_fp.fp_reload_disable(1); + + /* To stable the sorting*/ + if (mode) { + /* + * tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x70; tmp_addr[0] = 0xF4; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x08; + */ + g_core_fp.fp_flash_write_burst(pdriver_op->addr_fw_define_rxnum_txnum_maxpt, pdriver_op->data_fw_define_rxnum_txnum_maxpt_sorting); + } else { + g_core_fp.fp_flash_write_burst(pfw_op->addr_set_frame_addr, pfw_op->data_set_frame); + /* + * tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x70; tmp_addr[0] = 0xF4; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x14; + */ + g_core_fp.fp_flash_write_burst(pdriver_op->addr_fw_define_rxnum_txnum_maxpt, pdriver_op->data_fw_define_rxnum_txnum_maxpt_normal); + } + + g_core_fp.fp_sense_on(0x01); + + while (retry != 0) { + I("[%d] %s Read\n", retry, __func__); + /* tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x7F; tmp_addr[0] = 0x04; */ + g_core_fp.fp_check_sorting_mode(tmp_data); + msleep(100); + I("mode_read_cmd(0)=0x%2.2X,mode_read_cmd(1)=0x%2.2X\n", tmp_data[0], tmp_data[1]); + + if (tmp_data[0] == mode_read_cmd && tmp_data[1] == mode_read_cmd) { + I("Read OK!\n"); + result = 0; + break; + } + + /* tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0xA8; */ + g_core_fp.fp_register_read(pfw_op->addr_chk_fw_status, FOUR_BYTE_DATA_SZ, tmp_data, 0); + + if (tmp_data[0] == 0x00 && tmp_data[1] == 0x00 && tmp_data[2] == 0x00 && tmp_data[3] == 0x00) { + E("%s,: FW Stop!\n", __func__); + break; + } + + retry--; + } + + if (result == 0) { + if (mode == 0) + return HX_NORMAL_MODE; /* normal mode */ + else + return HX_SORTING_MODE; /* sorting mode */ + } else + return HX_CHANGE_MODE_FAIL; /* change mode fail */ + +} + +static uint8_t himax_mcu_read_DD_status(uint8_t *cmd_set, uint8_t *tmp_data) +{ + int cnt = 0; + uint8_t req_size = cmd_set[0]; + + cmd_set[3] = pfw_op->data_dd_request[0]; + g_core_fp.fp_register_write(pfw_op->addr_dd_handshak_addr, FOUR_BYTE_DATA_SZ, cmd_set, 0); + I("%s: cmd_set[0] = 0x%02X,cmd_set[1] = 0x%02X,cmd_set[2] = 0x%02X,cmd_set[3] = 0x%02X\n", + __func__, cmd_set[0], cmd_set[1], cmd_set[2], cmd_set[3]); + + /* Doing hand shaking 0xAA -> 0xBB */ + for (cnt = 0; cnt < 100; cnt++) { + g_core_fp.fp_register_read(pfw_op->addr_dd_handshak_addr, FOUR_BYTE_DATA_SZ, tmp_data, 0); + msleep(20); + + if (tmp_data[3] == pfw_op->data_dd_ack[0]) { + I("%s Data ready goto moving data\n", __func__); + break; + } else if (cnt >= 99) { + I("%s Data not ready in FW\n", __func__); + return FW_NOT_READY; + } + } + + g_core_fp.fp_register_read(pfw_op->addr_dd_data_addr, req_size, tmp_data, 0); + return NO_ERR; +} +/* FW side end */ +#endif + +#ifdef CORE_FLASH +/* FLASH side start */ +static void himax_mcu_chip_erase(void) +{ + g_core_fp.fp_interface_on(); + + /* Reset power saving level */ + if (g_core_fp.fp_init_psl != NULL) + g_core_fp.fp_init_psl(); + + /* + *===================================== + * SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_fmt, 
pflash_op->data_spi200_trans_fmt); + /* + *===================================== + * Chip Erase + * Write Enable : 1. 0x8000_0020 ==> 0x4700_0000 + * 2. 0x8000_0024 ==> 0x0000_0006 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_ctrl, pflash_op->data_spi200_trans_ctrl_2); + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_2); + /* + *===================================== + * Chip Erase + * Erase Command : 0x8000_0024 ==> 0x0000_00C7 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_3); + msleep(2000); + + if (!g_core_fp.fp_wait_wip(100)) + E("%s: Chip_Erase Fail\n", __func__); + +} + +/* complete not yet */ +static bool himax_mcu_block_erase(int start_addr, int length) +{ + uint32_t page_prog_start = 0; + uint32_t block_size = 0x10000; + + g_core_fp.fp_interface_on(); + + g_core_fp.fp_init_psl(); + + /* + *===================================== + * SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_fmt, pflash_op->data_spi200_trans_fmt); + + for (page_prog_start = start_addr; page_prog_start < start_addr + length; page_prog_start = page_prog_start + block_size) { + /* + *===================================== + * Chip Erase + * Write Enable : 1. 0x8000_0020 ==> 0x4700_0000 + * 2. 0x8000_0024 ==> 0x0000_0006 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_ctrl, pflash_op->data_spi200_trans_ctrl_2); + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_2); + /* + *===================================== + * Block Erase + * Erase Command : 0x8000_0028 ==> 0x0000_0000 //SPI addr + * 0x8000_0020 ==> 0x6700_0000 //control + * 0x8000_0024 ==> 0x0000_0052 //BE + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_addr, pflash_op->data_spi200_addr); + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_ctrl, pflash_op->data_spi200_trans_ctrl_3); + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_4); + msleep(1000); + + if (!g_core_fp.fp_wait_wip(100)) { + E("%s:Erase Fail\n", __func__); + return false; + } + } + + I("%s:END\n", __func__); + return true; +} + +static bool himax_mcu_sector_erase(int start_addr) +{ + return true; +} + +static void himax_mcu_flash_programming(uint8_t *FW_content, int FW_Size) +{ + int page_prog_start = 0, i = 0, j = 0, k = 0; + int program_length = PROGRAM_SZ; + uint8_t tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t buring_data[FLASH_RW_MAX_LEN]; /* Read for flash data, 128K */ + + /* 4 bytes for 0x80002C padding */ + g_core_fp.fp_interface_on(); + /* + *===================================== + * SPI Transfer Format : 0x8000_0010 ==> 0x0002_0780 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_fmt, pflash_op->data_spi200_trans_fmt); + + for (page_prog_start = 0; page_prog_start < FW_Size; page_prog_start += FLASH_RW_MAX_LEN) { + /* + *===================================== + * Write Enable : 1. 0x8000_0020 ==> 0x4700_0000 + * 2. 
0x8000_0024 ==> 0x0000_0006 + *===================================== + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_ctrl, pflash_op->data_spi200_trans_ctrl_2); + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_2); + /* + *================================= + * SPI Transfer Control + * Set 256 bytes page write : 0x8000_0020 ==> 0x610F_F000 + * Set read start address : 0x8000_0028 ==> 0x0000_0000 + *================================= + * data bytes should be 0x6100_0000 + ((word_number)*4-1)*4096 = 0x6100_0000 + 0xFF000 = 0x610F_F000 + * Programmable size = 1 page = 256 bytes, word_number = 256 byte / 4 = 64 + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_trans_ctrl, pflash_op->data_spi200_trans_ctrl_4); + + /* Flash start address 1st : 0x0000_0000 */ + if (page_prog_start < 0x100) { + tmp_data[3] = 0x00; + tmp_data[2] = 0x00; + tmp_data[1] = 0x00; + tmp_data[0] = (uint8_t)page_prog_start; + } else if (page_prog_start >= 0x100 && page_prog_start < 0x10000) { + tmp_data[3] = 0x00; + tmp_data[2] = 0x00; + tmp_data[1] = (uint8_t)(page_prog_start >> 8); + tmp_data[0] = (uint8_t)page_prog_start; + } else if (page_prog_start >= 0x10000 && page_prog_start < 0x1000000) { + tmp_data[3] = 0x00; + tmp_data[2] = (uint8_t)(page_prog_start >> 16); + tmp_data[1] = (uint8_t)(page_prog_start >> 8); + tmp_data[0] = (uint8_t)page_prog_start; + } + + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_addr, tmp_data); + + /* + *================================= + * Send 16 bytes data : 0x8000_002C ==> 16 bytes data + *================================= + */ + for (i = 0; i < FOUR_BYTE_ADDR_SZ; i++) + buring_data[i] = pflash_op->addr_spi200_data[i]; + + for (i = page_prog_start, j = 0; i < 16 + page_prog_start; i++, j++)/* <------ bin file */ + buring_data[j + FOUR_BYTE_ADDR_SZ] = FW_content[i]; + + if (himax_bus_write(pic_op->addr_ahb_addr_byte_0[0], buring_data, FOUR_BYTE_ADDR_SZ + 16, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return; + } + + /* + *================================= + * Write command : 0x8000_0024 ==> 0x0000_0002 + *================================= + */ + g_core_fp.fp_flash_write_burst(pflash_op->addr_spi200_cmd, pflash_op->data_spi200_cmd_6); + + /* + *================================= + * Send 240 bytes data : 0x8000_002C ==> 240 bytes data + *================================= + */ + for (j = 0; j < 5; j++) { + for (i = (page_prog_start + 16 + (j * 48)), k = 0; i < (page_prog_start + 16 + (j * 48)) + program_length; i++, k++) + buring_data[k + FOUR_BYTE_ADDR_SZ] = FW_content[i]; + + if (himax_bus_write(pic_op->addr_ahb_addr_byte_0[0], buring_data, program_length + FOUR_BYTE_ADDR_SZ, HIMAX_I2C_RETRY_TIMES) < 0) { + E("%s: i2c access fail!\n", __func__); + return; + } + } + + if (!g_core_fp.fp_wait_wip(1)) + E("%s:Flash_Programming Fail\n", __func__); + } +} + +static void himax_mcu_flash_page_write(uint8_t *write_addr, int length, uint8_t *write_data) +{ +} + +static int himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_32k(unsigned char *fw, int len, bool change_iref) +{ + /* Not use */ + return 0; +} + +static int himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_60k(unsigned char *fw, int len, bool change_iref) +{ + /* Not use */ + return 0; +} + +static int himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_64k(unsigned char *fw, int len, bool change_iref) +{ + int burnFW_success = 0; + + if (len != FW_SIZE_64k) { + E("%s: The file size is not 64K bytes\n", __func__); + return false; + } + +#ifdef HX_RST_PIN_FUNC 
+ g_core_fp.fp_ic_reset(false, false); +#else + /* + * tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x18; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x55; + */ + g_core_fp.fp_system_reset(); +#endif + g_core_fp.fp_sense_off(); + g_core_fp.fp_chip_erase(); + g_core_fp.fp_flash_programming(fw, FW_SIZE_64k); + + if (g_core_fp.fp_check_CRC(pfw_op->addr_program_reload_from, FW_SIZE_64k) == 0) + burnFW_success = 1; + + /* RawOut select initial */ + g_core_fp.fp_register_write(pfw_op->addr_raw_out_sel, sizeof(pfw_op->data_clear), pfw_op->data_clear, false); + /* DSRAM func initial */ + g_core_fp.fp_assign_sorting_mode(pfw_op->data_clear); + +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_ic_reset(false, false); +#else + /* + * System reset + * tmp_addr[3] = 0x90; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x18; + * tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x55; + */ + g_core_fp.fp_system_reset(); +#endif + return burnFW_success; +} + +static int himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_124k(unsigned char *fw, int len, bool change_iref) +{ + /* Not use */ + return 0; +} + +static int himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_128k(unsigned char *fw, int len, bool change_iref) +{ + /* Not use */ + return 0; +} + +static void himax_mcu_flash_dump_func(uint8_t local_flash_command, int Flash_Size, uint8_t *flash_buffer) +{ + uint8_t tmp_addr[FOUR_BYTE_DATA_SZ]; + uint8_t buffer[256]; + int page_prog_start = 0; + + g_core_fp.fp_sense_off(); + g_core_fp.fp_burst_enable(1); + + for (page_prog_start = 0; page_prog_start < Flash_Size; page_prog_start += 128) { + /* + *================================= + * SPI Transfer Control + * Set 256 bytes page read : 0x8000_0020 ==> 0x6940_02FF + * Set read start address : 0x8000_0028 ==> 0x0000_0000 + * Set command : 0x8000_0024 ==> 0x0000_003B + *================================= + */ + tmp_addr[0] = page_prog_start % 0x100; + tmp_addr[1] = (page_prog_start >> 8) % 0x100; + tmp_addr[2] = (page_prog_start >> 16) % 0x100; + tmp_addr[3] = page_prog_start / 0x1000000; + himax_mcu_register_read(tmp_addr, 128, buffer, 0); + memcpy(&flash_buffer[page_prog_start], buffer, 128); + } + + g_core_fp.fp_burst_enable(0); + g_core_fp.fp_sense_on(0x01); +} + +static bool himax_mcu_flash_lastdata_check(void) +{ + uint8_t tmp_addr[4]; + uint32_t start_addr = 0xFF80; + uint32_t temp_addr = 0; + uint32_t flash_page_len = 0x80; + uint8_t flash_tmp_buffer[128]; + + for (temp_addr = start_addr; temp_addr < (start_addr + flash_page_len); temp_addr = temp_addr + flash_page_len) { + /* I("temp_addr=%d,tmp_addr[0]=0x%2X, tmp_addr[1]=0x%2X,tmp_addr[2]=0x%2X,tmp_addr[3]=0x%2X\n", temp_addr,tmp_addr[0], tmp_addr[1], tmp_addr[2],tmp_addr[3]); */ + tmp_addr[0] = temp_addr % 0x100; + tmp_addr[1] = (temp_addr >> 8) % 0x100; + tmp_addr[2] = (temp_addr >> 16) % 0x100; + tmp_addr[3] = temp_addr / 0x1000000; + g_core_fp.fp_register_read(tmp_addr, flash_page_len, &flash_tmp_buffer[0], 0); + } + + if ((!flash_tmp_buffer[flash_page_len-4]) && (!flash_tmp_buffer[flash_page_len-3]) && (!flash_tmp_buffer[flash_page_len-2]) && (!flash_tmp_buffer[flash_page_len-1])) + return 1;/* FAIL */ + + I("flash_buffer[FFFC]=0x%2X,flash_buffer[FFFD]=0x%2X,flash_buffer[FFFE]=0x%2X,flash_buffer[FFFF]=0x%2X\n", + flash_tmp_buffer[flash_page_len-4], flash_tmp_buffer[flash_page_len-3], flash_tmp_buffer[flash_page_len-2], flash_tmp_buffer[flash_page_len-1]); + return 0;/* PASS */ +} +/* FLASH side end */ +#endif + +#ifdef CORE_SRAM 
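+/*
+ * DSRAM raw-data readout (himax_mcu_get_DSRAM_data): the host writes the
+ * start password to addr_rawdata_addr, polls until the FW reports the end
+ * password, reads the frame over the bus in MAX_I2C_TRANS_SZ chunks,
+ * signals the FW to stop or continue streaming (DSRAM_Flag), and finally
+ * verifies the 16-bit word checksum before copying out the mutual data.
+ */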
+/* SRAM side start */ +static void himax_mcu_sram_write(uint8_t *FW_content) +{ +} + +static bool himax_mcu_sram_verify(uint8_t *FW_File, int FW_Size) +{ + return true; +} + +static void himax_mcu_get_DSRAM_data(uint8_t *info_data, bool DSRAM_Flag) +{ + int i = 0; + unsigned char tmp_addr[FOUR_BYTE_ADDR_SZ]; + unsigned char tmp_data[FOUR_BYTE_DATA_SZ]; + uint8_t max_i2c_size = MAX_I2C_TRANS_SZ; + uint8_t x_num = ic_data->HX_RX_NUM; + uint8_t y_num = ic_data->HX_TX_NUM; + /* int m_key_num = 0; */ + int total_size = (x_num * y_num + x_num + y_num) * 2 + 4; + int total_size_temp; + int mutual_data_size = x_num * y_num * 2; + int total_read_times = 0; + int address = 0; + uint8_t *temp_info_data; /* max mkey size = 8 */ + uint16_t check_sum_cal = 0; + int fw_run_flag = -1; + + temp_info_data = kcalloc((total_size + 8), sizeof(uint8_t), GFP_KERNEL); + if (!temp_info_data) { + E("%s: allocate memory failed!\n", __func__); + return; + } + /* + *1. Read number of MKey R100070E8H to determin data size + * m_key_num = ic_data->HX_BT_NUM; + * I("%s,m_key_num=%d\n",__func__ ,m_key_num); + * total_size += m_key_num * 2; + *2. Start DSRAM Rawdata and Wait Data Ready + *=====tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00; + */ + tmp_data[3] = 0x00; tmp_data[2] = 0x00; + tmp_data[1] = psram_op->passwrd_start[1]; + tmp_data[0] = psram_op->passwrd_start[0]; + fw_run_flag = himax_write_read_reg(psram_op->addr_rawdata_addr, tmp_data, psram_op->passwrd_end[1], psram_op->passwrd_end[0]); + + if (fw_run_flag < 0) { + I("%s Data NOT ready => bypass\n", __func__); + kfree(temp_info_data); + return; + } + + /* 3. Read RawData */ + total_size_temp = total_size; + /* ====tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00; */ + I("%s: tmp_data[0] = 0x%02X,tmp_data[1] = 0x%02X,tmp_data[2] = 0x%02X,tmp_data[3] = 0x%02X\n", + __func__, psram_op->addr_rawdata_addr[0], psram_op->addr_rawdata_addr[1], psram_op->addr_rawdata_addr[2], psram_op->addr_rawdata_addr[3]); + tmp_addr[0] = psram_op->addr_rawdata_addr[0]; + tmp_addr[1] = psram_op->addr_rawdata_addr[1]; + tmp_addr[2] = psram_op->addr_rawdata_addr[2]; + tmp_addr[3] = psram_op->addr_rawdata_addr[3]; + + if (total_size % max_i2c_size == 0) + total_read_times = total_size / max_i2c_size; + else + total_read_times = total_size / max_i2c_size + 1; + + for (i = 0; i < total_read_times; i++) { + address = (psram_op->addr_rawdata_addr[3] << 24) + + (psram_op->addr_rawdata_addr[2] << 16) + + (psram_op->addr_rawdata_addr[1] << 8) + + psram_op->addr_rawdata_addr[0] + i * max_i2c_size; + I("%s address = %08X\n", __func__, address); + + tmp_addr[3] = (uint8_t)((address >> 24) & 0x00FF); + tmp_addr[2] = (uint8_t)((address >> 16) & 0x00FF); + tmp_addr[1] = (uint8_t)((address >> 8) & 0x00FF); + tmp_addr[0] = (uint8_t)((address) & 0x00FF); + + if (total_size_temp >= max_i2c_size) { + g_core_fp.fp_register_read(tmp_addr, max_i2c_size, &temp_info_data[i * max_i2c_size], 0); + total_size_temp = total_size_temp - max_i2c_size; + } else { + /* I("last total_size_temp=%d\n",total_size_temp); */ + g_core_fp.fp_register_read(tmp_addr, total_size_temp % max_i2c_size, &temp_info_data[i * max_i2c_size], 0); + } + } + + /* 4. 
FW stop outputing */ + /* I("DSRAM_Flag=%d\n",DSRAM_Flag); */ + if (DSRAM_Flag == false) { + /* + * I("Return to Event Stack!\n"); + * ====tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00; + * ====tmp_data[3] = 0x00; tmp_data[2] = 0x00; tmp_data[1] = 0x00; tmp_data[0] = 0x00; + */ + g_core_fp.fp_flash_write_burst(psram_op->addr_rawdata_addr, psram_op->data_fin); + } else { + /* + * I("Continue to SRAM!\n"); + * =====tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x00; tmp_addr[0] = 0x00; + * =====tmp_data[3] = 0x11; tmp_data[2] = 0x22; tmp_data[1] = 0x33; tmp_data[0] = 0x44; + */ + g_core_fp.fp_flash_write_burst(psram_op->addr_rawdata_addr, psram_op->data_conti); + } + + /* 5. Data Checksum Check */ + for (i = 2; i < total_size; i += 2) /* 2:PASSWORD NOT included */ + check_sum_cal += (temp_info_data[i + 1] * 256 + temp_info_data[i]); + + if (check_sum_cal % 0x10000 != 0) { + I("%s check_sum_cal fail=%2X\n", __func__, check_sum_cal); + kfree(temp_info_data); + return; + } + + memcpy(info_data, &temp_info_data[4], mutual_data_size * sizeof(uint8_t)); + /* I("%s checksum PASS\n", __func__); */ + + kfree(temp_info_data); +} +/* SRAM side end */ +#endif + +#ifdef CORE_DRIVER +static bool himax_mcu_detect_ic(void) +{ + I("%s: use default incell detect.\n", __func__); + + return 0; +} + + +static void himax_mcu_init_ic(void) +{ + I("%s: use default incell init.\n", __func__); +} + + +#ifdef HX_RST_PIN_FUNC +static void himax_mcu_pin_reset(void) +{ + I("%s: Now reset the Touch chip.\n", __func__); + himax_rst_gpio_set(private_ts->rst_gpio, 0); + msleep(20); + himax_rst_gpio_set(private_ts->rst_gpio, 1); + msleep(50); +} + +static void himax_mcu_ic_reset(uint8_t loadconfig, uint8_t int_off) +{ + struct himax_ts_data *ts = private_ts; + + HX_HW_RESET_ACTIVATE = 1; + I("%s,status: loadconfig=%d,int_off=%d\n", __func__, loadconfig, int_off); + + if (ts->rst_gpio >= 0) { + if (int_off) + g_core_fp.fp_irq_switch(0); + + g_core_fp.fp_pin_reset(); + + if (loadconfig) + g_core_fp.fp_reload_config(); + + if (int_off) + g_core_fp.fp_irq_switch(1); + } +} +#endif + +static void himax_mcu_touch_information(void) +{ +#ifndef HX_FIX_TOUCH_INFO + char data[EIGHT_BYTE_DATA_SZ] = {0}; + + /* cmd[3] = 0x10; cmd[2] = 0x00; cmd[1] = 0x70; cmd[0] = 0xF4; */ + g_core_fp.fp_register_read(pdriver_op->addr_fw_define_rxnum_txnum_maxpt, EIGHT_BYTE_DATA_SZ, data, 0); + ic_data->HX_RX_NUM = data[2]; + ic_data->HX_TX_NUM = data[3]; + ic_data->HX_MAX_PT = data[4]; + /* + * I("%s : HX_RX_NUM=%d,ic_data->HX_TX_NUM=%d,ic_data->HX_MAX_PT=%d\n",__func__,ic_data->HX_RX_NUM,ic_data->HX_TX_NUM,ic_data->HX_MAX_PT); + * cmd[3] = 0x10; cmd[2] = 0x00; cmd[1] = 0x70; cmd[0] = 0xFA; + */ + g_core_fp.fp_register_read(pdriver_op->addr_fw_define_xy_res_enable, FOUR_BYTE_DATA_SZ, data, 0); + + /* I("%s : c_data->HX_XY_REVERSE=0x%2.2X\n",__func__,data[1]); */ + if ((data[1] & 0x04) == 0x04) + ic_data->HX_XY_REVERSE = true; + else + ic_data->HX_XY_REVERSE = false; + + /* cmd[3] = 0x10; cmd[2] = 0x00; cmd[1] = 0x70; cmd[0] = 0xFC; */ + g_core_fp.fp_register_read(pdriver_op->addr_fw_define_x_y_res, FOUR_BYTE_DATA_SZ, data, 0); + ic_data->HX_Y_RES = data[0] * 256 + data[1]; + ic_data->HX_X_RES = data[2] * 256 + data[3]; + /* + * I("%s : ic_data->HX_Y_RES=%d,ic_data->HX_X_RES=%d\n",__func__,ic_data->HX_Y_RES,ic_data->HX_X_RES); + * cmd[3] = 0x10; cmd[2] = 0x00; cmd[1] = 0x70; cmd[0] = 0x89; + */ + g_core_fp.fp_register_read(pdriver_op->addr_fw_define_int_is_edge, FOUR_BYTE_DATA_SZ, data, 0); + + /* + * I("%s : 
data[0]=0x%2.2X,data[1]=0x%2.2X,data[2]=0x%2.2X,data[3]=0x%2.2X\n",__func__,data[0],data[1],data[2],data[3]); + * I("data[0] & 0x01 = %d\n",(data[0] & 0x01)); + */ + if ((data[0] & 0x01) == 1) + ic_data->HX_INT_IS_EDGE = true; + else + ic_data->HX_INT_IS_EDGE = false; + + if (ic_data->HX_RX_NUM > 40) + ic_data->HX_RX_NUM = 32; + + + if (ic_data->HX_TX_NUM > 20) + ic_data->HX_TX_NUM = 18; + + if (ic_data->HX_MAX_PT > 10) + ic_data->HX_MAX_PT = 10; + + if (ic_data->HX_Y_RES > 2000) + ic_data->HX_Y_RES = 1280; + + if (ic_data->HX_X_RES > 2000) + ic_data->HX_X_RES = 720; + + /* 1. Read number of MKey R100070E8H to determin data size */ + /* ====tmp_addr[3] = 0x10; tmp_addr[2] = 0x00; tmp_addr[1] = 0x70; tmp_addr[0] = 0xE8; */ + g_core_fp.fp_register_read(psram_op->addr_mkey, FOUR_BYTE_DATA_SZ, data, 0); + /* + * I("%s: tmp_data[0] = 0x%02X,tmp_data[1] = 0x%02X,tmp_data[2] = 0x%02X,tmp_data[3] = 0x%02X\n", + * __func__, tmp_data[0], tmp_data[1], tmp_data[2], tmp_data[3]); + */ + ic_data->HX_BT_NUM = data[0] & 0x03; +#else + ic_data->HX_RX_NUM = FIX_HX_RX_NUM; + ic_data->HX_TX_NUM = FIX_HX_TX_NUM; + ic_data->HX_BT_NUM = FIX_HX_BT_NUM; + ic_data->HX_X_RES = FIX_HX_X_RES; + ic_data->HX_Y_RES = FIX_HX_Y_RES; + ic_data->HX_MAX_PT = FIX_HX_MAX_PT; + ic_data->HX_XY_REVERSE = FIX_HX_XY_REVERSE; + ic_data->HX_INT_IS_EDGE = FIX_HX_INT_IS_EDGE; +#endif + I("%s:HX_RX_NUM =%d,HX_TX_NUM =%d,HX_MAX_PT=%d\n", __func__, ic_data->HX_RX_NUM, ic_data->HX_TX_NUM, ic_data->HX_MAX_PT); + I("%s:HX_XY_REVERSE =%d,HX_Y_RES =%d,HX_X_RES=%d\n", __func__, ic_data->HX_XY_REVERSE, ic_data->HX_Y_RES, ic_data->HX_X_RES); + I("%s:HX_INT_IS_EDGE =%d\n", __func__, ic_data->HX_INT_IS_EDGE); +} + +static void himax_mcu_reload_config(void) +{ + if (himax_report_data_init()) + E("%s: allocate data fail\n", __func__); + + g_core_fp.fp_sense_on(0x00); +} + +static int himax_mcu_get_touch_data_size(void) +{ + return HIMAX_TOUCH_DATA_SIZE; +} + +static int himax_mcu_hand_shaking(void) +{ + /* 0:Running, 1:Stop, 2:I2C Fail */ + int result = 0; + return result; +} + +static int himax_mcu_determin_diag_rawdata(int diag_command) +{ + return diag_command % 10; +} + +static int himax_mcu_determin_diag_storage(int diag_command) +{ + return diag_command / 10; +} + +static int himax_mcu_cal_data_len(int raw_cnt_rmd, int HX_MAX_PT, int raw_cnt_max) +{ + int RawDataLen; + + if (raw_cnt_rmd != 0x00) + RawDataLen = MAX_I2C_TRANS_SZ - ((HX_MAX_PT + raw_cnt_max + 3) * 4) - 1; + else + RawDataLen = MAX_I2C_TRANS_SZ - ((HX_MAX_PT + raw_cnt_max + 2) * 4) - 1; + + return RawDataLen; +} + +static bool himax_mcu_diag_check_sum(struct himax_report_data *hx_touch_data) +{ + uint16_t check_sum_cal = 0; + int i; + + /* Check 128th byte CRC */ + for (i = 0, check_sum_cal = 0; i < (hx_touch_data->touch_all_size - hx_touch_data->touch_info_size); i += 2) + check_sum_cal += (hx_touch_data->hx_rawdata_buf[i + 1] * FLASH_RW_MAX_LEN + hx_touch_data->hx_rawdata_buf[i]); + + if (check_sum_cal % HX64K != 0) { + I("%s fail=%2X\n", __func__, check_sum_cal); + return 0; + } + + return 1; +} + +static void himax_mcu_diag_parse_raw_data(struct himax_report_data *hx_touch_data, int mul_num, int self_num, uint8_t diag_cmd, int32_t *mutual_data, int32_t *self_data) +{ + diag_mcu_parse_raw_data(hx_touch_data, mul_num, self_num, diag_cmd, mutual_data, self_data); +} + +#ifdef HX_ESD_RECOVERY +static int himax_mcu_ic_esd_recovery(int hx_esd_event, int hx_zero_event, int length) +{ + int ret_val = NO_ERR; + + if (g_zero_event_count > 5) { + g_zero_event_count = 0; + I("[HIMAX TP 
MSG]: ESD event checked - ALL Zero.\n"); + ret_val = HX_ESD_EVENT; + goto END_FUNCTION; + } + + if (hx_esd_event == length) { + g_zero_event_count = 0; + ret_val = HX_ESD_EVENT; + goto END_FUNCTION; + } else if (hx_zero_event == length) { + g_zero_event_count++; + I("[HIMAX TP MSG]: ALL Zero event is %d times.\n", g_zero_event_count); + ret_val = HX_ZERO_EVENT_COUNT; + goto END_FUNCTION; + } + +END_FUNCTION: + return ret_val; +} + +static void himax_mcu_esd_ic_reset(void) +{ + HX_ESD_RESET_ACTIVATE = 0; +#ifdef HX_RST_PIN_FUNC + himax_mcu_pin_reset(); +#endif + I("%s:\n", __func__); +} +#endif +#endif + +#if defined(HX_SMART_WAKEUP) || defined(HX_HIGH_SENSE) || defined(HX_USB_DETECT_GLOBAL) +static void himax_mcu_resend_cmd_func(bool suspended) +{ +#if defined(HX_SMART_WAKEUP) || defined(HX_HIGH_SENSE) + struct himax_ts_data *ts = private_ts; +#endif +#ifdef HX_SMART_WAKEUP + g_core_fp.fp_set_SMWP_enable(ts->SMWP_enable, suspended); +#endif +#ifdef HX_HIGH_SENSE + g_core_fp.fp_set_HSEN_enable(ts->HSEN_enable, suspended); +#endif +#ifdef HX_USB_DETECT_GLOBAL + himax_cable_detect_func(true); +#endif +} +#endif + +#ifdef HX_ZERO_FLASH +int G_POWERONOF = 1; +void himax_mcu_sys_reset(void) +{ + /* 0x10007f00 -> 0x00009AA9 //Disable Flash Reload */ + g_core_fp.fp_flash_write_burst(pzf_op->addr_dis_flash_reload, pzf_op->data_dis_flash_reload); + msleep(20); + g_core_fp.fp_register_write(pzf_op->addr_system_reset, 4, pzf_op->data_system_reset, false); +} +void himax_mcu_clean_sram_0f(uint8_t *addr, int write_len, int type) +{ + int total_read_times = 0; + int max_bus_size = MAX_I2C_TRANS_SZ; + int total_size_temp = 0; + int total_size = 0; + int address = 0; + int i = 0; + + uint8_t fix_data = 0x00; + uint8_t tmp_addr[4]; + uint8_t tmp_data[MAX_I2C_TRANS_SZ] = {0}; + + I("%s, Entering\n", __func__); + + total_size = write_len; + + if (total_size > 4096) + max_bus_size = 4096; + + total_size_temp = write_len; + + g_core_fp.fp_burst_enable(1); + + tmp_addr[3] = addr[3]; + tmp_addr[2] = addr[2]; + tmp_addr[1] = addr[1]; + tmp_addr[0] = addr[0]; + I("%s, write addr tmp_addr[3]=0x%2.2X, tmp_addr[2]=0x%2.2X, tmp_addr[1]=0x%2.2X, tmp_addr[0]=0x%2.2X\n", + __func__, tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]); + + switch (type) { + case 0: + fix_data = 0x00; + break; + case 1: + fix_data = 0xAA; + break; + case 2: + fix_data = 0xBB; + break; + } + + for (i = 0; i < MAX_I2C_TRANS_SZ; i++) + tmp_data[i] = fix_data; + + + I("%s, total size=%d\n", __func__, total_size); + + if (total_size_temp % max_bus_size == 0) + total_read_times = total_size_temp / max_bus_size; + else + total_read_times = total_size_temp / max_bus_size + 1; + + for (i = 0; i < (total_read_times); i++) { + I("[log]write %d time start!\n", i); + if (total_size_temp >= max_bus_size) { + g_core_fp.fp_flash_write_burst_length(tmp_addr, tmp_data, max_bus_size); + total_size_temp = total_size_temp - max_bus_size; + } else { + I("last total_size_temp=%d\n", total_size_temp); + g_core_fp.fp_flash_write_burst_length(tmp_addr, tmp_data, total_size_temp % max_bus_size); + } + address = ((i+1) * max_bus_size); + tmp_addr[1] = addr[1] + (uint8_t) ((address>>8) & 0x00FF); + tmp_addr[0] = addr[0] + (uint8_t) ((address) & 0x00FF); + + msleep(20); + } + + I("%s, END\n", __func__); +} + +void himax_mcu_write_sram_0f(const struct firmware *fw_entry, uint8_t *addr, int start_index, uint32_t write_len) +{ + int total_read_times = 0; + int max_bus_size = MAX_I2C_TRANS_SZ; + int total_size_temp = 0; + int total_size = 0; + int address = 0; + int i = 0; 
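+	/*
+	 * Chunked SRAM write: the firmware slice
+	 * [start_index, start_index + write_len) is copied to a bounce
+	 * buffer and pushed out in bursts of at most 4 KB
+	 * (max_bus_size = min(write_len, 4096)), so
+	 * total_read_times = DIV_ROUND_UP(write_len, max_bus_size).
+	 * For example, the 48K code area takes 12 bursts of 4096 bytes
+	 * (assuming HX_48K_SZ is 48 * 1024). After each burst the low two
+	 * target address bytes are advanced by max_bus_size, with a manual
+	 * carry from tmp_addr[0] into tmp_addr[1]; the upper two address
+	 * bytes always keep the values passed in addr[].
+	 */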
+ + uint8_t tmp_addr[4]; + uint8_t *tmp_data; + uint32_t now_addr; + + I("%s, ---Entering\n", __func__); + + total_size = fw_entry->size; + + total_size_temp = write_len; + + if (write_len > 4096) + max_bus_size = 4096; + else + max_bus_size = write_len; + + g_core_fp.fp_burst_enable(1); + + tmp_addr[3] = addr[3]; + tmp_addr[2] = addr[2]; + tmp_addr[1] = addr[1]; + tmp_addr[0] = addr[0]; + I("%s, write addr tmp_addr[3]=0x%2.2X, tmp_addr[2]=0x%2.2X, tmp_addr[1]=0x%2.2X, tmp_addr[0]=0x%2.2X\n", + __func__, tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]); + now_addr = (addr[3] << 24) + (addr[2] << 16) + (addr[1] << 8) + addr[0]; + I("now addr= 0x%08X\n", now_addr); + + I("%s, total size=%d\n", __func__, total_size); + + + tmp_data = kcalloc(total_size, sizeof(uint8_t), GFP_KERNEL); + if (!tmp_data) { + E("%s: allocate memory failed!\n", __func__); + return; + } + memcpy(tmp_data, fw_entry->data, total_size); + + /* + * for(i = 0;i<10;i++) + * { + * I("[%d] 0x%2.2X", i, tmp_data[i]); + * } + * I("\n"); + */ + if (total_size_temp % max_bus_size == 0) + total_read_times = total_size_temp / max_bus_size; + else + total_read_times = total_size_temp / max_bus_size + 1; + + for (i = 0; i < (total_read_times); i++) { + I("[log]write %d time start!\n", i); + I("[log]addr[3]=0x%02X, addr[2]=0x%02X, addr[1]=0x%02X, addr[0]=0x%02X!\n", tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]); + + if (total_size_temp >= max_bus_size) { + g_core_fp.fp_flash_write_burst_length(tmp_addr, &(tmp_data[start_index+i * max_bus_size]), max_bus_size); + total_size_temp = total_size_temp - max_bus_size; + } else { + I("last total_size_temp=%d\n", total_size_temp); + g_core_fp.fp_flash_write_burst_length(tmp_addr, &(tmp_data[start_index+i * max_bus_size]), total_size_temp % max_bus_size); + } + + I("[log]write %d time end!\n", i); + address = ((i+1) * max_bus_size); + tmp_addr[0] = addr[0] + (uint8_t) ((address) & 0x00FF); + + if (tmp_addr[0] < addr[0]) + tmp_addr[1] = addr[1] + (uint8_t) ((address>>8) & 0x00FF) + 1; + else + tmp_addr[1] = addr[1] + (uint8_t) ((address>>8) & 0x00FF); + + udelay(100); + } + I("%s, ----END\n", __func__); + kfree(tmp_data); +} + +void himax_mcu_firmware_update_0f(const struct firmware *fw_entry) +{ + int retry = 0; + int crc = -1; + + I("%s, Entering\n", __func__); + + g_core_fp.fp_register_write(pzf_op->addr_system_reset, 4, pzf_op->data_system_reset, false); + + g_core_fp.fp_sense_off(); + + /* first 48K */ + do { + g_core_fp.fp_write_sram_0f(fw_entry, pzf_op->data_sram_start_addr, 0, HX_48K_SZ); + crc = g_core_fp.fp_check_CRC(pzf_op->data_sram_start_addr, HX_48K_SZ); + if (crc == 0) { + I("%s, HW CRC OK in %d time\n", __func__, retry); + break; + } + E("%s, HW CRC FAIL in %d time !\n", __func__, retry); + + retry++; + } while (crc != 0 && retry < 3); + + if (crc != 0) { + E("Last time CRC Fail!\n"); + return; + } + + /* clean */ + if (G_POWERONOF == 1) + g_core_fp.fp_clean_sram_0f(pzf_op->data_sram_clean, HX_32K_SZ, 0); + + /* last 16k */ + /* config info */ + if (G_POWERONOF == 1) + g_core_fp.fp_write_sram_0f(fw_entry, pzf_op->data_cfg_info, HX_48K_SZ, 132); + else + g_core_fp.fp_clean_sram_0f(pzf_op->data_cfg_info, 132, 2); + + /* FW config */ + if (G_POWERONOF == 1) + g_core_fp.fp_write_sram_0f(fw_entry, pzf_op->data_fw_cfg, 0xC0FE, 512); + else + g_core_fp.fp_clean_sram_0f(pzf_op->data_fw_cfg, 512, 1); + + /* ADC config */ + if (G_POWERONOF == 1) + g_core_fp.fp_write_sram_0f(fw_entry, pzf_op->data_adc_cfg_1, 0xD000, 376); + else + g_core_fp.fp_clean_sram_0f(pzf_op->data_adc_cfg_1, 
376, 2); + + if (G_POWERONOF == 1) + g_core_fp.fp_write_sram_0f(fw_entry, pzf_op->data_adc_cfg_2, 0xD178, 376); + else + g_core_fp.fp_clean_sram_0f(pzf_op->data_adc_cfg_2, 376, 2); + + if (G_POWERONOF == 1) + g_core_fp.fp_write_sram_0f(fw_entry, pzf_op->data_adc_cfg_3, 0xD000, 376); + else + g_core_fp.fp_clean_sram_0f(pzf_op->data_adc_cfg_3, 376, 2); + + msleep(20); + g_core_fp.fp_sys_reset(); + msleep(20); + I("%s:End\n", __func__); + himax_int_enable(1); + + I("%s, END\n", __func__); +} + +void himax_mcu_0f_operation(struct work_struct *work) +{ + int err = NO_ERR; + const struct firmware *fw_entry = NULL; + char *firmware_name = "himax.bin"; + + I("%s, Entering\n", __func__); + I("file name = %s\n", firmware_name); + err = request_firmware(&fw_entry, firmware_name, private_ts->dev); + if (err < 0) { + E("%s, fail in line%d error code=%d\n", __func__, __LINE__, err); + return; + } + g_core_fp.fp_firmware_update_0f(fw_entry); + release_firmware(fw_entry); + + I("%s, END\n", __func__); +} + +#ifdef HX_0F_DEBUG +void himax_mcu_read_sram_0f(const struct firmware *fw_entry, uint8_t *addr, int start_index, int read_len) +{ + int total_read_times = 0; + int max_i2c_size = MAX_I2C_TRANS_SZ; + int total_size_temp = 0; + int total_size = 0; + int address = 0; + int i = 0, j = 0; + int not_same = 0; + + uint8_t tmp_addr[4]; + uint8_t *temp_info_data; + int *not_same_buff; + + I("%s, Entering\n", __func__); + + g_core_fp.fp_burst_enable(1); + total_size = read_len; + total_size_temp = read_len; + temp_info_data = kcalloc(total_size, sizeof(uint8_t), GFP_KERNEL); + if (!temp_info_data) { + E("%s: allocate memory failed: temp_info_data!\n", __func__); + return; + } + not_same_buff = kcalloc(total_size, sizeof(int), GFP_KERNEL); + if (!not_same_buff) { + kfree(temp_info_data); + E("%s: allocate memory failed: not_same_buff!\n", __func__); + return; + } + + tmp_addr[3] = addr[3]; + tmp_addr[2] = addr[2]; + tmp_addr[1] = addr[1]; + tmp_addr[0] = addr[0]; + I("%s, read addr tmp_addr[3]=0x%2.2X, tmp_addr[2]=0x%2.2X, tmp_addr[1]=0x%2.2X, tmp_addr[0]=0x%2.2X\n", + __func__, tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]); + + I("%s, total size=%d\n", __func__, total_size); + + g_core_fp.fp_burst_enable(1); + + if (total_size % max_i2c_size == 0) + total_read_times = total_size / max_i2c_size; + else + total_read_times = total_size / max_i2c_size + 1; + + for (i = 0; i < (total_read_times); i++) { + if (total_size_temp >= max_i2c_size) { + g_core_fp.fp_register_read(tmp_addr, max_i2c_size, &temp_info_data[i*max_i2c_size], false); + total_size_temp = total_size_temp - max_i2c_size; + } else + g_core_fp.fp_register_read(tmp_addr, total_size_temp % max_i2c_size, &temp_info_data[i*max_i2c_size], false); + + address = ((i+1) * max_i2c_size); + tmp_addr[0] = addr[0] + (uint8_t) ((address) & 0x00FF); + if (tmp_addr[0] < addr[0]) + tmp_addr[1] = addr[1] + (uint8_t) ((address>>8) & 0x00FF) + 1; + else + tmp_addr[1] = addr[1] + (uint8_t) ((address>>8) & 0x00FF); + + msleep(20); + } + I("%s, READ Start\n", __func__); + I("%s, start_index = %d\n", __func__, start_index); + j = start_index; + for (i = 0; i < read_len; i++, j++) { + if (fw_entry->data[j] != temp_info_data[i]) { + not_same++; + not_same_buff[i] = 1; + } + + I("0x%2.2X, ", temp_info_data[i]); + + if (i > 0 && i%16 == 15) + I("\n"); + } + I("%s, READ END\n", __func__); + I("%s, Not Same count=%d\n", __func__, not_same); + if (not_same != 0) { + j = start_index; + for (i = 0; i < read_len; i++, j++) { + if (not_same_buff[i] == 1) + I("bin = [%d] 
0x%2.2X\n", i, fw_entry->data[j]); + } + for (i = 0; i < read_len; i++, j++) { + if (not_same_buff[i] == 1) + I("sram = [%d] 0x%2.2X\n", i, temp_info_data[i]); + } + } + I("%s, READ END\n", __func__); + I("%s, Not Same count=%d\n", __func__, not_same); + I("%s, END\n", __func__); + + kfree(not_same_buff); + kfree(temp_info_data); +} + +void himax_mcu_read_all_sram(uint8_t *addr, int read_len) +{ + int total_read_times = 0; + int max_bus_size = MAX_I2C_TRANS_SZ; + int total_size_temp = 0; + int total_size = 0; + int address = 0; + int i = 0; + /* + * struct file *fn; + * struct filename *vts_name; + */ + + uint8_t tmp_addr[4]; + uint8_t *temp_info_data; + + I("%s, Entering\n", __func__); + + g_core_fp.fp_burst_enable(1); + total_size = read_len; + total_size_temp = read_len; + temp_info_data = kcalloc(total_size, sizeof(uint8_t), GFP_KERNEL); + if (!temp_info_data) { + E("%s: allocate memory failed!\n", __func__); + return; + } + + tmp_addr[3] = addr[3]; + tmp_addr[2] = addr[2]; + tmp_addr[1] = addr[1]; + tmp_addr[0] = addr[0]; + I("%s, read addr tmp_addr[3]=0x%2.2X, tmp_addr[2]=0x%2.2X, tmp_addr[1]=0x%2.2X, tmp_addr[0]=0x%2.2X\n", + __func__, tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]); + + I("%s, total size=%d\n", __func__, total_size); + + if (total_size % max_bus_size == 0) + total_read_times = total_size / max_bus_size; + else + total_read_times = total_size / max_bus_size + 1; + + for (i = 0; i < (total_read_times); i++) { + if (total_size_temp >= max_bus_size) + g_core_fp.fp_register_read(tmp_addr, max_bus_size, &temp_info_data[i*max_bus_size], false); + total_size_temp = total_size_temp - max_bus_size; + else + g_core_fp.fp_register_read(tmp_addr, total_size_temp % max_bus_size, &temp_info_data[i*max_bus_size], false); + + address = ((i+1) * max_bus_size); + tmp_addr[1] = addr[1] + (uint8_t) ((address>>8) & 0x00FF); + tmp_addr[0] = addr[0] + (uint8_t) ((address) & 0x00FF); + + msleep(20); + } + I("%s, NOW addr tmp_addr[3]=0x%2.2X, tmp_addr[2]=0x%2.2X, tmp_addr[1]=0x%2.2X, tmp_addr[0]=0x%2.2X\n", + __func__, tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]); + /* + * for(i = 0;i 0 && i%16 == 15) + * printk("\n"); + * } + */ + + /* need modify */ + /* + * I("Now Write File start!\n"); + * vts_name = getname_kernel("/sdcard/dump_dsram.txt"); + * fn = file_open_name(vts_name, O_CREAT | O_WRONLY, 0); + * if (!IS_ERR (fn)) { + * I("%s create file and ready to write\n", __func__); + * fn->f_op->write(fn, temp_info_data, read_len*sizeof(uint8_t), &fn->f_pos); + * filp_close(fn, NULL); + * } + * I("Now Write File End!\n"); + */ + + I("%s, END\n", __func__); + + kfree(temp_info_data); +} + +void himax_mcu_firmware_read_0f(const struct firmware *fw_entry, int type) +{ + uint8_t tmp_addr[4]; + + I("%s, Entering\n", __func__); + if (type == 0) { + /* first 48K */ + g_core_fp.fp_read_sram_0f(fw_entry, pzf_op->data_sram_start_addr, 0, HX_48K_SZ); + g_core_fp.fp_read_all_sram(tmp_addr, 0xC000); + } else { + /* last 16k */ + g_core_fp.fp_read_sram_0f(fw_entry, pzf_op->data_cfg_info, 0xC000, 132); + g_core_fp.fp_read_sram_0f(fw_entry, pzf_op->data_fw_cfg, 0xC0FE, 512); + g_core_fp.fp_read_sram_0f(fw_entry, pzf_op->data_adc_cfg_1, 0xD000, 376); + g_core_fp.fp_read_sram_0f(fw_entry, pzf_op->data_adc_cfg_2, 0xD178, 376); + g_core_fp.fp_read_sram_0f(fw_entry, pzf_op->data_adc_cfg_3, 0xD000, 376); + g_core_fp.fp_read_all_sram(pzf_op->data_sram_clean, HX_32K_SZ); + } + I("%s, END\n", __func__); +} + +void himax_mcu_0f_operation_check(int type) +{ + int err = NO_ERR; + const struct firmware 
*fw_entry = NULL; + char *firmware_name = "himax.bin"; + + I("%s, Entering\n", __func__); + I("file name = %s\n", firmware_name); + + err = request_firmware(&fw_entry, firmware_name, private_ts->dev); + if (err < 0) { + E("%s, fail in line%d error code=%d\n", __func__, __LINE__, err); + return; + } + + I("first 4 bytes 0x%2X, 0x%2X, 0x%2X, 0x%2X !\n", fw_entry->data[0], fw_entry->data[1], fw_entry->data[2], fw_entry->data[3]); + I("next 4 bytes 0x%2X, 0x%2X, 0x%2X, 0x%2X !\n", fw_entry->data[4], fw_entry->data[5], fw_entry->data[6], fw_entry->data[7]); + I("and next 4 bytes 0x%2X, 0x%2X, 0x%2X, 0x%2X !\n", fw_entry->data[8], fw_entry->data[9], fw_entry->data[10], fw_entry->data[11]); + + g_core_fp.fp_firmware_read_0f(fw_entry, type); + + release_firmware(fw_entry); + I("%s, END\n", __func__); + +} +#endif + +#endif + +#ifdef CORE_INIT +/* init start */ +static void himax_mcu_fp_init(void) +{ +#ifdef CORE_IC + g_core_fp.fp_burst_enable = himax_mcu_burst_enable; + g_core_fp.fp_register_read = himax_mcu_register_read; + g_core_fp.fp_flash_write_burst = himax_mcu_flash_write_burst; + g_core_fp.fp_flash_write_burst_length = himax_mcu_flash_write_burst_length; + g_core_fp.fp_register_write = himax_mcu_register_write; + g_core_fp.fp_interface_on = himax_mcu_interface_on; + g_core_fp.fp_sense_on = himax_mcu_sense_on; + g_core_fp.fp_sense_off = himax_mcu_sense_off; + g_core_fp.fp_wait_wip = himax_mcu_wait_wip; + g_core_fp.fp_init_psl = himax_mcu_init_psl; + g_core_fp.fp_resume_ic_action = himax_mcu_resume_ic_action; + g_core_fp.fp_suspend_ic_action = himax_mcu_suspend_ic_action; + g_core_fp.fp_power_on_init = himax_mcu_power_on_init; +#endif +#ifdef CORE_FW + g_core_fp.fp_system_reset = himax_mcu_system_reset; + g_core_fp.fp_Calculate_CRC_with_AP = himax_mcu_Calculate_CRC_with_AP; + g_core_fp.fp_check_CRC = himax_mcu_check_CRC; + g_core_fp.fp_set_reload_cmd = himax_mcu_set_reload_cmd; + g_core_fp.fp_program_reload = himax_mcu_program_reload; + g_core_fp.fp_set_SMWP_enable = himax_mcu_set_SMWP_enable; + g_core_fp.fp_set_HSEN_enable = himax_mcu_set_HSEN_enable; + g_core_fp.fp_usb_detect_set = himax_mcu_usb_detect_set; + g_core_fp.fp_diag_register_set = himax_mcu_diag_register_set; + g_core_fp.fp_chip_self_test = himax_mcu_chip_self_test; + g_core_fp.fp_idle_mode = himax_mcu_idle_mode; + g_core_fp.fp_reload_disable = himax_mcu_reload_disable; + g_core_fp.fp_check_chip_version = himax_mcu_check_chip_version; + g_core_fp.fp_read_ic_trigger_type = himax_mcu_read_ic_trigger_type; + g_core_fp.fp_read_i2c_status = himax_mcu_read_i2c_status; + g_core_fp.fp_read_FW_ver = himax_mcu_read_FW_ver; + g_core_fp.fp_read_event_stack = himax_mcu_read_event_stack; + g_core_fp.fp_return_event_stack = himax_mcu_return_event_stack; + g_core_fp.fp_calculateChecksum = himax_mcu_calculateChecksum; + g_core_fp.fp_read_FW_status = himax_mcu_read_FW_status; + g_core_fp.fp_irq_switch = himax_mcu_irq_switch; + g_core_fp.fp_assign_sorting_mode = himax_mcu_assign_sorting_mode; + g_core_fp.fp_check_sorting_mode = himax_mcu_check_sorting_mode; + g_core_fp.fp_switch_mode = himax_mcu_switch_mode; + g_core_fp.fp_read_DD_status = himax_mcu_read_DD_status; +#endif +#ifdef CORE_FLASH + g_core_fp.fp_chip_erase = himax_mcu_chip_erase; + g_core_fp.fp_block_erase = himax_mcu_block_erase; + g_core_fp.fp_sector_erase = himax_mcu_sector_erase; + g_core_fp.fp_flash_programming = himax_mcu_flash_programming; + g_core_fp.fp_flash_page_write = himax_mcu_flash_page_write; + g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_32k = 
himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_32k; + g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_60k = himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_60k; + g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_64k = himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_64k; + g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_124k = himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_124k; + g_core_fp.fp_fts_ctpm_fw_upgrade_with_sys_fs_128k = himax_mcu_fts_ctpm_fw_upgrade_with_sys_fs_128k; + g_core_fp.fp_flash_dump_func = himax_mcu_flash_dump_func; + g_core_fp.fp_flash_lastdata_check = himax_mcu_flash_lastdata_check; +#endif +#ifdef CORE_SRAM + g_core_fp.fp_sram_write = himax_mcu_sram_write; + g_core_fp.fp_sram_verify = himax_mcu_sram_verify; + g_core_fp.fp_get_DSRAM_data = himax_mcu_get_DSRAM_data; +#endif +#ifdef CORE_DRIVER + g_core_fp.fp_chip_detect = himax_mcu_detect_ic; + g_core_fp.fp_chip_init = himax_mcu_init_ic; +#ifdef HX_RST_PIN_FUNC + g_core_fp.fp_pin_reset = himax_mcu_pin_reset; + g_core_fp.fp_ic_reset = himax_mcu_ic_reset; +#endif + g_core_fp.fp_touch_information = himax_mcu_touch_information; + g_core_fp.fp_reload_config = himax_mcu_reload_config; + g_core_fp.fp_get_touch_data_size = himax_mcu_get_touch_data_size; + g_core_fp.fp_hand_shaking = himax_mcu_hand_shaking; + g_core_fp.fp_determin_diag_rawdata = himax_mcu_determin_diag_rawdata; + g_core_fp.fp_determin_diag_storage = himax_mcu_determin_diag_storage; + g_core_fp.fp_cal_data_len = himax_mcu_cal_data_len; + g_core_fp.fp_diag_check_sum = himax_mcu_diag_check_sum; + g_core_fp.fp_diag_parse_raw_data = himax_mcu_diag_parse_raw_data; +#ifdef HX_ESD_RECOVERY + g_core_fp.fp_ic_esd_recovery = himax_mcu_ic_esd_recovery; + g_core_fp.fp_esd_ic_reset = himax_mcu_esd_ic_reset; +#endif +#if defined(HX_SMART_WAKEUP) || defined(HX_HIGH_SENSE) || defined(HX_USB_DETECT_GLOBAL) + g_core_fp.fp_resend_cmd_func = himax_mcu_resend_cmd_func; +#endif +#endif +#ifdef HX_ZERO_FLASH + g_core_fp.fp_sys_reset = himax_mcu_sys_reset; + g_core_fp.fp_clean_sram_0f = himax_mcu_clean_sram_0f; + g_core_fp.fp_write_sram_0f = himax_mcu_write_sram_0f; + g_core_fp.fp_firmware_update_0f = himax_mcu_firmware_update_0f; + g_core_fp.fp_0f_operation = himax_mcu_0f_operation; +#ifdef HX_0F_DEBUG + g_core_fp.fp_read_sram_0f = himax_mcu_read_sram_0f; + g_core_fp.fp_read_all_sram = himax_mcu_read_all_sram; + g_core_fp.fp_firmware_read_0f = himax_mcu_firmware_read_0f; + g_core_fp.fp_0f_operation_check = himax_mcu_0f_operation_check; +#endif +#endif +} + +void himax_mcu_in_cmd_struct_init(void) +{ + I("%s: Entering!\n", __func__); + g_core_cmd_op = kzalloc(sizeof(struct himax_core_command_operation), GFP_KERNEL); + if (!g_core_cmd_op) + return; + + g_core_cmd_op->ic_op = kzalloc(sizeof(struct ic_operation), GFP_KERNEL); + if (!g_core_cmd_op->ic_op) { + E("%s: allocate memory failed: g_core_cmd_op->ic_op!\n", __func__); + goto err_alloc_ic_op; + } + + g_core_cmd_op->fw_op = kzalloc(sizeof(struct fw_operation), GFP_KERNEL); + if (!g_core_cmd_op->fw_op) { + E("%s: allocate memory failed: g_core_cmd_op->fw_op!\n", __func__); + goto err_alloc_fw_op; + } + + g_core_cmd_op->flash_op = kzalloc(sizeof(struct flash_operation), GFP_KERNEL); + if (!g_core_cmd_op->flash_op) { + E("%s: allocate memory failed: g_core_cmd_op->flash_op!\n", __func__); + goto err_alloc_flash_op; + } + + g_core_cmd_op->sram_op = kzalloc(sizeof(struct sram_operation), GFP_KERNEL); + if (!g_core_cmd_op->sram_op) { + E("%s: allocate memory failed: g_core_cmd_op->sram_op!\n", __func__); + goto err_alloc_sram_op; + } + + g_core_cmd_op->driver_op = 
kzalloc(sizeof(struct driver_operation), GFP_KERNEL); + if (!g_core_cmd_op->driver_op) { + E("%s: allocate memory failed: g_core_cmd_op->driver_op!\n", __func__); + goto err_alloc_driver_op; + } + + pic_op = g_core_cmd_op->ic_op; + pfw_op = g_core_cmd_op->fw_op; + pflash_op = g_core_cmd_op->flash_op; + psram_op = g_core_cmd_op->sram_op; + pdriver_op = g_core_cmd_op->driver_op; +#ifdef HX_ZERO_FLASH + g_core_cmd_op->zf_op = kzalloc(sizeof(struct zf_operation), GFP_KERNEL); + if (!g_core_cmd_op->zf_op){ + E("%s: allocate memory failed: g_core_cmd_op->zf_op!\n", __func__); + goto err_alloc_zf_op; + } + + pzf_op = g_core_cmd_op->zf_op; +#endif + himax_mcu_fp_init(); + return; + +#ifdef HX_ZERO_FLASH +err_alloc_zf_op: + kfree(g_core_cmd_op->driver_op); +#endif +err_alloc_driver_op: + kfree(g_core_cmd_op->sram_op); +err_alloc_sram_op: + kfree(g_core_cmd_op->flash_op); +err_alloc_flash_op: + kfree(g_core_cmd_op->fw_op); +err_alloc_fw_op: + kfree(g_core_cmd_op->ic_op); +err_alloc_ic_op: + kfree(g_core_cmd_op); +} + +/* + *static void himax_mcu_in_cmd_struct_free(void) + *{ + * pic_op = NULL; + * pfw_op = NULL; + * pflash_op = NULL; + * psram_op = NULL; + * pdriver_op = NULL; + * kfree(g_core_cmd_op); + * kfree(g_core_cmd_op->ic_op); + * kfree(g_core_cmd_op->flash_op); + * kfree(g_core_cmd_op->sram_op); + * kfree(g_core_cmd_op->driver_op); + *} + */ + +void himax_in_parse_assign_cmd(uint32_t addr, uint8_t *cmd, int len) +{ + /* I("%s: Entering!\n", __func__); */ + switch (len) { + case 1: + cmd[0] = addr; + /* I("%s: cmd[0] = 0x%02X\n", __func__, cmd[0]); */ + break; + + case 2: + cmd[0] = addr % 0x100; + cmd[1] = (addr >> 8) % 0x100; + /* I("%s: cmd[0] = 0x%02X,cmd[1] = 0x%02X\n", __func__, cmd[0], cmd[1]); */ + break; + + case 4: + cmd[0] = addr % 0x100; + cmd[1] = (addr >> 8) % 0x100; + cmd[2] = (addr >> 16) % 0x100; + cmd[3] = addr / 0x1000000; + /* + * I("%s: cmd[0] = 0x%02X,cmd[1] = 0x%02X,cmd[2] = 0x%02X,cmd[3] = 0x%02X\n", + * __func__, cmd[0], cmd[1], cmd[2], cmd[3]); + */ + break; + + default: + E("%s: input length fault,len = %d!", __func__, len); + } +} + +void himax_mcu_in_cmd_init(void) +{ + I("%s: Entering!\n", __func__); +#ifdef CORE_IC + himax_in_parse_assign_cmd(ic_adr_ahb_addr_byte_0, pic_op->addr_ahb_addr_byte_0, sizeof(pic_op->addr_ahb_addr_byte_0)); + himax_in_parse_assign_cmd(ic_adr_ahb_rdata_byte_0, pic_op->addr_ahb_rdata_byte_0, sizeof(pic_op->addr_ahb_rdata_byte_0)); + himax_in_parse_assign_cmd(ic_adr_ahb_access_direction, pic_op->addr_ahb_access_direction, sizeof(pic_op->addr_ahb_access_direction)); + himax_in_parse_assign_cmd(ic_adr_conti, pic_op->addr_conti, sizeof(pic_op->addr_conti)); + himax_in_parse_assign_cmd(ic_adr_incr4, pic_op->addr_incr4, sizeof(pic_op->addr_incr4)); + himax_in_parse_assign_cmd(ic_adr_i2c_psw_lb, pic_op->adr_i2c_psw_lb, sizeof(pic_op->adr_i2c_psw_lb)); + himax_in_parse_assign_cmd(ic_adr_i2c_psw_ub, pic_op->adr_i2c_psw_ub, sizeof(pic_op->adr_i2c_psw_ub)); + himax_in_parse_assign_cmd(ic_cmd_ahb_access_direction_read, pic_op->data_ahb_access_direction_read, sizeof(pic_op->data_ahb_access_direction_read)); + himax_in_parse_assign_cmd(ic_cmd_conti, pic_op->data_conti, sizeof(pic_op->data_conti)); + himax_in_parse_assign_cmd(ic_cmd_incr4, pic_op->data_incr4, sizeof(pic_op->data_incr4)); + himax_in_parse_assign_cmd(ic_cmd_i2c_psw_lb, pic_op->data_i2c_psw_lb, sizeof(pic_op->data_i2c_psw_lb)); + himax_in_parse_assign_cmd(ic_cmd_i2c_psw_ub, pic_op->data_i2c_psw_ub, sizeof(pic_op->data_i2c_psw_ub)); + himax_in_parse_assign_cmd(ic_adr_tcon_on_rst, 
pic_op->addr_tcon_on_rst, sizeof(pic_op->addr_tcon_on_rst)); + himax_in_parse_assign_cmd(ic_addr_adc_on_rst, pic_op->addr_adc_on_rst, sizeof(pic_op->addr_adc_on_rst)); + himax_in_parse_assign_cmd(ic_adr_psl, pic_op->addr_psl, sizeof(pic_op->addr_psl)); + himax_in_parse_assign_cmd(ic_adr_cs_central_state, pic_op->addr_cs_central_state, sizeof(pic_op->addr_cs_central_state)); + himax_in_parse_assign_cmd(ic_cmd_rst, pic_op->data_rst, sizeof(pic_op->data_rst)); +#endif +#ifdef CORE_FW + himax_in_parse_assign_cmd(fw_addr_system_reset, pfw_op->addr_system_reset, sizeof(pfw_op->addr_system_reset)); + himax_in_parse_assign_cmd(fw_addr_safe_mode_release_pw, pfw_op->addr_safe_mode_release_pw, sizeof(pfw_op->addr_safe_mode_release_pw)); + himax_in_parse_assign_cmd(fw_addr_ctrl_fw, pfw_op->addr_ctrl_fw_isr, sizeof(pfw_op->addr_ctrl_fw_isr)); + himax_in_parse_assign_cmd(fw_addr_flag_reset_event, pfw_op->addr_flag_reset_event, sizeof(pfw_op->addr_flag_reset_event)); + himax_in_parse_assign_cmd(fw_addr_hsen_enable, pfw_op->addr_hsen_enable, sizeof(pfw_op->addr_hsen_enable)); + himax_in_parse_assign_cmd(fw_addr_smwp_enable, pfw_op->addr_smwp_enable, sizeof(pfw_op->addr_smwp_enable)); + himax_in_parse_assign_cmd(fw_addr_program_reload_from, pfw_op->addr_program_reload_from, sizeof(pfw_op->addr_program_reload_from)); + himax_in_parse_assign_cmd(fw_addr_program_reload_to, pfw_op->addr_program_reload_to, sizeof(pfw_op->addr_program_reload_to)); + himax_in_parse_assign_cmd(fw_addr_program_reload_page_write, pfw_op->addr_program_reload_page_write, sizeof(pfw_op->addr_program_reload_page_write)); + himax_in_parse_assign_cmd(fw_addr_raw_out_sel, pfw_op->addr_raw_out_sel, sizeof(pfw_op->addr_raw_out_sel)); + himax_in_parse_assign_cmd(fw_addr_reload_status, pfw_op->addr_reload_status, sizeof(pfw_op->addr_reload_status)); + himax_in_parse_assign_cmd(fw_addr_reload_crc32_result, pfw_op->addr_reload_crc32_result, sizeof(pfw_op->addr_reload_crc32_result)); + himax_in_parse_assign_cmd(fw_addr_reload_addr_from, pfw_op->addr_reload_addr_from, sizeof(pfw_op->addr_reload_addr_from)); + himax_in_parse_assign_cmd(fw_addr_reload_addr_cmd_beat, pfw_op->addr_reload_addr_cmd_beat, sizeof(pfw_op->addr_reload_addr_cmd_beat)); + himax_in_parse_assign_cmd(fw_addr_selftest_addr_en, pfw_op->addr_selftest_addr_en, sizeof(pfw_op->addr_selftest_addr_en)); + himax_in_parse_assign_cmd(fw_addr_criteria_addr, pfw_op->addr_criteria_addr, sizeof(pfw_op->addr_criteria_addr)); + himax_in_parse_assign_cmd(fw_addr_set_frame_addr, pfw_op->addr_set_frame_addr, sizeof(pfw_op->addr_set_frame_addr)); + himax_in_parse_assign_cmd(fw_addr_selftest_result_addr, pfw_op->addr_selftest_result_addr, sizeof(pfw_op->addr_selftest_result_addr)); + himax_in_parse_assign_cmd(fw_addr_sorting_mode_en, pfw_op->addr_sorting_mode_en, sizeof(pfw_op->addr_sorting_mode_en)); + himax_in_parse_assign_cmd(fw_addr_fw_mode_status, pfw_op->addr_fw_mode_status, sizeof(pfw_op->addr_fw_mode_status)); + himax_in_parse_assign_cmd(fw_addr_icid_addr, pfw_op->addr_icid_addr, sizeof(pfw_op->addr_icid_addr)); + himax_in_parse_assign_cmd(fw_addr_trigger_addr, pfw_op->addr_trigger_addr, sizeof(pfw_op->addr_trigger_addr)); + himax_in_parse_assign_cmd(fw_addr_fw_ver_addr, pfw_op->addr_fw_ver_addr, sizeof(pfw_op->addr_fw_ver_addr)); + himax_in_parse_assign_cmd(fw_addr_fw_cfg_addr, pfw_op->addr_fw_cfg_addr, sizeof(pfw_op->addr_fw_cfg_addr)); + himax_in_parse_assign_cmd(fw_addr_fw_vendor_addr, pfw_op->addr_fw_vendor_addr, sizeof(pfw_op->addr_fw_vendor_addr)); + 
himax_in_parse_assign_cmd(fw_addr_fw_state_addr, pfw_op->addr_fw_state_addr, sizeof(pfw_op->addr_fw_state_addr)); + himax_in_parse_assign_cmd(fw_addr_fw_dbg_msg_addr, pfw_op->addr_fw_dbg_msg_addr, sizeof(pfw_op->addr_fw_dbg_msg_addr)); + himax_in_parse_assign_cmd(fw_addr_chk_fw_status, pfw_op->addr_chk_fw_status, sizeof(pfw_op->addr_chk_fw_status)); + himax_in_parse_assign_cmd(fw_addr_dd_handshak_addr, pfw_op->addr_dd_handshak_addr, sizeof(pfw_op->addr_dd_handshak_addr)); + himax_in_parse_assign_cmd(fw_addr_dd_data_addr, pfw_op->addr_dd_data_addr, sizeof(pfw_op->addr_dd_data_addr)); + himax_in_parse_assign_cmd(fw_data_system_reset, pfw_op->data_system_reset, sizeof(pfw_op->data_system_reset)); + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_active, pfw_op->data_safe_mode_release_pw_active, sizeof(pfw_op->data_safe_mode_release_pw_active)); + himax_in_parse_assign_cmd(fw_data_clear, pfw_op->data_clear, sizeof(pfw_op->data_clear)); + himax_in_parse_assign_cmd(fw_data_safe_mode_release_pw_reset, pfw_op->data_safe_mode_release_pw_reset, sizeof(pfw_op->data_safe_mode_release_pw_reset)); + himax_in_parse_assign_cmd(fw_data_program_reload_start, pfw_op->data_program_reload_start, sizeof(pfw_op->data_program_reload_start)); + himax_in_parse_assign_cmd(fw_data_program_reload_compare, pfw_op->data_program_reload_compare, sizeof(pfw_op->data_program_reload_compare)); + himax_in_parse_assign_cmd(fw_data_program_reload_break, pfw_op->data_program_reload_break, sizeof(pfw_op->data_program_reload_break)); + himax_in_parse_assign_cmd(fw_data_selftest_request, pfw_op->data_selftest_request, sizeof(pfw_op->data_selftest_request)); + himax_in_parse_assign_cmd(fw_data_criteria_aa_top, pfw_op->data_criteria_aa_top, sizeof(pfw_op->data_criteria_aa_top)); + himax_in_parse_assign_cmd(fw_data_criteria_aa_bot, pfw_op->data_criteria_aa_bot, sizeof(pfw_op->data_criteria_aa_bot)); + himax_in_parse_assign_cmd(fw_data_criteria_key_top, pfw_op->data_criteria_key_top, sizeof(pfw_op->data_criteria_key_top)); + himax_in_parse_assign_cmd(fw_data_criteria_key_bot, pfw_op->data_criteria_key_bot, sizeof(pfw_op->data_criteria_key_bot)); + himax_in_parse_assign_cmd(fw_data_criteria_avg_top, pfw_op->data_criteria_avg_top, sizeof(pfw_op->data_criteria_avg_top)); + himax_in_parse_assign_cmd(fw_data_criteria_avg_bot, pfw_op->data_criteria_avg_bot, sizeof(pfw_op->data_criteria_avg_bot)); + himax_in_parse_assign_cmd(fw_data_set_frame, pfw_op->data_set_frame, sizeof(pfw_op->data_set_frame)); + himax_in_parse_assign_cmd(fw_data_selftest_ack_hb, pfw_op->data_selftest_ack_hb, sizeof(pfw_op->data_selftest_ack_hb)); + himax_in_parse_assign_cmd(fw_data_selftest_ack_lb, pfw_op->data_selftest_ack_lb, sizeof(pfw_op->data_selftest_ack_lb)); + himax_in_parse_assign_cmd(fw_data_selftest_pass, pfw_op->data_selftest_pass, sizeof(pfw_op->data_selftest_pass)); + himax_in_parse_assign_cmd(fw_data_normal_cmd, pfw_op->data_normal_cmd, sizeof(pfw_op->data_normal_cmd)); + himax_in_parse_assign_cmd(fw_data_normal_status, pfw_op->data_normal_status, sizeof(pfw_op->data_normal_status)); + himax_in_parse_assign_cmd(fw_data_sorting_cmd, pfw_op->data_sorting_cmd, sizeof(pfw_op->data_sorting_cmd)); + himax_in_parse_assign_cmd(fw_data_sorting_status, pfw_op->data_sorting_status, sizeof(pfw_op->data_sorting_status)); + himax_in_parse_assign_cmd(fw_data_dd_request, pfw_op->data_dd_request, sizeof(pfw_op->data_dd_request)); + himax_in_parse_assign_cmd(fw_data_dd_ack, pfw_op->data_dd_ack, sizeof(pfw_op->data_dd_ack)); + 
himax_in_parse_assign_cmd(fw_data_idle_dis_pwd, pfw_op->data_idle_dis_pwd, sizeof(pfw_op->data_idle_dis_pwd)); + himax_in_parse_assign_cmd(fw_data_idle_en_pwd, pfw_op->data_idle_en_pwd, sizeof(pfw_op->data_idle_en_pwd)); + himax_in_parse_assign_cmd(fw_data_rawdata_ready_hb, pfw_op->data_rawdata_ready_hb, sizeof(pfw_op->data_rawdata_ready_hb)); + himax_in_parse_assign_cmd(fw_data_rawdata_ready_lb, pfw_op->data_rawdata_ready_lb, sizeof(pfw_op->data_rawdata_ready_lb)); + himax_in_parse_assign_cmd(fw_addr_ahb_addr, pfw_op->addr_ahb_addr, sizeof(pfw_op->addr_ahb_addr)); + himax_in_parse_assign_cmd(fw_data_ahb_dis, pfw_op->data_ahb_dis, sizeof(pfw_op->data_ahb_dis)); + himax_in_parse_assign_cmd(fw_data_ahb_en, pfw_op->data_ahb_en, sizeof(pfw_op->data_ahb_en)); + himax_in_parse_assign_cmd(fw_addr_event_addr, pfw_op->addr_event_addr, sizeof(pfw_op->addr_event_addr)); + himax_in_parse_assign_cmd(fw_usb_detect_addr, pfw_op->addr_usb_detect, sizeof(pfw_op->addr_usb_detect)); +#endif +#ifdef CORE_FLASH + himax_in_parse_assign_cmd(flash_addr_spi200_trans_fmt, pflash_op->addr_spi200_trans_fmt, sizeof(pflash_op->addr_spi200_trans_fmt)); + himax_in_parse_assign_cmd(flash_addr_spi200_trans_ctrl, pflash_op->addr_spi200_trans_ctrl, sizeof(pflash_op->addr_spi200_trans_ctrl)); + himax_in_parse_assign_cmd(flash_addr_spi200_cmd, pflash_op->addr_spi200_cmd, sizeof(pflash_op->addr_spi200_cmd)); + himax_in_parse_assign_cmd(flash_addr_spi200_addr, pflash_op->addr_spi200_addr, sizeof(pflash_op->addr_spi200_addr)); + himax_in_parse_assign_cmd(flash_addr_spi200_data, pflash_op->addr_spi200_data, sizeof(pflash_op->addr_spi200_data)); + himax_in_parse_assign_cmd(flash_addr_spi200_bt_num, pflash_op->addr_spi200_bt_num, sizeof(pflash_op->addr_spi200_bt_num)); + himax_in_parse_assign_cmd(flash_data_spi200_trans_fmt, pflash_op->data_spi200_trans_fmt, sizeof(pflash_op->data_spi200_trans_fmt)); + himax_in_parse_assign_cmd(flash_data_spi200_trans_ctrl_1, pflash_op->data_spi200_trans_ctrl_1, sizeof(pflash_op->data_spi200_trans_ctrl_1)); + himax_in_parse_assign_cmd(flash_data_spi200_trans_ctrl_2, pflash_op->data_spi200_trans_ctrl_2, sizeof(pflash_op->data_spi200_trans_ctrl_2)); + himax_in_parse_assign_cmd(flash_data_spi200_trans_ctrl_3, pflash_op->data_spi200_trans_ctrl_3, sizeof(pflash_op->data_spi200_trans_ctrl_3)); + himax_in_parse_assign_cmd(flash_data_spi200_trans_ctrl_4, pflash_op->data_spi200_trans_ctrl_4, sizeof(pflash_op->data_spi200_trans_ctrl_4)); + himax_in_parse_assign_cmd(flash_data_spi200_trans_ctrl_5, pflash_op->data_spi200_trans_ctrl_5, sizeof(pflash_op->data_spi200_trans_ctrl_5)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_1, pflash_op->data_spi200_cmd_1, sizeof(pflash_op->data_spi200_cmd_1)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_2, pflash_op->data_spi200_cmd_2, sizeof(pflash_op->data_spi200_cmd_2)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_3, pflash_op->data_spi200_cmd_3, sizeof(pflash_op->data_spi200_cmd_3)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_4, pflash_op->data_spi200_cmd_4, sizeof(pflash_op->data_spi200_cmd_4)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_5, pflash_op->data_spi200_cmd_5, sizeof(pflash_op->data_spi200_cmd_5)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_6, pflash_op->data_spi200_cmd_6, sizeof(pflash_op->data_spi200_cmd_6)); + himax_in_parse_assign_cmd(flash_data_spi200_cmd_7, pflash_op->data_spi200_cmd_7, sizeof(pflash_op->data_spi200_cmd_7)); + himax_in_parse_assign_cmd(flash_data_spi200_addr, pflash_op->data_spi200_addr, 
sizeof(pflash_op->data_spi200_addr)); +#endif +#ifdef CORE_SRAM + /* sram start */ + himax_in_parse_assign_cmd(sram_adr_mkey, psram_op->addr_mkey, sizeof(psram_op->addr_mkey)); + himax_in_parse_assign_cmd(sram_adr_rawdata_addr, psram_op->addr_rawdata_addr, sizeof(psram_op->addr_rawdata_addr)); + himax_in_parse_assign_cmd(sram_adr_rawdata_end, psram_op->addr_rawdata_end, sizeof(psram_op->addr_rawdata_end)); + himax_in_parse_assign_cmd(sram_cmd_conti, psram_op->data_conti, sizeof(psram_op->data_conti)); + himax_in_parse_assign_cmd(sram_cmd_fin, psram_op->data_fin, sizeof(psram_op->data_fin)); + himax_in_parse_assign_cmd(sram_passwrd_start, psram_op->passwrd_start, sizeof(psram_op->passwrd_start)); + himax_in_parse_assign_cmd(sram_passwrd_end, psram_op->passwrd_end, sizeof(psram_op->passwrd_end)); + /* sram end */ +#endif +#ifdef CORE_DRIVER + himax_in_parse_assign_cmd(driver_addr_fw_define_flash_reload, pdriver_op->addr_fw_define_flash_reload, sizeof(pdriver_op->addr_fw_define_flash_reload)); + himax_in_parse_assign_cmd(driver_addr_fw_define_2nd_flash_reload, pdriver_op->addr_fw_define_2nd_flash_reload, sizeof(pdriver_op->addr_fw_define_2nd_flash_reload)); + himax_in_parse_assign_cmd(driver_addr_fw_define_int_is_edge, pdriver_op->addr_fw_define_int_is_edge, sizeof(pdriver_op->addr_fw_define_int_is_edge)); + himax_in_parse_assign_cmd(driver_addr_fw_define_rxnum_txnum_maxpt, pdriver_op->addr_fw_define_rxnum_txnum_maxpt, sizeof(pdriver_op->addr_fw_define_rxnum_txnum_maxpt)); + himax_in_parse_assign_cmd(driver_addr_fw_define_xy_res_enable, pdriver_op->addr_fw_define_xy_res_enable, sizeof(pdriver_op->addr_fw_define_xy_res_enable)); + himax_in_parse_assign_cmd(driver_addr_fw_define_x_y_res, pdriver_op->addr_fw_define_x_y_res, sizeof(pdriver_op->addr_fw_define_x_y_res)); + himax_in_parse_assign_cmd(driver_data_fw_define_flash_reload_dis, pdriver_op->data_fw_define_flash_reload_dis, sizeof(pdriver_op->data_fw_define_flash_reload_dis)); + himax_in_parse_assign_cmd(driver_data_fw_define_flash_reload_en, pdriver_op->data_fw_define_flash_reload_en, sizeof(pdriver_op->data_fw_define_flash_reload_en)); + himax_in_parse_assign_cmd(driver_data_fw_define_rxnum_txnum_maxpt_sorting, pdriver_op->data_fw_define_rxnum_txnum_maxpt_sorting, sizeof(pdriver_op->data_fw_define_rxnum_txnum_maxpt_sorting)); + himax_in_parse_assign_cmd(driver_data_fw_define_rxnum_txnum_maxpt_normal, pdriver_op->data_fw_define_rxnum_txnum_maxpt_normal, sizeof(pdriver_op->data_fw_define_rxnum_txnum_maxpt_normal)); +#endif +#ifdef HX_ZERO_FLASH + himax_in_parse_assign_cmd(zf_addr_dis_flash_reload, pzf_op->addr_dis_flash_reload, sizeof(pzf_op->addr_dis_flash_reload)); + himax_in_parse_assign_cmd(zf_data_dis_flash_reload, pzf_op->data_dis_flash_reload, sizeof(pzf_op->data_dis_flash_reload)); + himax_in_parse_assign_cmd(zf_addr_system_reset, pzf_op->addr_system_reset, sizeof(pzf_op->addr_system_reset)); + himax_in_parse_assign_cmd(zf_data_system_reset, pzf_op->data_system_reset, sizeof(pzf_op->data_system_reset)); + himax_in_parse_assign_cmd(zf_data_sram_start_addr, pzf_op->data_sram_start_addr, sizeof(pzf_op->data_sram_start_addr)); + himax_in_parse_assign_cmd(zf_data_sram_clean, pzf_op->data_sram_clean, sizeof(pzf_op->data_sram_clean)); + himax_in_parse_assign_cmd(zf_data_cfg_info, pzf_op->data_cfg_info, sizeof(pzf_op->data_cfg_info)); + himax_in_parse_assign_cmd(zf_data_fw_cfg, pzf_op->data_fw_cfg, sizeof(pzf_op->data_fw_cfg)); + himax_in_parse_assign_cmd(zf_data_adc_cfg_1, pzf_op->data_adc_cfg_1, sizeof(pzf_op->data_adc_cfg_1)); + 
himax_in_parse_assign_cmd(zf_data_adc_cfg_2, pzf_op->data_adc_cfg_2, sizeof(pzf_op->data_adc_cfg_2)); + himax_in_parse_assign_cmd(zf_data_adc_cfg_3, pzf_op->data_adc_cfg_3, sizeof(pzf_op->data_adc_cfg_3)); +#endif +} + +/* init end */ +#endif diff --git a/drivers/input/touchscreen/hxchipset/himax_platform.c b/drivers/input/touchscreen/hxchipset/himax_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..95564672112b1c4c015a33e774dfda3908dd01ec --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_platform.c @@ -0,0 +1,802 @@ +/* + * Himax Android Driver Sample Code for QCT platform + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "himax_platform.h" +#include "himax_common.h" + +int i2c_error_count; +int irq_enable_count; + +int himax_dev_set(struct himax_ts_data *ts) +{ + int ret = 0; + + ts->input_dev = input_allocate_device(); + if (ts->input_dev == NULL) { + ret = -ENOMEM; + E("%s: Failed to allocate input device\n", __func__); + return ret; + } + + ts->input_dev->name = "himax-touchscreen"; + return ret; +} + +int himax_input_register_device(struct input_dev *input_dev) +{ + return input_register_device(input_dev); +} + +#if defined(HX_PLATFOME_DEFINE_KEY) +void himax_platform_key(void) +{ + I("Nothing to be done! Plz cancel it!\n"); +} +#endif + +void himax_vk_parser(struct device_node *dt, + struct himax_i2c_platform_data *pdata) +{ + u32 data = 0; + uint8_t cnt = 0, i = 0; + uint32_t coords[4] = {0}; + struct device_node *node, *pp = NULL; + struct himax_virtual_key *vk; + + node = of_parse_phandle(dt, "virtualkey", 0); + if (node == NULL) { + I(" DT-No vk info in DT"); + return; + } + + cnt = of_get_child_count(node); + if (!cnt) { + of_node_put(node); + return; + } + + vk = kcalloc(cnt, sizeof(*vk), GFP_KERNEL); + if (!vk) { + E(" %s: allocate memory failed!", __func__); + of_node_put(node); + return; + } + + for_each_child_of_node(node, pp) { + if (of_property_read_u32(pp, "idx", &data) != 0) + continue; + vk[i].index = data; + + if (of_property_read_u32_array(pp, "range", coords, 4) != 0) + continue; + vk[i].x_range_min = coords[0], vk[i].x_range_max = coords[1]; + vk[i].y_range_min = coords[2], vk[i].y_range_max = coords[3]; + + i++; + } + + pdata->virtual_key = vk; + of_node_put(node); + + for (i = 0; i < cnt; i++) + I(" vk[%d] idx:%d x_min:%d, y_max:%d", i, pdata->virtual_key[i].index, + pdata->virtual_key[i].x_range_min, pdata->virtual_key[i].y_range_max); +} + +int himax_parse_dt(struct himax_ts_data *ts, struct himax_i2c_platform_data *pdata) +{ + int rc, coords_size = 0; + uint32_t coords[4] = {0}; + struct property *prop; + struct device_node *dt = private_ts->client->dev.of_node; + u32 data = 0; + + prop = of_find_property(dt, "himax,panel-coords", NULL); + if (prop) { + coords_size = prop->length / sizeof(u32); + + if (coords_size != 4) + D(" %s:Invalid panel coords size %d", __func__, coords_size); + } + + if (of_property_read_u32_array(dt, "himax,panel-coords", coords, coords_size) == 0) { + pdata->abs_x_min = coords[0], pdata->abs_x_max = coords[1]; + 
pdata->abs_y_min = coords[2], pdata->abs_y_max = coords[3]; + I(" DT-%s:panel-coords = %d, %d, %d, %d\n", __func__, pdata->abs_x_min, + pdata->abs_x_max, pdata->abs_y_min, pdata->abs_y_max); + } + + prop = of_find_property(dt, "himax,display-coords", NULL); + + if (prop) { + coords_size = prop->length / sizeof(u32); + + if (coords_size != 4) + D(" %s:Invalid display coords size %d", __func__, coords_size); + } + + rc = of_property_read_u32_array(dt, "himax,display-coords", coords, coords_size); + + if (rc && (rc != -EINVAL)) { + D(" %s:Fail to read display-coords %d\n", __func__, rc); + return rc; + } + + pdata->screenWidth = coords[1]; + pdata->screenHeight = coords[3]; + I(" DT-%s:display-coords = (%d, %d)", __func__, pdata->screenWidth, + pdata->screenHeight); + pdata->gpio_irq = of_get_named_gpio(dt, "himax,irq-gpio", 0); + + if (!gpio_is_valid(pdata->gpio_irq)) + I(" DT:gpio_irq value is not valid\n"); + + pdata->gpio_reset = of_get_named_gpio(dt, "himax,rst-gpio", 0); + + if (!gpio_is_valid(pdata->gpio_reset)) + I(" DT:gpio_rst value is not valid\n"); + + pdata->gpio_3v3_en = of_get_named_gpio(dt, "himax,3v3-gpio", 0); + + if (!gpio_is_valid(pdata->gpio_3v3_en)) + I(" DT:gpio_3v3_en value is not valid\n"); + + I(" DT:gpio_irq=%d, gpio_rst=%d, gpio_3v3_en=%d", pdata->gpio_irq, pdata->gpio_reset, pdata->gpio_3v3_en); + + if (of_property_read_u32(dt, "himax,report_type", &data) == 0) { + pdata->protocol_type = data; + I(" DT:protocol_type=%d", pdata->protocol_type); + } + + himax_vk_parser(dt, pdata); + return 0; +} + +int himax_bus_read(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRetry) +{ + int retry; + struct i2c_client *client = private_ts->client; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = 1, + .buf = &command, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = data, + } + }; + + mutex_lock(&private_ts->rw_lock); + + for (retry = 0; retry < toRetry; retry++) { + if (i2c_transfer(client->adapter, msg, 2) == 2) + break; + + msleep(20); + } + + if (retry == toRetry) { + E("%s: i2c_read_block retry over %d\n", __func__, toRetry); + i2c_error_count = toRetry; + mutex_unlock(&private_ts->rw_lock); + return -EIO; + } + + mutex_unlock(&private_ts->rw_lock); + return 0; +} + +int himax_bus_write(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRetry) +{ + int retry; + uint8_t buf[length + 1]; + struct i2c_client *client = private_ts->client; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = length + 1, + .buf = buf, + } + }; + + mutex_lock(&private_ts->rw_lock); + buf[0] = command; + memcpy(buf + 1, data, length); + + for (retry = 0; retry < toRetry; retry++) { + if (i2c_transfer(client->adapter, msg, 1) == 1) + break; + + msleep(20); + } + + if (retry == toRetry) { + E("%s: i2c_write_block retry over %d\n", __func__, toRetry); + i2c_error_count = toRetry; + mutex_unlock(&private_ts->rw_lock); + return -EIO; + } + + mutex_unlock(&private_ts->rw_lock); + return 0; +} + +int himax_bus_write_command(uint8_t command, uint8_t toRetry) +{ + return himax_bus_write(command, NULL, 0, toRetry); +} + +int himax_bus_master_write(uint8_t *data, uint32_t length, uint8_t toRetry) +{ + int retry; + uint8_t buf[length]; + struct i2c_client *client = private_ts->client; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = length, + .buf = buf, + } + }; + + mutex_lock(&private_ts->rw_lock); + memcpy(buf, data, length); + + for (retry = 0; retry < toRetry; retry++) { + 
if (i2c_transfer(client->adapter, msg, 1) == 1) + break; + + msleep(20); + } + + if (retry == toRetry) { + E("%s: i2c_write_block retry over %d\n", __func__, toRetry); + i2c_error_count = toRetry; + mutex_unlock(&private_ts->rw_lock); + return -EIO; + } + + mutex_unlock(&private_ts->rw_lock); + return 0; +} + +void himax_int_enable(int enable) +{ + int irqnum = 0; + + irqnum = private_ts->client->irq; + + if (enable == 1 && irq_enable_count == 0) { + enable_irq(irqnum); + irq_enable_count++; + private_ts->irq_enabled = 1; + } else if (enable == 0 && irq_enable_count == 1) { + disable_irq_nosync(irqnum); + irq_enable_count--; + private_ts->irq_enabled = 0; + } + + I("irq_enable_count = %d", irq_enable_count); +} + +#ifdef HX_RST_PIN_FUNC +void himax_rst_gpio_set(int pinnum, uint8_t value) +{ + gpio_direction_output(pinnum, value); +} +#endif + +uint8_t himax_int_gpio_read(int pinnum) +{ + return gpio_get_value(pinnum); +} + +#if defined(CONFIG_HMX_DB) +static int himax_regulator_configure(struct himax_i2c_platform_data *pdata) +{ + int retval; + struct i2c_client *client = private_ts->client; + + pdata->vcc_dig = regulator_get(&client->dev, "vdd"); + if (IS_ERR(pdata->vcc_dig)) { + E("%s: Failed to get regulator vdd\n", __func__); + retval = PTR_ERR(pdata->vcc_dig); + return retval; + } + + pdata->vcc_ana = regulator_get(&client->dev, "avdd"); + + if (IS_ERR(pdata->vcc_ana)) { + E("%s: Failed to get regulator avdd\n", __func__); + retval = PTR_ERR(pdata->vcc_ana); + regulator_put(pdata->vcc_ana); + return retval; + } + + return 0; +}; + +static int himax_power_on(struct himax_i2c_platform_data *pdata, bool on) +{ + int retval; + + if (on) { + retval = regulator_enable(pdata->vcc_dig); + + if (retval) { + E("%s: Failed to enable regulator vdd\n", __func__); + return retval; + } + + msleep(100); + retval = regulator_enable(pdata->vcc_ana); + + if (retval) { + E("%s: Failed to enable regulator avdd\n", __func__); + regulator_disable(pdata->vcc_dig); + return retval; + } + } else { + regulator_disable(pdata->vcc_dig); + regulator_disable(pdata->vcc_ana); + } + + return 0; +} + +int himax_gpio_power_config(struct himax_i2c_platform_data *pdata) +{ + int error; + struct i2c_client *client = private_ts->client; + + error = himax_regulator_configure(pdata); + if (error) { + E("Failed to initialize hardware\n"); + goto err_regulator_not_on; + } + +#ifdef HX_RST_PIN_FUNC + + if (gpio_is_valid(pdata->gpio_reset)) { + /* configure touchscreen reset out gpio */ + error = gpio_request(pdata->gpio_reset, "hmx_reset_gpio"); + + if (error) { + E("unable to request gpio [%d]\n", pdata->gpio_reset); + goto err_regulator_on; + } + + error = gpio_direction_output(pdata->gpio_reset, 0); + + if (error) { + E("unable to set direction for gpio [%d]\n", pdata->gpio_reset); + goto err_gpio_reset_req; + } + } + +#endif + error = himax_power_on(pdata, true); + + if (error) { + E("Failed to power on hardware\n"); + goto err_gpio_reset_req; + } + + if (gpio_is_valid(pdata->gpio_irq)) { + /* configure touchscreen irq gpio */ + error = gpio_request(pdata->gpio_irq, "hmx_gpio_irq"); + + if (error) { + E("unable to request gpio [%d]\n", pdata->gpio_irq); + goto err_power_on; + } + + error = gpio_direction_input(pdata->gpio_irq); + + if (error) { + E("unable to set direction for gpio [%d]\n", pdata->gpio_irq); + goto err_gpio_irq_req; + } + + client->irq = gpio_to_irq(pdata->gpio_irq); + } else { + E("irq gpio not provided\n"); + goto err_power_on; + } + + msleep(20); +#ifdef HX_RST_PIN_FUNC + + if 
(gpio_is_valid(pdata->gpio_reset)) { + error = gpio_direction_output(pdata->gpio_reset, 1); + + if (error) { + E("unable to set direction for gpio [%d]\n", + pdata->gpio_reset); + goto err_gpio_irq_req; + } + } + +#endif + return 0; +err_gpio_irq_req: + + if (gpio_is_valid(pdata->gpio_irq)) + gpio_free(pdata->gpio_irq); + +err_power_on: + himax_power_on(pdata, false); +err_gpio_reset_req: +#ifdef HX_RST_PIN_FUNC + + if (gpio_is_valid(pdata->gpio_reset)) + gpio_free(pdata->gpio_reset); + +err_regulator_on: +#endif +err_regulator_not_on: + return error; +} + +#else +int himax_gpio_power_config(struct himax_i2c_platform_data *pdata) +{ + int error = 0; + struct i2c_client *client = private_ts->client; +#ifdef HX_RST_PIN_FUNC + + if (pdata->gpio_reset >= 0) { + error = gpio_request(pdata->gpio_reset, "himax-reset"); + + if (error < 0) { + E("%s: request reset pin failed\n", __func__); + return error; + } + + error = gpio_direction_output(pdata->gpio_reset, 0); + + if (error) { + E("unable to set direction for gpio [%d]\n", pdata->gpio_reset); + return error; + } + } + +#endif + + if (pdata->gpio_3v3_en >= 0) { + error = gpio_request(pdata->gpio_3v3_en, "himax-3v3_en"); + + if (error < 0) { + E("%s: request 3v3_en pin failed\n", __func__); + return error; + } + + gpio_direction_output(pdata->gpio_3v3_en, 1); + I("3v3_en pin =%d\n", gpio_get_value(pdata->gpio_3v3_en)); + } + + if (gpio_is_valid(pdata->gpio_irq)) { + /* configure touchscreen irq gpio */ + error = gpio_request(pdata->gpio_irq, "himax_gpio_irq"); + + if (error) { + E("unable to request gpio [%d]\n", pdata->gpio_irq); + return error; + } + + error = gpio_direction_input(pdata->gpio_irq); + + if (error) { + E("unable to set direction for gpio [%d]\n", pdata->gpio_irq); + return error; + } + + client->irq = gpio_to_irq(pdata->gpio_irq); + } else { + E("irq gpio not provided\n"); + return error; + } + + msleep(20); +#ifdef HX_RST_PIN_FUNC + + if (pdata->gpio_reset >= 0) { + error = gpio_direction_output(pdata->gpio_reset, 1); + + if (error) { + E("unable to set direction for gpio [%d]\n", pdata->gpio_reset); + return error; + } + } + +#endif + return error; +} + +#endif + +static void himax_ts_isr_func(struct himax_ts_data *ts) +{ + himax_ts_work(ts); +} + +irqreturn_t himax_ts_thread(int irq, void *ptr) +{ + himax_ts_isr_func((struct himax_ts_data *)ptr); + + return IRQ_HANDLED; +} + +static void himax_ts_work_func(struct work_struct *work) +{ + struct himax_ts_data *ts; + + ts = container_of(work, struct himax_ts_data, work); + himax_ts_work(ts); +} + +int himax_int_register_trigger(void) +{ + int ret = 0; + struct himax_ts_data *ts = private_ts; + struct i2c_client *client = private_ts->client; + + if (ic_data->HX_INT_IS_EDGE) { + I("%s edge trigger falling\n ", __func__); + ret = request_threaded_irq(client->irq, NULL, himax_ts_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, client->name, ts); + } else { + I("%s level trigger low\n ", __func__); + ret = request_threaded_irq(client->irq, NULL, himax_ts_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, ts); + } + + return ret; +} + +int himax_int_en_set(void) +{ + int ret = NO_ERR; + + ret = himax_int_register_trigger(); + return ret; +} + +int himax_ts_register_interrupt(void) +{ + struct himax_ts_data *ts = private_ts; + struct i2c_client *client = private_ts->client; + int ret = 0; + + ts->irq_enabled = 0; + + /* Work function */ + if (client->irq) {/* INT mode */ + ts->use_irq = 1; + ret = himax_int_register_trigger(); + + if (ret == 0) { + ts->irq_enabled = 1; +
irq_enable_count = 1; + I("%s: irq enabled at qpio: %d\n", __func__, client->irq); +#ifdef HX_SMART_WAKEUP + irq_set_irq_wake(client->irq, 1); +#endif + } else { + ts->use_irq = 0; + E("%s: request_irq failed\n", __func__); + } + } else + I("%s: client->irq is empty, use polling mode.\n", __func__); + + /* if use polling mode need to disable HX_ESD_RECOVERY function */ + if (!ts->use_irq) { + ts->himax_wq = create_singlethread_workqueue("himax_touch"); + INIT_WORK(&ts->work, himax_ts_work_func); + hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ts->timer.function = himax_ts_timer_func; + hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); + I("%s: polling mode enabled\n", __func__); + } + + return ret; +} + +static int himax_common_suspend(struct device *dev) +{ + struct himax_ts_data *ts = dev_get_drvdata(dev); + + I("%s: enter\n", __func__); + himax_chip_common_suspend(ts); + return 0; +} + +static int himax_common_resume(struct device *dev) +{ + struct himax_ts_data *ts = dev_get_drvdata(dev); + + I("%s: enter\n", __func__); + himax_chip_common_resume(ts); + return 0; +} + + +#if defined(CONFIG_DRM) + +int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) +{ + struct msm_drm_notifier *evdata = data; + int *blank; + struct himax_ts_data *ts = + container_of(self, struct himax_ts_data, fb_notif); + + if (!evdata || (evdata->id != 0)) + return 0; + + I("DRM %s\n", __func__); + + if (evdata->data && event == MSM_DRM_EVENT_BLANK && ts && ts->client) { + blank = evdata->data; + + switch (*blank) { + case MSM_DRM_BLANK_UNBLANK: + himax_common_resume(&ts->client->dev); + break; + case MSM_DRM_BLANK_POWERDOWN: + himax_common_suspend(&ts->client->dev); + break; + } + } + + return 0; +} + +#elif defined(CONFIG_FB) + +int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) +{ + struct fb_event *evdata = data; + int *blank; + struct himax_ts_data *ts = + container_of(self, struct himax_ts_data, fb_notif); + + if (!evdata || (evdata->id != 0)) + return 0; + + I("FB %s\n", __func__); + + if (evdata && evdata->data && event == FB_EVENT_BLANK && ts && + ts->client) { + blank = evdata->data; + + switch (*blank) { + case FB_BLANK_UNBLANK: + himax_common_resume(&ts->client->dev); + break; + case FB_BLANK_POWERDOWN: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_NORMAL: + himax_common_suspend(&ts->client->dev); + break; + } + } + + return 0; +} +#endif + +int himax_chip_common_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int ret = 0; + struct himax_ts_data *ts; + + I("%s:Enter\n", __func__); + + /* Check I2C functionality */ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + E("%s: i2c check functionality error\n", __func__); + ret = -ENODEV; + goto err_check_functionality_failed; + } + + ts = kzalloc(sizeof(struct himax_ts_data), GFP_KERNEL); + if (ts == NULL) { + E("%s: allocate himax_ts_data failed\n", __func__); + ret = -ENOMEM; + goto err_alloc_data_failed; + } + + i2c_set_clientdata(client, ts); + ts->client = client; + ts->dev = &client->dev; + mutex_init(&ts->rw_lock); + private_ts = ts; + + ret = himax_chip_common_init(); + +err_alloc_data_failed: +err_check_functionality_failed: + + return ret; +} + +int himax_chip_common_remove(struct i2c_client *client) +{ + himax_chip_common_deinit(); + + return 0; +} + +static const struct i2c_device_id himax_common_ts_id[] = { + {HIMAX_common_NAME, 0 }, + {} +}; + +static const struct dev_pm_ops 
himax_common_pm_ops = { +#if (!defined(CONFIG_FB) && !defined(CONFIG_DRM)) + .suspend = himax_common_suspend, + .resume = himax_common_resume, +#endif +}; + +#ifdef CONFIG_OF +static const struct of_device_id himax_match_table[] = { + {.compatible = "himax,hxcommon" }, + {}, +}; +#else +#define himax_match_table NULL +#endif + +static struct i2c_driver himax_common_driver = { + .id_table = himax_common_ts_id, + .probe = himax_chip_common_probe, + .remove = himax_chip_common_remove, + .driver = { + .name = HIMAX_common_NAME, + .owner = THIS_MODULE, + .of_match_table = himax_match_table, +#ifdef CONFIG_PM + .pm = &himax_common_pm_ops, +#endif + }, +}; + +static int __init himax_common_init(void) +{ + I("Himax common touch panel driver init\n"); + i2c_add_driver(&himax_common_driver); + + return 0; +} + +static void __exit himax_common_exit(void) +{ + i2c_del_driver(&himax_common_driver); +} + +module_init(himax_common_init); +module_exit(himax_common_exit); + +MODULE_DESCRIPTION("Himax_common driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/input/touchscreen/hxchipset/himax_platform.h b/drivers/input/touchscreen/hxchipset/himax_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..ac6e37d55dce5b869458a0d6c2f81351b60f2326 --- /dev/null +++ b/drivers/input/touchscreen/hxchipset/himax_platform.h @@ -0,0 +1,136 @@ +/* + * Himax Android Driver Sample Code for QCT platform + * + * Copyright (C) 2018 Himax Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef HIMAX_PLATFORM_H +#define HIMAX_PLATFORM_H + +#include +#include +#include +#include +#include +#include +#if defined(CONFIG_HMX_DB) + #include +#endif + +#define HIMAX_I2C_RETRY_TIMES 10 + +#if defined(CONFIG_TOUCHSCREEN_HIMAX_DEBUG) +#define D(x...) pr_debug("[HXTP] " x) +#define I(x...) pr_info("[HXTP] " x) +#define W(x...) pr_warn("[HXTP][WARNING] " x) +#define E(x...) pr_err("[HXTP][ERROR] " x) +#define DIF(x...) \ +do { \ + if (debug_flag) \ + pr_debug("[HXTP][DEBUG] " x); \ + } while (0) +#else +#define D(x...) +#define I(x...) +#define W(x...) +#define E(x...) +#define DIF(x...)
+#endif + +#if defined(CONFIG_HMX_DB) + /* Analog voltage @2.7 V */ + #define HX_VTG_MIN_UV 2700000 + #define HX_VTG_MAX_UV 3300000 + #define HX_ACTIVE_LOAD_UA 15000 + #define HX_LPM_LOAD_UA 10 + /* Digital voltage @1.8 V */ + #define HX_VTG_DIG_MIN_UV 1800000 + #define HX_VTG_DIG_MAX_UV 1800000 + #define HX_ACTIVE_LOAD_DIG_UA 10000 + #define HX_LPM_LOAD_DIG_UA 10 + + #define HX_I2C_VTG_MIN_UV 1800000 + #define HX_I2C_VTG_MAX_UV 1800000 + #define HX_I2C_LOAD_UA 10000 + #define HX_I2C_LPM_LOAD_UA 10 +#endif + +#define HIMAX_common_NAME "himax_tp" +#define HIMAX_I2C_ADDR 0x48 +#define INPUT_DEV_NAME "himax-touchscreen" + +struct himax_i2c_platform_data { + int abs_x_min; + int abs_x_max; + int abs_x_fuzz; + int abs_y_min; + int abs_y_max; + int abs_y_fuzz; + int abs_pressure_min; + int abs_pressure_max; + int abs_pressure_fuzz; + int abs_width_min; + int abs_width_max; + int screenWidth; + int screenHeight; + uint8_t fw_version; + uint8_t tw_id; + uint8_t powerOff3V3; + uint8_t cable_config[2]; + uint8_t protocol_type; + int gpio_irq; + int gpio_reset; + int gpio_3v3_en; + int (*power)(int on); + void (*reset)(void); + struct himax_virtual_key *virtual_key; + struct kobject *vk_obj; + struct kobj_attribute *vk2Use; + + int hx_config_size; +#if defined(CONFIG_HMX_DB) + bool i2c_pull_up; + bool digital_pwr_regulator; + int reset_gpio; + u32 reset_gpio_flags; + int irq_gpio; + u32 irq_gpio_flags; + + struct regulator *vcc_ana; /* For Dragon Board */ + struct regulator *vcc_dig; /* For Dragon Board */ + struct regulator *vcc_i2c; /* For Dragon Board */ +#endif +}; + +extern int i2c_error_count; +extern int irq_enable_count; +extern int himax_bus_read(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRetry); +extern int himax_bus_write(uint8_t command, uint8_t *data, uint32_t length, uint8_t toRetry); +extern int himax_bus_write_command(uint8_t command, uint8_t toRetry); +extern int himax_bus_master_write(uint8_t *data, uint32_t length, uint8_t toRetry); +extern void himax_int_enable(int enable); +extern int himax_ts_register_interrupt(void); +extern uint8_t himax_int_gpio_read(int pinnum); + +extern int himax_gpio_power_config(struct himax_i2c_platform_data *pdata); + + +#if defined(CONFIG_DRM) || defined(CONFIG_FB) + extern int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); +#endif + +#if defined(HX_PLATFOME_DEFINE_KEY) + extern void himax_platform_key(void) +#endif + +#endif diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c index e30f6e4a5dfdc560cd5e498b5c81bc32eb83248e..21aab405ddbbe3c9cad0085a3170b4fd50142689 100644 --- a/drivers/input/touchscreen/st/fts.c +++ b/drivers/input/touchscreen/st/fts.c @@ -2780,14 +2780,16 @@ static void fts_enter_pointer_event_handler(struct fts_ts_info *info, touchId = event[1] & 0x0F; touchcount = (event[1] & 0xF0) >> 4; touchsize = (event[5] & 0xC0) >> 6; - major = (event[5] & 0x1F); // bit0-bit4: major - minor = event[6]; // event6:minor + major = (event[5] & 0x1F); // bit0-bit4: major + minor = event[6]; // event6:minor __set_bit(touchId, &info->touch_id); x = (event[2] << 4) | (event[4] & 0xF0) >> 4; y = (event[3] << 4) | (event[4] & 0x0F); z = (event[5] & 0x3F); + if (z == 0) + z = 10; if (x == X_AXIS_MAX) x--; @@ -2796,54 +2798,31 @@ static void fts_enter_pointer_event_handler(struct fts_ts_info *info, y--; input_mt_slot(info->input_dev, touchId); -/*#ifdef STYLUS_MODE*/ - /** - * TODO: check with ST how FW report a - * stylus touch in the touch event, - * this is an example 
code - */ - /*if (info->stylus_enabled == 1 && touchsize == STYLUS_SIZE) {*/ - /*__set_bit(touchId, &info->stylus_id);*/ - /*input_mt_report_slot_state(info->input_dev, MT_TOOL_PEN, 1);*/ - /*logError(0, "%s %s : It is a stylus!\n",tag,__func__); */ - /*} else*/ - /*input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 1);*/ -/*#else*/ - /*input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 1);*/ -/*#endif*/ input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 1); - /*logError(0,*/ - /*"%s %s:TouchID = %d, Touchcount = %d, minor:%d, major:%d\n",*/ - /*tag, __func__, touchId, touchcount, minor, major);*/ - /*logError(0,*/ - /*"%s %s : TouchID = %d,Touchcount = %d\n",*/ - /*tag, __func__, touchId,touchcount);*/ - /*if (touchcount == 1) {*/ - input_report_key(info->input_dev, BTN_TOUCH, 1); - input_report_key(info->input_dev, BTN_TOOL_FINGER, 1); - /*}*/ - /* input_report_abs(info->input_dev, ABS_MT_TRACKING_ID, touchId);*/ + + logError(0, "%s %s : TouchID = %d,Touchcount = %d,minor:%d,major:%d\n", + tag, __func__, touchId, touchcount, minor, major); + + if (touchcount == 1) { + input_report_key(info->input_dev, BTN_TOUCH, 1); + input_report_key(info->input_dev, BTN_TOOL_FINGER, 1); + } input_report_abs(info->input_dev, ABS_MT_POSITION_X, x); input_report_abs(info->input_dev, ABS_MT_POSITION_Y, y); - /*input_report_abs(info->input_dev, ABS_MT_TOUCH_MAJOR, z);*/ - /*input_report_abs(info->input_dev, ABS_MT_TOUCH_MINOR, z);*/ - /*input_report_abs(info->input_dev, ABS_MT_PRESSURE, z);*/ input_report_abs(info->input_dev, ABS_MT_TOUCH_MAJOR, major); input_report_abs(info->input_dev, ABS_MT_TOUCH_MINOR, minor); + input_report_abs(info->input_dev, ABS_MT_PRESSURE, z); input_report_abs(info->input_dev, ABS_MT_DISTANCE, distance); - /*logError(0,*/ - /*"%s%s:Event 0x%02x - ID[%d], (x, y, z) = (%3d, %3d, %3d)\n",*/ - /*tag, __func__, *event, touchId, x, y, z);*/ - no_report: return; - /* return fts_next_event(event); */ + } /* EventId : 0x04 */ static void fts_leave_pointer_event_handler(struct fts_ts_info *info, - unsigned char *event) + unsigned char *event) { + unsigned char touchId, touchcount; u8 touchsize; @@ -2851,37 +2830,18 @@ static void fts_leave_pointer_event_handler(struct fts_ts_info *info, touchcount = (event[1] & 0xF0) >> 4; touchsize = (event[5] & 0xC0) >> 6; - __clear_bit(touchId, &info->touch_id); - input_mt_slot(info->input_dev, touchId); -/*#ifdef STYLUS_MODE*/ - /** - * TODO: check with ST how FW report a stylus touch - * in the touch event, this is an example code - */ - /*if (info->stylus_enabled == 1 && touchsize == STYLUS_SIZE) {*/ - /* __clear_bit(touchId, &info->stylus_id);*/ - /* input_mt_report_slot_state(info->input_dev, MT_TOOL_PEN, 0);*/ - /*logError(0, "%s %s : It is a stylus!\n",tag,__func__);*/ - /*} else*/ - /*input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 0);*/ -/*#else*/ - /*input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 0);*/ -/*#endif*/ + + __clear_bit(touchId, &info->touch_id); input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 0); - /* input_mt_report_slot_state(info->input_dev, MT_TOOL_FINGER, 0);*/ - /*logError(0, "%s %s : TouchID = %d, Touchcount = %d\n",*/ - /*tag,__func__,touchId,touchcount);*/ - /*if (touchcount == 0) {*/ - input_report_key(info->input_dev, BTN_TOUCH, 0); - input_report_key(info->input_dev, BTN_TOOL_FINGER, 0); - /*}*/ + + if (touchcount == 0) { + input_report_key(info->input_dev, BTN_TOUCH, 0); + input_report_key(info->input_dev, BTN_TOOL_FINGER, 0); + } 
input_report_abs(info->input_dev, ABS_MT_TRACKING_ID, -1); - /*logError(0, "%s %s : Event 0x%02x - release ID[%d]\n",*/ - /*tag, __func__, event[0], touchId);*/ - /*return fts_next_event(event);*/ } /* EventId : 0x05 */ @@ -3835,47 +3795,6 @@ static int fts_mode_handler(struct fts_ts_info *info, int force) return res; } -int fts_chip_power_switch(struct fts_ts_info *info, int on) -{ - int error = 0; - - logError(0, "%s %s:will set power mode %d...\n", tag, __func__, on); - if (on == 0) { - if (info->pwr_reg) { - error = regulator_disable(info->pwr_reg); - if (error < 0) - logError(1, "%s %s: Failed to disable DVDD\n", - tag, __func__); - } - - if (info->bus_reg) { - error = regulator_disable(info->bus_reg); - if (error < 0) - logError(1, "%s %s: Failed to disable AVDD\n", - tag, __func__); - - } - if (info->bdata->reset_gpio != GPIO_NOT_DEFINED) - gpio_set_value(info->bdata->reset_gpio, 0); - } else if (on == 1) { - if (info->bus_reg) { - error = regulator_enable(info->bus_reg); - if (error < 0) - logError(1, "%s %s: Failed to enable AVDD\n", - tag, __func__); - } - if (info->pwr_reg) { - error = regulator_enable(info->pwr_reg); - if (error < 0) - logError(1, "%s %s: Failed to enable DVDD\n", - tag, __func__); - } - - } - - return error; -} - static void fts_resume_work(struct work_struct *work) { @@ -3886,19 +3805,22 @@ static void fts_resume_work(struct work_struct *work) __pm_wakeup_event(&info->wakeup_source, HZ); info->resume_bit = 1; - fts_chip_power_switch(info, 1); #ifdef USE_NOISE_PARAM readNoiseParameters(noise_params); #endif - cleanUp(1); + fts_system_reset(); + #ifdef USE_NOISE_PARAM writeNoiseParameters(noise_params); #endif release_all_touches(info); + fts_mode_handler(info, 0); info->sensor_sleep = false; + + fts_enableInterrupt(); } @@ -3913,12 +3835,11 @@ static void fts_suspend_work(struct work_struct *work) info->resume_bit = 0; fts_mode_handler(info, 0); + release_all_touches(info); info->sensor_sleep = true; - fts_disableInterrupt(); - fts_chip_power_switch(info, 0); - + fts_enableInterrupt(); } @@ -3932,12 +3853,15 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, struct fb_event *evdata = data; unsigned int blank; + if (!evdata || (evdata->id != 0)) + return 0; + if (val != FB_EVENT_BLANK) return 0; logError(0, "%s %s: fts notifier begin!\n", tag, __func__); - if (evdata && evdata->data && val == FB_EVENT_BLANK && info) { + if (evdata->data && val == FB_EVENT_BLANK && info) { blank = *(int *) (evdata->data); @@ -3978,11 +3902,14 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, struct msm_drm_notifier *evdata = data; unsigned int blank; + if (!evdata || (evdata->id != 0)) + return 0; + if (val != MSM_DRM_EVENT_BLANK) return 0; logError(0, "%s %s: fts notifier begin!\n", tag, __func__); - if (evdata && evdata->data && val == MSM_DRM_EVENT_BLANK && info) { + if (evdata->data && val == MSM_DRM_EVENT_BLANK && info) { blank = *(int *) (evdata->data); switch (blank) { diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h index bbdcd393ab3c111a1add57b4b1b847503297ca94..e82b686890a62c52474d9668d5473f8b4442385c 100644 --- a/drivers/iommu/arm-smmu-regs.h +++ b/drivers/iommu/arm-smmu-regs.h @@ -179,6 +179,7 @@ enum arm_smmu_s2cr_privcfg { #define ARM_SMMU_CB_FSRRESTORE 0x5c #define ARM_SMMU_CB_FAR 0x60 #define ARM_SMMU_CB_FSYNR0 0x68 +#define ARM_SMMU_CB_FSYNR1 0x6c #define ARM_SMMU_CB_S1_TLBIVA 0x600 #define ARM_SMMU_CB_S1_TLBIASID 0x610 #define ARM_SMMU_CB_S1_TLBIALL 0x618 diff --git a/drivers/iommu/arm-smmu.c 
b/drivers/iommu/arm-smmu.c index d2629d41078a3285d4aa175ced011b8870f23748..e9211a1c64cc0d1cc55fc08023c103651ba63749 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -178,6 +178,7 @@ struct arm_smmu_cb { u32 tcr[2]; u32 mair[2]; struct arm_smmu_cfg *cfg; + u32 actlr; }; struct arm_smmu_master_cfg { @@ -514,6 +515,11 @@ static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain) mutex_unlock(&smmu_domain->assign_lock); } +static bool arm_smmu_opt_hibernation(struct arm_smmu_device *smmu) +{ + return IS_ENABLED(CONFIG_HIBERNATION); +} + /* * init() * Hook for additional device tree parsing at probe time. @@ -1260,7 +1266,7 @@ static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain, static irqreturn_t arm_smmu_context_fault(int irq, void *dev) { int flags, ret, tmp; - u32 fsr, fsynr, resume; + u32 fsr, fsynr0, fsynr1, frsynra, resume; unsigned long iova; struct iommu_domain *domain = dev; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); @@ -1270,7 +1276,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) void __iomem *gr1_base; bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF; phys_addr_t phys_soft; - u32 frsynra; + uint64_t pte; bool non_fatal_fault = !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS)); @@ -1297,8 +1303,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) BUG(); } - fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); - flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; + fsynr0 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); + fsynr1 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR1); + flags = fsynr0 & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; if (fsr & FSR_TF) flags |= IOMMU_FAULT_TRANSLATION; if (fsr & FSR_PF) @@ -1315,8 +1322,8 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) tmp = report_iommu_fault(domain, smmu->dev, iova, flags); if (!tmp || (tmp == -EBUSY)) { dev_dbg(smmu->dev, - "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n", - iova, fsr, fsynr, cfg->cbndx); + "Context fault handled by client: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n", + iova, cfg->cbndx, fsr, fsynr0, fsynr1); dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft); ret = IRQ_HANDLED; @@ -1326,20 +1333,23 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) fsr); if (__ratelimit(&_rs)) { dev_err(smmu->dev, - "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n", - iova, fsr, fsynr, cfg->cbndx); + "Unhandled context fault: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n", + iova, cfg->cbndx, fsr, fsynr0, fsynr1); dev_err(smmu->dev, "FAR = %016lx\n", (unsigned long)iova); dev_err(smmu->dev, - "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", + "FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr, - (fsr & 0x02) ? "TF " : "", + (fsr & 0x02) ? (fsynr0 & 0x10 ? + "TF W " : "TF R ") : "", (fsr & 0x04) ? "AFF " : "", - (fsr & 0x08) ? "PF " : "", + (fsr & 0x08) ? (fsynr0 & 0x10 ? + "PF W " : "PF R ") : "", (fsr & 0x10) ? "EF " : "", (fsr & 0x20) ? "TLBMCF " : "", (fsr & 0x40) ? "TLBLKF " : "", (fsr & 0x80) ? "MHF " : "", + (fsr & 0x100) ? "UUT " : "", (fsr & 0x40000000) ? "SS " : "", (fsr & 0x80000000) ? "MULTI " : ""); dev_err(smmu->dev, @@ -1348,6 +1358,10 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) dev_err(smmu->dev, "SOFTWARE TABLE WALK FAILED! 
Looks like %s accessed an unmapped address!\n", dev_name(smmu->dev)); + else { + pte = arm_smmu_iova_to_pte(domain, iova); + dev_err(smmu->dev, "PTE = %016llx\n", pte); + } if (phys_atos) dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n", &phys_atos); @@ -1586,6 +1600,9 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx, writel_relaxed(cb->mair[1], cb_base + ARM_SMMU_CB_S1_MAIR1); } + /* ACTLR (implementation defined) */ + writel_relaxed(cb->actlr, cb_base + ARM_SMMU_CB_ACTLR); + /* SCTLR */ reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE; @@ -1684,6 +1701,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, goto out_unlock; } + if (arm_smmu_has_secure_vmid(smmu_domain) && + arm_smmu_opt_hibernation(smmu)) { + dev_err(smmu->dev, + "Secure usecases not supported with hibernation\n"); + ret = -EPERM; + goto out_unlock; + } + /* * Mapping the requested stage onto what we support is surprisingly * complicated, mainly because the spec allows S1+S2 SMMUs without @@ -1803,12 +1828,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, goto out_unlock; cfg->cbndx = ret; - if (smmu->version < ARM_SMMU_V2) { - cfg->irptndx = atomic_inc_return(&smmu->irptndx); - cfg->irptndx %= smmu->num_context_irqs; - } else { - cfg->irptndx = cfg->cbndx; - } if (arm_smmu_is_slave_side_secure(smmu_domain)) { smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) { @@ -1864,11 +1883,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, /* Initialise the context bank with our page table cfg */ arm_smmu_init_context_bank(smmu_domain, &smmu_domain->pgtbl_cfg); + arm_smmu_arch_init_context_bank(smmu_domain, dev); arm_smmu_write_context_bank(smmu, cfg->cbndx, smmu_domain->attributes ); - - arm_smmu_arch_init_context_bank(smmu_domain, dev); - /* for slave side secure, we may have to force the pagetable * format to V8L. */ @@ -1877,6 +1894,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (ret) goto out_clear_smmu; + if (smmu->version < ARM_SMMU_V2) { + cfg->irptndx = atomic_inc_return(&smmu->irptndx); + cfg->irptndx %= smmu->num_context_irqs; + } else { + cfg->irptndx = cfg->cbndx; + } /* * Request context fault interrupt. Do this last to avoid the @@ -2323,13 +2346,13 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, return; } - arm_smmu_domain_remove_master(smmu_domain, fwspec); + if (atomic_domain) + arm_smmu_power_on_atomic(smmu->pwr); + else + arm_smmu_power_on(smmu->pwr); - /* Remove additional vote for atomic power */ - if (atomic_domain) { - WARN_ON(arm_smmu_power_on_atomic(smmu->pwr)); - arm_smmu_power_off(smmu->pwr); - } + arm_smmu_domain_remove_master(smmu_domain, fwspec); + arm_smmu_power_off(smmu->pwr); } static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain) @@ -3620,18 +3643,17 @@ static void qsmmuv2_device_reset(struct arm_smmu_device *smmu) int i; u32 val; struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers; - void __iomem *cb_base; - /* * SCTLR.M must be disabled here per ARM SMMUv2 spec * to prevent table walks with an inconsistent state. 
*/ for (i = 0; i < smmu->num_context_banks; ++i) { - cb_base = ARM_SMMU_CB(smmu, i); + struct arm_smmu_cb *cb = &smmu->cbs[i]; + val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT | ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT | ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT; - writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR); + cb->actlr = val; } /* Program implementation defined registers */ @@ -3720,7 +3742,7 @@ static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu) for (i = 0; i < smmu->num_context_banks; ++i) { cb_base = ARM_SMMU_CB(smmu, i); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); + arm_smmu_write_context_bank(smmu, i, 0); writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR); /* * Disable MMU-500's not-particularly-beneficial next-page @@ -4258,6 +4280,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) sizeof(*smmu->cbs), GFP_KERNEL); if (!smmu->cbs) return -ENOMEM; + for (i = 0; i < smmu->num_context_banks; i++) { + void __iomem *cb_base; + + cb_base = ARM_SMMU_CB(smmu, i); + smmu->cbs[i].actlr = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR); + } /* ID2 */ id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); @@ -4674,8 +4702,15 @@ static int arm_smmu_device_remove(struct platform_device *pdev) static int __maybe_unused arm_smmu_pm_resume(struct device *dev) { struct arm_smmu_device *smmu = dev_get_drvdata(dev); + int ret; + + ret = arm_smmu_power_on(smmu->pwr); + if (ret) + return ret; arm_smmu_device_reset(smmu); + arm_smmu_power_off(smmu->pwr); + return 0; } @@ -4686,6 +4721,7 @@ static struct platform_driver arm_smmu_driver = { .name = "arm-smmu", .of_match_table = of_match_ptr(arm_smmu_of_match), .pm = &arm_smmu_pm_ops, + .suppress_bind_attrs = true, }, .probe = arm_smmu_device_dt_probe, .remove = arm_smmu_device_remove, @@ -5053,8 +5089,8 @@ static phys_addr_t qsmmuv500_iova_to_phys( val = readq_relaxed(tbu->base + DEBUG_PAR_REG); fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); if (fsr & FSR_FAULT) { - dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n", - fsr); + dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx, SID=0x%x\n", + fsr, sid); /* Clear pending interrupts */ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR); @@ -5180,19 +5216,14 @@ static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain, struct device *dev) { struct arm_smmu_device *smmu = smmu_domain->smmu; + struct arm_smmu_cb *cb = &smmu->cbs[smmu_domain->cfg.cbndx]; struct qsmmuv500_group_iommudata *iommudata = to_qsmmuv500_group_iommudata(dev->iommu_group); - void __iomem *cb_base; - const struct iommu_gather_ops *tlb; if (!iommudata->has_actlr) return; - tlb = smmu_domain->pgtbl_cfg.tlb; - cb_base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); - - writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR); - + cb->actlr = iommudata->actlr; /* * Prefetch only works properly if the start and end of all * buffers in the page table are aligned to ARM_SMMU_MIN_IOVA_ALIGN. 
@@ -5200,12 +5231,6 @@ static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain, if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) & QSMMUV500_ACTLR_DEEP_PREFETCH_MASK) smmu_domain->qsmmuv500_errata1_min_iova_align = true; - - /* - * Flush the context bank after modifying ACTLR to ensure there - * are no cache entries with stale state - */ - tlb->tlb_flush_all(smmu_domain); } static int qsmmuv500_tbu_register(struct device *dev, void *cookie) diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 57c920c1372d09f927a7dcdeadc25375bf4164a8..e3dbb6101b4a78fae47a093ddb2c365e4cb3d7af 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1342,7 +1342,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, struct qi_desc desc; if (mask) { - BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); + BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1)); addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; } else diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c index 67bbd07f68a12447e1104ad0281f3c494a2ddc7e..bf34c646fe573b0ec937f2867feee38193568434 100644 --- a/drivers/iommu/io-pgtable-fast.c +++ b/drivers/iommu/io-pgtable-fast.c @@ -255,16 +255,17 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size) __av8l_fast_unmap(ptep, size, true); } -/* upper layer must take care of TLB invalidation */ static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, size_t size) { struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + struct io_pgtable *iop = &data->iop; av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; __av8l_fast_unmap(ptep, size, false); dmac_clean_range(ptep, ptep + nptes); + io_pgtable_tlb_flush_all(iop); return size; } diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 83b72f4f85e4552278452b6356f3bec387211b6b..623e572398212930bc10038c153e0536552449bd 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -30,6 +30,12 @@ struct gic_quirk { extern bool from_suspend; extern struct irq_chip gic_arch_extn; +#ifdef CONFIG_QCOM_SHOW_RESUME_IRQ +extern int msm_show_resume_irq_mask; +#else +#define msm_show_resume_irq_mask 0 +#endif + int gic_configure_irq(unsigned int irq, unsigned int type, void __iomem *base, void (*sync_access)(void)); void gic_dist_config(void __iomem *base, int gic_irqs, diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 13f195c9743e16db76f8cde868d8c8c85450078a..2ea39a83737f5af92fed1802c3b1e5c9ee28e6d3 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2221,7 +2221,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - cpu = cpumask_first(cpu_mask); + cpu = cpumask_first_and(cpu_mask, cpu_online_mask); + if (cpu >= nr_cpu_ids) { + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) + return; + + cpu = cpumask_first(cpu_online_mask); + } + its_dev->event_map.col_map[event] = cpu; irq_data_update_effective_affinity(d, cpumask_of(cpu)); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 65cb116ac0aa2dbab450c17c26c940ed44b49795..dae3bddaa7fd560debea17332b3411256e9b41ae 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ 
-40,6 +40,8 @@ #include #include +#include + #include "irq-gic-common.h" struct redist_region { @@ -331,6 +333,69 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) return 0; } +#ifdef CONFIG_PM + +static int gic_suspend(void) +{ + return 0; +} + +static void gic_show_resume_irq(struct gic_chip_data *gic) +{ + unsigned int i; + u32 enabled; + u32 pending[32]; + void __iomem *base = gic_data.dist_base; + + if (!msm_show_resume_irq_mask) + return; + + for (i = 0; i * 32 < gic->irq_nr; i++) { + enabled = readl_relaxed(base + GICD_ICENABLER + i * 4); + pending[i] = readl_relaxed(base + GICD_ISPENDR + i * 4); + pending[i] &= enabled; + } + + for (i = find_first_bit((unsigned long *)pending, gic->irq_nr); + i < gic->irq_nr; + i = find_next_bit((unsigned long *)pending, gic->irq_nr, i+1)) { + unsigned int irq = irq_find_mapping(gic->domain, i); + struct irq_desc *desc = irq_to_desc(irq); + const char *name = "null"; + + if (desc == NULL) + name = "stray irq"; + else if (desc->action && desc->action->name) + name = desc->action->name; + + pr_warn("%s: %d triggered %s\n", __func__, irq, name); + } +} + +static void gic_resume_one(struct gic_chip_data *gic) +{ + gic_show_resume_irq(gic); +} + +static void gic_resume(void) +{ + gic_resume_one(&gic_data); +} + +static struct syscore_ops gic_syscore_ops = { + .suspend = gic_suspend, + .resume = gic_resume, +}; + +static int __init gic_init_sys(void) +{ + register_syscore_ops(&gic_syscore_ops); + return 0; +} +arch_initcall(gic_init_sys); + +#endif + static u64 gic_mpidr_to_affinity(unsigned long mpidr) { u64 aff; diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig index 6f5dff0bda53a38872fff1a8165bad13d798861a..5d9a9b82544f0d6bf58c4efa7972db1e70c753cd 100644 --- a/drivers/irqchip/qcom/Kconfig +++ b/drivers/irqchip/qcom/Kconfig @@ -21,6 +21,16 @@ config QTI_PDC_SM6150 help QTI Power Domain Controller for SM6150 +config QTI_PDC_SDMMAGPIE + bool "QTI PDC SDMMAGPIE" + select QTI_PDC + default y if ARCH_SDMMAGPIE + help + QTI Power Domain Controller for SDMMAGPIE + This is used for managing and configuring + the wakeup interrupts. Enable it when + ARCH_SDMMAGPIE is selected. + config QTI_MPM bool "QTI MPM" depends on ARCH_QCOM diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile index 62582613438d883e3c721804c2fd321746488110..9ab6c8543f836bba2981519d43309730b0b4c86e 100644 --- a/drivers/irqchip/qcom/Makefile +++ b/drivers/irqchip/qcom/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_QTI_PDC) += pdc.o obj-$(CONFIG_QTI_PDC_SM8150) += pdc-sm8150.o obj-$(CONFIG_QTI_PDC_SM6150) += pdc-sm6150.o +obj-$(CONFIG_QTI_PDC_SDMMAGPIE) += pdc-sdmmagpie.o obj-$(CONFIG_QTI_MPM) += mpm.o -obj-$(CONFIG_QTI_MPM) += mpm.o mpm-8937.o +obj-$(CONFIG_QTI_MPM) += mpm.o mpm-8937.o mpm-qcs405.o diff --git a/drivers/irqchip/qcom/mpm-qcs405.c b/drivers/irqchip/qcom/mpm-qcs405.c new file mode 100644 index 0000000000000000000000000000000000000000..afefa9978cf304edce6174c1f72aad5ab7a0091a --- /dev/null +++ b/drivers/irqchip/qcom/mpm-qcs405.c @@ -0,0 +1,74 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include "mpm.h" + +const struct mpm_pin mpm_qcs405_gic_chip_data[] = { + {2, 216}, + {35, 350}, /* dmse_hv, usb20 -> hs_phy_irq */ + {36, 350}, /* dpse_hv, usb20 -> hs_phy_irq */ + {38, 351}, /* dmse_hv, usb30 -> hs_phy_irq */ + {39, 351}, /* dpse_hv, usb30 -> hs_phy_irq */ + {62, 222}, /* mpm_wake,spmi_m */ + {-1}, +}; + +const struct mpm_pin mpm_qcs405_gpio_chip_data[] = { + {3, 4}, + {4, 6}, + {5, 14}, + {6, 18}, + {7, 117}, + {8, 19}, + {9, 20}, + {10, 21}, + {11, 22}, + {12, 23}, + {13, 24}, + {14, 27}, + {15, 28}, + {16, 31}, + {17, 32}, + {18, 34}, + {19, 35}, + {20, 37}, + {21, 38}, + {22, 59}, + {23, 61}, + {24, 62}, + {25, 77}, + {26, 78}, + {27, 79}, + {28, 80}, + {29, 81}, + {30, 83}, + {31, 84}, + {32, 88}, + {33, 89}, + {34, 99}, + {42, 100}, + {43, 104}, + {47, 105}, + {48, 106}, + {49, 107}, + {52, 109}, + {53, 110}, + {54, 111}, + {55, 112}, + {56, 113}, + {57, 114}, + {58, 115}, + {67, 53}, + {68, 54}, + {-1}, +}; diff --git a/drivers/irqchip/qcom/mpm.c b/drivers/irqchip/qcom/mpm.c index c7b610f29f323ef3dd6517a5a077af82d976b0e7..8b4f900184f9ee4b859ea21a5e45f3ebc35bd658 100644 --- a/drivers/irqchip/qcom/mpm.c +++ b/drivers/irqchip/qcom/mpm.c @@ -605,6 +605,10 @@ static const struct of_device_id mpm_gic_chip_data_table[] = { .compatible = "qcom,mpm-gic-msm8937", .data = mpm_msm8937_gic_chip_data, }, + { + .compatible = "qcom,mpm-gic-qcs405", + .data = mpm_qcs405_gic_chip_data, + }, {} }; MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table); @@ -614,6 +618,10 @@ static const struct of_device_id mpm_gpio_chip_data_table[] = { .compatible = "qcom,mpm-gpio-msm8937", .data = mpm_msm8937_gpio_chip_data, }, + { + .compatible = "qcom,mpm-gpio-qcs405", + .data = mpm_qcs405_gpio_chip_data, + }, {} }; diff --git a/drivers/irqchip/qcom/mpm.h b/drivers/irqchip/qcom/mpm.h index c2d5eae179fe34450c1dea4f1f92c2f9cc1f1b77..7b5c3d8f404899e5b733f93cec250ce299b24954 100644 --- a/drivers/irqchip/qcom/mpm.h +++ b/drivers/irqchip/qcom/mpm.h @@ -24,4 +24,6 @@ struct mpm_pin { extern const struct mpm_pin mpm_msm8937_gic_chip_data[]; extern const struct mpm_pin mpm_msm8937_gpio_chip_data[]; +extern const struct mpm_pin mpm_qcs405_gic_chip_data[]; +extern const struct mpm_pin mpm_qcs405_gpio_chip_data[]; #endif /* __QCOM_MPM_H__ */ diff --git a/drivers/irqchip/qcom/pdc-sdmmagpie.c b/drivers/irqchip/qcom/pdc-sdmmagpie.c new file mode 100644 index 0000000000000000000000000000000000000000..04c24d6b274546f409c7bd59be5e920f568e6852 --- /dev/null +++ b/drivers/irqchip/qcom/pdc-sdmmagpie.c @@ -0,0 +1,154 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include "pdc.h" + +static struct pdc_pin sdmmagpie_data[] = { + {0, 512},/*rpmh_wake*/ + {1, 513},/*ee0_apps_hlos_spmi_periph_irq*/ + {2, 514},/*ee1_apps_trustzone_spmi_periph_irq*/ + {3, 515},/*secure_wdog_expired*/ + {4, 516},/*secure_wdog_bark_irq*/ + {5, 517},/*aop_wdog_expired_irq*/ + {6, 518},/*qmp_usb3_lfps_rxterm_irq*/ + {6, 518},/*qmp_usb3_lfps_rxterm_irq*/ + {7, 519},/*not-connected*/ + {8, 520},/*eud_p0_dmse_int_mx*/ + {9, 521},/*eud_p0_dpse_int_mx*/ + {10, 522},/*not-connected*/ + {11, 523},/*not-connected*/ + {12, 524},/*eud_int_mx[1]*/ + {13, 525},/*ssc_xpu_irq_summary*/ + {14, 526},/*wd_bite_apps*/ + {15, 527},/*ssc_vmidmt_irq_summary*/ + {16, 528},/*sdc_gpo[0]*/ + {17, 529},/*not-connected*/ + {18, 530},/*aoss_pmic_arb_mpu_xpu_summary_irq*/ + {19, 531},/*rpmh_wake_2*/ + {20, 532},/*apps_pdc_irq_in_20*/ + {21, 533},/*apps_pdc_irq_in_21*/ + {22, 534},/*pdc_apps_epcb_timeout_summary_irq*/ + {23, 535},/*spmi_protocol_irq*/ + {24, 536},/*tsense0_tsense_max_min_int*/ + {25, 537},/*tsense1_tsense_max_min_int*/ + {26, 538},/*tsense0_upper_lower_intr*/ + {27, 539},/*tsense1_upper_lower_intr*/ + {28, 540},/*tsense0_critical_intr*/ + {29, 541},/*tsense1_critical_intr*/ + {30, 542},/*apps_pdc.gp_irq_mux[0]*/ + {31, 543},/*apps_pdc.gp_irq_mux[1]*/ + {32, 544},/*apps_pdc.gp_irq_mux[2]*/ + {33, 545},/*apps_pdc.gp_irq_mux[3]*/ + {34, 546},/*apps_pdc.gp_irq_mux[4]*/ + {35, 547},/*apps_pdc.gp_irq_mux[5]*/ + {36, 548},/*apps_pdc.gp_irq_mux[6]*/ + {37, 549},/*apps_pdc.gp_irq_mux[7]*/ + {38, 550},/*apps_pdc.gp_irq_mux[8]*/ + {39, 551},/*apps_pdc.gp_irq_mux[9]*/ + {40, 552},/*apps_pdc.gp_irq_mux[10]*/ + {41, 553},/*apps_pdc.gp_irq_mux[11]*/ + {42, 554},/*apps_pdc.gp_irq_mux[12]*/ + {43, 555},/*apps_pdc.gp_irq_mux[13]*/ + {44, 556},/*apps_pdc.gp_irq_mux[14]*/ + {45, 557},/*apps_pdc.gp_irq_mux[15]*/ + {46, 558},/*apps_pdc.gp_irq_mux[16]*/ + {47, 559},/*apps_pdc.gp_irq_mux[17]*/ + {48, 560},/*apps_pdc.gp_irq_mux[18]*/ + {49, 561},/*apps_pdc.gp_irq_mux[19]*/ + {50, 562},/*apps_pdc.gp_irq_mux[20]*/ + {51, 563},/*apps_pdc.gp_irq_mux[21]*/ + {52, 564},/*apps_pdc.gp_irq_mux[22]*/ + {53, 565},/*apps_pdc.gp_irq_mux[23]*/ + {54, 566},/*apps_pdc.gp_irq_mux[24]*/ + {55, 567},/*apps_pdc.gp_irq_mux[25]*/ + {56, 568},/*apps_pdc.gp_irq_mux[26]*/ + {57, 569},/*apps_pdc.gp_irq_mux[27]*/ + {58, 570},/*apps_pdc.gp_irq_mux[28]*/ + {59, 571},/*apps_pdc.gp_irq_mux[29]*/ + {60, 572},/*apps_pdc.gp_irq_mux[30]*/ + {61, 573},/*apps_pdc.gp_irq_mux[31]*/ + {62, 574},/*apps_pdc.gp_irq_mux[32]*/ + {63, 575},/*apps_pdc.gp_irq_mux[33]*/ + {64, 576},/*apps_pdc.gp_irq_mux[34]*/ + {65, 577},/*apps_pdc.gp_irq_mux[35]*/ + {66, 578},/*apps_pdc.gp_irq_mux[36]*/ + {67, 579},/*apps_pdc.gp_irq_mux[37]*/ + {68, 580},/*apps_pdc.gp_irq_mux[38]*/ + {69, 581},/*apps_pdc.gp_irq_mux[39]*/ + {70, 582},/*apps_pdc.gp_irq_mux[40]*/ + {71, 583},/*apps_pdc.gp_irq_mux[41]*/ + {72, 584},/*apps_pdc.gp_irq_mux[42]*/ + {73, 585},/*apps_pdc.gp_irq_mux[43]*/ + {74, 586},/*apps_pdc.gp_irq_mux[44]*/ + {75, 587},/*apps_pdc.gp_irq_mux[45]*/ + {76, 588},/*apps_pdc.gp_irq_mux[46]*/ + {77, 589},/*apps_pdc.gp_irq_mux[47]*/ + {78, 590},/*apps_pdc.gp_irq_mux[48]*/ + {79, 591},/*apps_pdc.gp_irq_mux[49]*/ + {80, 592},/*apps_pdc.gp_irq_mux[50]*/ + {81, 593},/*apps_pdc.gp_irq_mux[51]*/ + {82, 594},/*apps_pdc.gp_irq_mux[52]*/ + {83, 595},/*apps_pdc.gp_irq_mux[53]*/ + {84, 596},/*apps_pdc.gp_irq_mux[54]*/ + {85, 597},/*apps_pdc.gp_irq_mux[55]*/ + {86, 598},/*apps_pdc.gp_irq_mux[56]*/ + {87, 599},/*apps_pdc.gp_irq_mux[57]*/ + {88, 
600},/*apps_pdc.gp_irq_mux[58]*/ + {89, 601},/*apps_pdc.gp_irq_mux[59]*/ + {90, 602},/*apps_pdc.gp_irq_mux[60]*/ + {91, 603},/*apps_pdc.gp_irq_mux[61]*/ + {92, 604},/*apps_pdc.gp_irq_mux[62]*/ + {93, 605},/*apps_pdc.gp_irq_mux[63]*/ + {94, 641},/*apps_pdc.gp_irq_mux[64]*/ + {95, 642},/*apps_pdc.gp_irq_mux[65]*/ + {96, 643},/*apps_pdc.gp_irq_mux[66]*/ + {97, 644},/*apps_pdc.gp_irq_mux[67]*/ + {98, 645},/*apps_pdc.gp_irq_mux[68]*/ + {99, 646},/*apps_pdc.gp_irq_mux[69]*/ + {100, 647},/*apps_pdc.gp_irq_mux[70]*/ + {101, 648},/*apps_pdc.gp_irq_mux[71]*/ + {102, 649},/*apps_pdc.gp_irq_mux[72]*/ + {103, 650},/*apps_pdc.gp_irq_mux[73]*/ + {104, 651},/*apps_pdc.gp_irq_mux[74]*/ + {105, 652},/*apps_pdc.gp_irq_mux[75]*/ + {106, 653},/*apps_pdc.gp_irq_mux[76]*/ + {107, 654},/*apps_pdc.gp_irq_mux[77]*/ + {108, 655},/*apps_pdc.gp_irq_mux[78]*/ + {109, 656},/*apps_pdc.gp_irq_mux[79]*/ + {110, 657},/*apps_pdc.gp_irq_mux[80]*/ + {111, 658},/*apps_pdc.gp_irq_mux[81]*/ + {112, 659},/*apps_pdc.gp_irq_mux[82]*/ + {113, 660},/*apps_pdc.gp_irq_mux[83]*/ + {114, 661},/*apps_pdc.gp_irq_mux[84]*/ + {115, 662},/*apps_pdc.gp_irq_mux[85]*/ + {116, 663},/*apps_pdc.gp_irq_mux[86]*/ + {117, 664},/*apps_pdc.gp_irq_mux[87]*/ + {118, 665},/*apps_pdc.gp_irq_mux[88]*/ + {119, 666},/*apps_pdc.gp_irq_mux[89]*/ + {120, 667},/*apps_pdc.gp_irq_mux[90]*/ + {121, 668},/*apps_pdc.gp_irq_mux[91]*/ + {122, 669},/*apps_pdc.gp_irq_mux[92]*/ + {123, 670},/*apps_pdc.gp_irq_mux[93]*/ + {124, 671},/*apps_pdc.gp_irq_mux[94]*/ + {125, 95},/*apps_pdc.gp_irq_mux[95]*/ + {-1} +}; + +static int __init qcom_pdc_gic_init(struct device_node *node, + struct device_node *parent) +{ + return qcom_pdc_init(node, parent, sdmmagpie_data); +} + +IRQCHIP_DECLARE(pdc_sdmmagpie, "qcom,pdc-sdmmagpie", qcom_pdc_gic_init); diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c index 944a7f3380991d107b0a3f50a9dfd100ba0719a8..1b25d8bc153aec16ea8e9ee0cd47de0604a2a393 100644 --- a/drivers/isdn/hardware/eicon/diva.c +++ b/drivers/isdn/hardware/eicon/diva.c @@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void) ** Receive and process command from user mode utility */ void *diva_xdi_open_adapter(void *os_handle, const void __user *src, - int length, + int length, void *mptr, divas_xdi_copy_from_user_fn_t cp_fn) { - diva_xdi_um_cfg_cmd_t msg; + diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr; diva_os_xdi_adapter_t *a = NULL; diva_os_spin_lock_magic_t old_irql; struct list_head *tmp; @@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src, length, sizeof(diva_xdi_um_cfg_cmd_t))) return NULL; } - if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { + if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) { DBG_ERR(("A: A(?) 
open, write error")) return NULL; } diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); list_for_each(tmp, &adapter_queue) { a = list_entry(tmp, diva_os_xdi_adapter_t, link); - if (a->controller == (int)msg.adapter) + if (a->controller == (int)msg->adapter) break; a = NULL; } diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); if (!a) { - DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) + DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter)) } return (a); @@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle) int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, - int length, divas_xdi_copy_from_user_fn_t cp_fn) + int length, void *mptr, + divas_xdi_copy_from_user_fn_t cp_fn) { + diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr; diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; void *data; @@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src, return (-2); } - length = (*cp_fn) (os_handle, data, src, length); + if (msg) { + *(diva_xdi_um_cfg_cmd_t *)data = *msg; + length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg), + src + sizeof(*msg), length - sizeof(*msg)); + } else { + length = (*cp_fn) (os_handle, data, src, length); + } if (length > 0) { if ((*(a->interface.cmd_proc)) (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h index b067032093a8a2bb656f3e8c58fa91bccad7b056..1ad76650fbf984b3fe8b0385205005a57de52c0d 100644 --- a/drivers/isdn/hardware/eicon/diva.h +++ b/drivers/isdn/hardware/eicon/diva.h @@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst, int max_length, divas_xdi_copy_to_user_fn_t cp_fn); int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, - int length, divas_xdi_copy_from_user_fn_t cp_fn); + int length, void *msg, + divas_xdi_copy_from_user_fn_t cp_fn); void *diva_xdi_open_adapter(void *os_handle, const void __user *src, - int length, + int length, void *msg, divas_xdi_copy_from_user_fn_t cp_fn); void diva_xdi_close_adapter(void *adapter, void *os_handle); diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index b2023e08dcd28b3c85783b7df51a3e3b35c0e2eb..932e98d0d901e4c0b4a972fee9ce63b642560ea5 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c @@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file) static ssize_t divas_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { + diva_xdi_um_cfg_cmd_t msg; int ret = -EINVAL; if (!file->private_data) { file->private_data = diva_xdi_open_adapter(file, buf, - count, + count, &msg, xdi_copy_from_user); - } - if (!file->private_data) { - return (-ENODEV); + if (!file->private_data) + return (-ENODEV); + ret = diva_xdi_write(file->private_data, file, + buf, count, &msg, xdi_copy_from_user); + } else { + ret = diva_xdi_write(file->private_data, file, + buf, count, NULL, xdi_copy_from_user); } - ret = diva_xdi_write(file->private_data, file, - buf, count, xdi_copy_from_user); switch (ret) { case -1: /* Message should be removed from rx mailbox first */ ret = -EBUSY; @@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf, static ssize_t divas_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { + diva_xdi_um_cfg_cmd_t msg; int ret = -EINVAL; if 
(!file->private_data) { file->private_data = diva_xdi_open_adapter(file, buf, - count, + count, &msg, xdi_copy_from_user); } if (!file->private_data) { diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index de74c2ba61ecbfeb497a7399c9786afb8623790c..7b139f45f409709c49f38d1160954ff20dd9653b 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -393,7 +393,7 @@ led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev) static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) { int rc, i, addr_offset; - u8 val = 0, mask; + u8 val = 0, mask, strobe_mask = 0, strobe_ctrl; for (i = 0; i < led->num_fnodes; i++) { addr_offset = led->fnode[i].id; @@ -404,6 +404,51 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; val |= 0x1 << led->fnode[i].id; + + if (led->fnode[i].strobe_sel == HW_STROBE) { + if (led->fnode[i].id == LED3) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + else + strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT; + } + + if (led->fnode[i].id == LED3 && + led->fnode[i].strobe_sel == LPG_STROBE) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + /* + * As per the hardware recommendation, to use LED2/LED3 in HW + * strobe mode, LED1 should be set to HW strobe mode as well. + */ + if (led->fnode[i].strobe_sel == HW_STROBE && + (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) { + mask = FLASH_HW_STROBE_MASK; + addr_offset = led->fnode[LED1].id; + /* + * HW_STROBE: enable, TRIGGER: level, + * POLARITY: active high + */ + strobe_ctrl = BIT(2) | BIT(0); + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_STROBE_CTRL( + led->base + addr_offset), + mask, strobe_ctrl); + if (rc < 0) + return rc; + } + } + + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), + strobe_mask, 0); + if (rc < 0) + return rc; + + if (led->fnode[LED3].strobe_sel == LPG_STROBE) { + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_LPG_INPUT_CTRL(led->base), + LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); + if (rc < 0) + return rc; } rc = qpnp_flash_led_write(led, @@ -597,19 +642,6 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; } - if (led->fnode[LED3].strobe_sel == LPG_STROBE) { - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), - LED3_FLASH_ONCE_ONLY_BIT, 0); - if (rc < 0) - return rc; - - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_LPG_INPUT_CTRL(led->base), - LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); - if (rc < 0) - return rc; - } return 0; } diff --git a/drivers/leds/leds-qpnp-vibrator-ldo.c b/drivers/leds/leds-qpnp-vibrator-ldo.c index 6a143247cd9bd510bc5759cb61d115c8899f07ae..488bb9979d865105b3b91fcbc1c645d5dc290981 100644 --- a/drivers/leds/leds-qpnp-vibrator-ldo.c +++ b/drivers/leds/leds-qpnp-vibrator-ldo.c @@ -65,9 +65,29 @@ struct vib_ldo_chip { bool disable_overdrive; }; -static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV) +static inline int qpnp_vib_ldo_poll_status(struct vib_ldo_chip *chip) { unsigned int val; + int ret; + + ret = regmap_read_poll_timeout(chip->regmap, + chip->base + QPNP_VIB_LDO_REG_STATUS1, val, + val & QPNP_VIB_LDO_VREG_READY, 100, 1000); + if (ret < 0) { + pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n", + val, ret); + + /* Keep VIB_LDO disabled */ + regmap_update_bits(chip->regmap, + chip->base + QPNP_VIB_LDO_REG_EN_CTL, + QPNP_VIB_LDO_EN, 0); + } + + return ret; +} + +static int qpnp_vib_ldo_set_voltage(struct 
vib_ldo_chip *chip, int new_uV) +{ u32 vlevel; u8 reg[2]; int ret; @@ -86,13 +106,9 @@ static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV) } if (chip->vib_enabled) { - ret = regmap_read_poll_timeout(chip->regmap, - chip->base + QPNP_VIB_LDO_REG_STATUS1, - val, val & QPNP_VIB_LDO_VREG_READY, - 100, 1000); + ret = qpnp_vib_ldo_poll_status(chip); if (ret < 0) { - pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n", - val, ret); + pr_err("Vibrator LDO status polling timedout\n"); return ret; } } @@ -103,7 +119,6 @@ static int qpnp_vib_ldo_set_voltage(struct vib_ldo_chip *chip, int new_uV) static inline int qpnp_vib_ldo_enable(struct vib_ldo_chip *chip, bool enable) { - unsigned int val; int ret; if (chip->vib_enabled == enable) @@ -120,13 +135,9 @@ static inline int qpnp_vib_ldo_enable(struct vib_ldo_chip *chip, bool enable) } if (enable) { - ret = regmap_read_poll_timeout(chip->regmap, - chip->base + QPNP_VIB_LDO_REG_STATUS1, - val, val & QPNP_VIB_LDO_VREG_READY, - 100, 1000); + ret = qpnp_vib_ldo_poll_status(chip); if (ret < 0) { - pr_err("Vibrator LDO vreg_ready timeout, status=0x%02x, ret=%d\n", - val, ret); + pr_err("Vibrator LDO status polling timedout\n"); return ret; } } diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c index 9108d7d7041a1dcc0e1e28917c9d3a44f827d8c2..e64ad5515e7c2959e7f20ab8085d30bdd72da175 100644 --- a/drivers/mailbox/msm_qmp.c +++ b/drivers/mailbox/msm_qmp.c @@ -222,7 +222,6 @@ static void send_irq(struct qmp_device *mdev) */ wmb(); writel_relaxed(mdev->irq_mask, mdev->tx_irq_reg); - writel_relaxed(0x0, mdev->tx_irq_reg); mdev->tx_irq_count++; } diff --git a/drivers/mailbox/qcom-rpmh-mailbox.c b/drivers/mailbox/qcom-rpmh-mailbox.c index 22aad2a2736c0c93507ed3c7775206a52dbd050b..0162ad6e0a1a7cd6dbcea72c75e335b5a6fba6a9 100644 --- a/drivers/mailbox/qcom-rpmh-mailbox.c +++ b/drivers/mailbox/qcom-rpmh-mailbox.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -128,6 +128,7 @@ struct rsc_drv { const char *name; void __iomem *base; /* start address of the RSC's registers */ void __iomem *reg_base; /* start address for DRV specific register */ + int irq; int drv_id; struct platform_device *pdev; struct tcs_mbox tcs[TCS_TYPE_NR]; @@ -693,9 +694,10 @@ static int find_match(struct tcs_mbox *tcs, struct tcs_cmd *cmd, int len) } /* sanity check to ensure the seq is same */ for (j = 1; j < len; j++) { - WARN((tcs->cmd_addr[i + j] != cmd[j].addr), - "Message does not match previous sequence.\n"); + if (tcs->cmd_addr[i + j] != cmd[j].addr) { + pr_debug("Message does not match previous sequence.\n"); return -EINVAL; + } } found = true; break; @@ -723,12 +725,12 @@ static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg) do { slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS, n, msg->num_payload, 0); - if (slot == MAX_TCS_SLOTS) + if (slot >= MAX_TCS_SLOTS) break; n += tcs->ncpt; } while (slot + msg->num_payload - 1 >= n); - return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM; + return (slot < MAX_TCS_SLOTS) ? 
slot : -ENOMEM; } static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg, @@ -877,6 +879,8 @@ static void dump_tcs_stats(struct rsc_drv *drv) { int i; unsigned long long curr = arch_counter_get_cntvct(); + struct irq_data *rsc_irq_data = irq_get_irq_data(drv->irq); + bool irq_sts; for (i = 0; i < drv->num_tcs; i++) { if (!atomic_read(&drv->tcs_in_use[i])) @@ -890,6 +894,20 @@ static void dump_tcs_stats(struct rsc_drv *drv) print_tcs_regs(drv, i); print_response(drv, i); } + + if (rsc_irq_data) { + irq_get_irqchip_state(drv->irq, IRQCHIP_STATE_PENDING, + &irq_sts); + pr_warn("HW IRQ %lu is %s at GIC\n", rsc_irq_data->hwirq, + irq_sts ? "PENDING" : "NOT PENDING"); + } + + if (test_bit(TASKLET_STATE_SCHED, &drv->tasklet.state)) + pr_warn("Tasklet is scheduled for execution\n"); + else if (test_bit(TASKLET_STATE_RUN, &drv->tasklet.state)) + pr_warn("Tasklet is running\n"); + else + pr_warn("Tasklet is not active\n"); } static void chan_debug(struct mbox_chan *chan) @@ -979,7 +997,8 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data) /* If we were just busy waiting for TCS, dump the state and return */ if (ret == -EBUSY) { - pr_info_ratelimited("TCS Busy, retrying RPMH message send\n"); + dev_err_ratelimited(chan->cl->dev, + "TCS Busy, retrying RPMH message send\n"); ret = -EAGAIN; } @@ -1258,6 +1277,8 @@ static int rsc_drv_probe(struct platform_device *pdev) if (ret) return ret; + drv->irq = irq; + /* Enable interrupts for AMC TCS */ write_tcs_reg(drv->reg_base, RSC_DRV_IRQ_ENABLE, 0, 0, drv->tcs[ACTIVE_TCS].tcs_mask); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 21d25b725fec82b71be314e7cf94ac9c272186d5..e4a0eced8950a4a42b60bccef5a1918ed29a6aaf 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -573,4 +573,24 @@ config DM_ANDROID_VERITY of the metadata contents are verified against the key included in the system keyring. Upon success, the underlying verity target is setup. + +config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED + bool "Verity will validate blocks at most once" + depends on DM_VERITY + ---help--- + Default enables at_most_once option for dm-verity + + Verify data blocks only the first time they are read from the + data device, rather than every time. This reduces the overhead + of dm-verity so that it can be used on systems that are memory + and/or CPU constrained. However, it provides a reduced level + of security because only offline tampering of the data device's + content will be detected, not online tampering. + + Hash blocks are still verified each time they are read from the + hash device, since verification of hash blocks is less performance + critical than data blocks, and a hash block will not be verified + any more after all the data blocks it covers have been verified anyway. + + If unsure, say N. 
endif # MD diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 02e42ba2ecbcd86638e2617910e58f924a911d35..72ae5dc50532ec799b77458e7fba31d2480c23c0 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1380,6 +1380,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); +static void requeue_bios(struct pool *pool); + static void check_for_space(struct pool *pool) { int r; @@ -1392,8 +1394,10 @@ static void check_for_space(struct pool *pool) if (r) return; - if (nr_free) + if (nr_free) { set_pool_mode(pool, PM_WRITE); + requeue_bios(pool); + } } /* @@ -1470,7 +1474,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) r = dm_pool_alloc_data_block(pool->pmd, result); if (r) { - metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); + if (r == -ENOSPC) + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); + else + metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); return r; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 78f66d9c92ed3cb90b93bcce36d9e0030564f14a..1742af24fd9cfecdfc5b11cd85df8463c9b6acbb 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1129,6 +1129,14 @@ int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } +#ifdef CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED + if (!v->validated_blocks) { + r = verity_alloc_most_once(v); + if (r) + goto bad; + } +#endif + v->hash_per_block_bits = __fls((1 << v->hash_dev_block_bits) / v->digest_size); diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 6d7bda6f8190583136608898d778a0ec161c7aee..ba6b0a90ecfb1647e7dcbaff72533af1e24cc91e 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -788,7 +788,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Chunk BIO work */ mutex_init(&dmz->chunk_lock); - INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); + INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO); dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 0, dev->name); if (!dmz->chunk_wq) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 636103a8fa5017abde238ed7ddc46a71ad0cff6c..6e60943ca5f12f818f4b4b9ba9e2f0b34197f2f8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2823,7 +2823,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) err = 0; } } else if (cmd_match(buf, "re-add")) { - if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) { + if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && + rdev->saved_raid_disk >= 0) { /* clear_bit is performed _after_ all the devices * have their local Faulty bit cleared. 
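Taken together with the remove_and_add_spares() hunk just below, the effect of the state_store() change is that "re-add" is only honoured for a device whose previous slot was recorded when it was hot-removed. Condensed, the two pieces read roughly as follows (a paraphrase of the hunks in this patch, not additional code):

    /* remove_and_add_spares(): remember the slot before clearing it. */
    rdev->saved_raid_disk = rdev->raid_disk;
    rdev->raid_disk = -1;

    /* state_store("re-add"): refuse unless such a slot exists. */
    if (test_bit(Faulty, &rdev->flags) && rdev->raid_disk == -1 &&
        rdev->saved_raid_disk >= 0)
            /* ... proceed with re-add ... */;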
If any writes * happen in the meantime in the local node, they @@ -8594,6 +8595,7 @@ static int remove_and_add_spares(struct mddev *mddev, if (mddev->pers->hot_remove_disk( mddev, rdev) == 0) { sysfs_unlink_rdev(mddev, rdev); + rdev->saved_raid_disk = rdev->raid_disk; rdev->raid_disk = -1; removed++; } diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 33d844fe2e703f619efe6f658c53c048e7954e8c..f7d4ec37fdbc13cad62cc2437f5012bae3dce52f 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -275,8 +275,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe, wake_up_interruptible (&events->wait_queue); } +static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv, + struct dvb_fe_events *events) +{ + int ret; + + up(&fepriv->sem); + ret = events->eventw != events->eventr; + down(&fepriv->sem); + + return ret; +} + static int dvb_frontend_get_event(struct dvb_frontend *fe, - struct dvb_frontend_event *event, int flags) + struct dvb_frontend_event *event, int flags) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; @@ -294,13 +306,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe, if (flags & O_NONBLOCK) return -EWOULDBLOCK; - up(&fepriv->sem); - - ret = wait_event_interruptible (events->wait_queue, - events->eventw != events->eventr); - - if (down_interruptible (&fepriv->sem)) - return -ERESTARTSYS; + ret = wait_event_interruptible(events->wait_queue, + dvb_frontend_test_event(fepriv, events)); if (ret < 0) return ret; diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c index 01f8c879fce2bc4c3f311c56483f3b28cc966184..3a5933417683fd49c07aed82da374c2804b1a48d 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c @@ -20,38 +20,72 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl, bool regulator_enable) { int rc = 0; + struct cam_flash_private_soc *soc_private = + (struct cam_flash_private_soc *) + flash_ctrl->soc_info.soc_private; if (!(flash_ctrl->switch_trigger)) { CAM_ERR(CAM_FLASH, "Invalid argument"); return -EINVAL; } - if (regulator_enable && - (flash_ctrl->is_regulator_enabled == false)) { - rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger, - ENABLE_REGULATOR, NULL); - if (rc) { - CAM_ERR(CAM_FLASH, "regulator enable failed rc = %d", - rc); - return rc; - } - flash_ctrl->is_regulator_enabled = true; - } else if ((!regulator_enable) && - (flash_ctrl->is_regulator_enabled == true)) { - rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger, - DISABLE_REGULATOR, NULL); - if (rc) { - CAM_ERR(CAM_FLASH, "regulator disable failed rc = %d", - rc); - return rc; + if (soc_private->is_wled_flash) { + if (regulator_enable && + flash_ctrl->is_regulator_enabled == false) { + rc = wled_flash_led_prepare(flash_ctrl->switch_trigger, + ENABLE_REGULATOR, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, "enable reg failed: rc: %d", + rc); + return rc; + } + + flash_ctrl->is_regulator_enabled = true; + } else if (!regulator_enable && + flash_ctrl->is_regulator_enabled == true) { + rc = wled_flash_led_prepare(flash_ctrl->switch_trigger, + DISABLE_REGULATOR, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, "disalbe reg fail: rc: %d", + rc); + return rc; + } + + flash_ctrl->is_regulator_enabled = 
false; + } else { + CAM_ERR(CAM_FLASH, "Wrong Wled flash state: %d", + flash_ctrl->flash_state); + rc = -EINVAL; } - flash_ctrl->is_regulator_enabled = false; } else { - CAM_ERR(CAM_FLASH, "Wrong Flash State : %d", - flash_ctrl->flash_state); - rc = -EINVAL; - } + if (regulator_enable && + (flash_ctrl->is_regulator_enabled == false)) { + rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger, + ENABLE_REGULATOR, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, + "Regulator enable failed rc = %d", rc); + return rc; + } + flash_ctrl->is_regulator_enabled = true; + } else if ((!regulator_enable) && + (flash_ctrl->is_regulator_enabled == true)) { + rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger, + DISABLE_REGULATOR, NULL); + if (rc) { + CAM_ERR(CAM_FLASH, + "Regulator disable failed rc = %d", rc); + return rc; + } + + flash_ctrl->is_regulator_enabled = false; + } else { + CAM_ERR(CAM_FLASH, "Wrong Flash State : %d", + flash_ctrl->flash_state); + rc = -EINVAL; + } + } return rc; } @@ -144,37 +178,31 @@ static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl, for (i = 0; i < flash_ctrl->torch_num_sources; i++) { if (flash_ctrl->torch_trigger[i]) { max_current = soc_private->torch_max_current[i]; - if (flash_data->led_current_ma[i] <= max_current) curr = flash_data->led_current_ma[i]; else - curr = soc_private->torch_op_current[i]; - - CAM_DBG(CAM_PERF, - "Led_Current[%d] = %d", i, curr); - cam_res_mgr_led_trigger_event( - flash_ctrl->torch_trigger[i], - curr); + curr = max_current; } + CAM_DBG(CAM_FLASH, "Led_Torch[%d]: Current: %d", + i, curr); + cam_res_mgr_led_trigger_event( + flash_ctrl->torch_trigger[i], curr); } } else if (op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) { for (i = 0; i < flash_ctrl->flash_num_sources; i++) { if (flash_ctrl->flash_trigger[i]) { max_current = soc_private->flash_max_current[i]; - if (flash_data->led_current_ma[i] <= max_current) curr = flash_data->led_current_ma[i]; else - curr = soc_private->flash_op_current[i]; - - CAM_DBG(CAM_PERF, "LED flash_current[%d]: %d", - i, curr); - cam_res_mgr_led_trigger_event( - flash_ctrl->flash_trigger[i], - curr); + curr = max_current; } + CAM_DBG(CAM_FLASH, "LED_Flash[%d]: Current: %d", + i, curr); + cam_res_mgr_led_trigger_event( + flash_ctrl->flash_trigger[i], curr); } } else { CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op); @@ -523,11 +551,16 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) struct cam_flash_set_on_off *flash_operation_info = NULL; struct cam_flash_query_curr *flash_query_info = NULL; struct cam_flash_frame_setting *flash_data = NULL; + struct cam_flash_private_soc *soc_private = NULL; if (!fctrl || !arg) { CAM_ERR(CAM_FLASH, "fctrl/arg is NULL"); return -EINVAL; } + + soc_private = (struct cam_flash_private_soc *) + fctrl->soc_info.soc_private; + /* getting CSL Packet */ ioctl_ctrl = (struct cam_control *)arg; @@ -716,8 +749,17 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg) flash_query_info = (struct cam_flash_query_curr *)cmd_buf; - rc = qpnp_flash_led_prepare(fctrl->switch_trigger, - QUERY_MAX_AVAIL_CURRENT, &query_curr_ma); + if (soc_private->is_wled_flash) + rc = wled_flash_led_prepare( + fctrl->switch_trigger, + QUERY_MAX_AVAIL_CURRENT, + &query_curr_ma); + else + rc = qpnp_flash_led_prepare( + fctrl->switch_trigger, + QUERY_MAX_AVAIL_CURRENT, + &query_curr_ma); + CAM_DBG(CAM_FLASH, "query_curr_ma = %d", query_curr_ma); if (rc) { diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h 
b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h index 0ea210696bb2b88143e8163b536dcff3c2d5557c..1f527b3413196b8668936a0df909265f712c1649 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h @@ -13,7 +13,6 @@ #ifndef _CAM_FLASH_CORE_H_ #define _CAM_FLASH_CORE_H_ -#include #include #include "cam_flash_dev.h" #include "cam_sync_api.h" diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h index 13238552416a98871b4f5f67a41ccf8fb3494562..4fcd81d2ece0731787fe3e30811a21224e4d4bcd 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -123,6 +124,7 @@ struct cam_flash_frame_setting { * @torch_trigger_name : Torch trigger name array * @torch_op_current : Torch operational current * @torch_max_current : Max supported current for LED in torch mode + * @is_wled_flash : Detection between WLED/LED flash */ struct cam_flash_private_soc { @@ -134,6 +136,7 @@ struct cam_flash_private_soc { const char *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS]; uint32_t torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS]; uint32_t torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS]; + bool is_wled_flash; }; /** diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c index 22678a6cf720800701dcdd1ee246deff5616765c..d5f583a72f48a628f843fa6ab98c0700bd28e748 100644 --- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c +++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c @@ -26,9 +26,12 @@ static int32_t cam_get_source_node_info( struct device_node *torch_src_node = NULL; struct device_node *switch_src_node = NULL; + soc_private->is_wled_flash = + of_property_read_bool(of_node, "wled-flash-support"); + switch_src_node = of_parse_phandle(of_node, "switch-source", 0); if (!switch_src_node) { - CAM_DBG(CAM_FLASH, "switch_src_node NULL"); + CAM_WARN(CAM_FLASH, "switch_src_node NULL"); } else { rc = of_property_read_string(switch_src_node, "qcom,default-led-trigger", @@ -75,46 +78,61 @@ static int32_t cam_get_source_node_info( continue; } - CAM_DBG(CAM_FLASH, "default trigger %s", + CAM_DBG(CAM_FLASH, "Flash default trigger %s", soc_private->flash_trigger_name[i]); + cam_res_mgr_led_trigger_register( + soc_private->flash_trigger_name[i], + &fctrl->flash_trigger[i]); + + if (soc_private->is_wled_flash) { + rc = wled_flash_led_prepare( + fctrl->flash_trigger[i], + QUERY_MAX_CURRENT, + &soc_private->flash_max_current[i]); + if (rc) { + CAM_ERR(CAM_FLASH, + "WLED FLASH max_current read fail: %d", + rc); + of_node_put(flash_src_node); + rc = 0; + continue; + } + } else { + rc = of_property_read_u32(flash_src_node, + "qcom,max-current", + &soc_private->flash_max_current[i]); + if (rc < 0) { + CAM_WARN(CAM_FLASH, + "LED FLASH max-current read fail: %d", + rc); + of_node_put(flash_src_node); + continue; + } + } /* Read operational-current */ rc = of_property_read_u32(flash_src_node, "qcom,current-ma", &soc_private->flash_op_current[i]); if (rc) { - CAM_WARN(CAM_FLASH, "op-current: read failed"); - 
of_node_put(flash_src_node); - continue; - } - - /* Read max-current */ - rc = of_property_read_u32(flash_src_node, - "qcom,max-current", - &soc_private->flash_max_current[i]); - if (rc) { - CAM_WARN(CAM_FLASH, - "max-current: read failed"); - of_node_put(flash_src_node); - continue; + CAM_INFO(CAM_FLASH, "op-current: read failed"); + rc = 0; } /* Read max-duration */ rc = of_property_read_u32(flash_src_node, "qcom,duration-ms", &soc_private->flash_max_duration[i]); - if (rc) - CAM_WARN(CAM_FLASH, - "max-duration: read failed"); - + if (rc) { + CAM_INFO(CAM_FLASH, + "max-duration prop unavailable: %d", + rc); + rc = 0; + } of_node_put(flash_src_node); - CAM_DBG(CAM_FLASH, "max_current[%d]: %d", + CAM_DBG(CAM_FLASH, "MainFlashMaxCurrent[%d]: %d", i, soc_private->flash_max_current[i]); - - cam_res_mgr_led_trigger_register( - soc_private->flash_trigger_name[i], - &fctrl->flash_trigger[i]); } } @@ -147,35 +165,51 @@ static int32_t cam_get_source_node_info( continue; } + CAM_DBG(CAM_FLASH, "Torch default trigger %s", + soc_private->torch_trigger_name[i]); + cam_res_mgr_led_trigger_register( + soc_private->torch_trigger_name[i], + &fctrl->torch_trigger[i]); + + if (soc_private->is_wled_flash) { + rc = wled_flash_led_prepare( + fctrl->torch_trigger[i], + QUERY_MAX_CURRENT, + &soc_private->torch_max_current[i]); + if (rc) { + CAM_ERR(CAM_FLASH, + "WLED TORCH max_current read fail: %d", + rc); + of_node_put(torch_src_node); + continue; + } + } else { + rc = of_property_read_u32(torch_src_node, + "qcom,max-current", + &soc_private->torch_max_current[i]); + if (rc < 0) { + CAM_WARN(CAM_FLASH, + "LED-TORCH max-current read failed: %d", + rc); + of_node_put(torch_src_node); + continue; + } + } + /* Read operational-current */ rc = of_property_read_u32(torch_src_node, "qcom,current-ma", &soc_private->torch_op_current[i]); - if (rc < 0) { - CAM_WARN(CAM_FLASH, "current: read failed"); - of_node_put(torch_src_node); - continue; - } - - /* Read max-current */ - rc = of_property_read_u32(torch_src_node, - "qcom,max-current", - &soc_private->torch_max_current[i]); if (rc < 0) { CAM_WARN(CAM_FLASH, - "max-current: read failed"); - of_node_put(torch_src_node); - continue; + "op-current prop unavailable: %d", rc); + rc = 0; } of_node_put(torch_src_node); - CAM_DBG(CAM_FLASH, "max_current[%d]: %d", + CAM_DBG(CAM_FLASH, "TorchMaxCurrent[%d]: %d", i, soc_private->torch_max_current[i]); - - cam_res_mgr_led_trigger_register( - soc_private->torch_trigger_name[i], - &fctrl->torch_trigger[i]); } } diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h index d38cc87b13d99fd5e9f3c8f9909199d3245b7077..c8b4523427cb48e5f7a91c96c3be7e84403700fb 100644 --- a/drivers/media/platform/msm/npu/npu_common.h +++ b/drivers/media/platform/msm/npu/npu_common.h @@ -67,7 +67,7 @@ #define ROW_BYTES 16 #define GROUP_BYTES 4 -#define NUM_TOTAL_CLKS 19 +#define NUM_TOTAL_CLKS 20 #define NPU_MAX_REGULATOR_NUM 2 #define NPU_MAX_DT_NAME_LEN 21 #define NPU_MAX_PWRLEVELS 7 diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c index 996ca91ed44d4e8748cf8078d1ea3425fe76e079..95f3d7d93f01e60a800fc5caccdb386e195d3fd8 100644 --- a/drivers/media/platform/msm/npu/npu_debugfs.c +++ b/drivers/media/platform/msm/npu/npu_debugfs.c @@ -351,6 +351,7 @@ static ssize_t npu_debug_ctrl_write(struct file *file, char buf[24]; struct npu_device *npu_dev = file->private_data; struct npu_debugfs_ctx *debugfs; + int32_t rc = 0; pr_debug("npu_dev %pK %pK\n", npu_dev, 
g_npu_dev); npu_dev = g_npu_dev; @@ -381,6 +382,10 @@ static ssize_t npu_debug_ctrl_write(struct file *file, REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2); npu_disable_core_power(npu_dev); + } else if (strcmp(buf, "loopback") == 0) { + pr_debug("loopback test\n"); + rc = npu_host_loopback_test(npu_dev); + pr_debug("loopback test end: %d\n", rc); } else if (strcmp(buf, "0") == 0) { pr_info("setting power state to 0\n"); npu_dev->pwrctrl.active_pwrlevel = 0; diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c index 2cc21518989c3bdaea8addd90264904eda0b42cf..0a5745c16f2989b1e11fc30b51fae701624df725 100644 --- a/drivers/media/platform/msm/npu/npu_dev.c +++ b/drivers/media/platform/msm/npu/npu_dev.c @@ -64,7 +64,6 @@ static ssize_t npu_store_pwr_state(struct device *dev, static void npu_suspend_devbw(struct npu_device *npu_dev); static void npu_resume_devbw(struct npu_device *npu_dev); static bool npu_is_post_clock(const char *clk_name); -static bool npu_is_exclude_clock(const char *clk_name); static bool npu_is_exclude_rate_clock(const char *clk_name); static int npu_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state); @@ -99,13 +98,14 @@ static void __exit npu_exit(void); * ------------------------------------------------------------------------- */ static const char * const npu_clock_order[] = { + "qdss_clk", + "at_clk", + "trig_clk", "armwic_core_clk", - "cal_dp_clk_src", "cal_dp_clk", "cal_dp_cdc_clk", "conf_noc_ahb_clk", "comp_noc_axi_clk", - "npu_core_clk_src", "npu_core_clk", "npu_core_cti_clk", "npu_core_apb_clk", @@ -125,19 +125,14 @@ static const char * const npu_post_clocks[] = { "npu_cpc_timer_clk" }; -static const char * const npu_exclude_clocks[] = { - "npu_core_clk_src", - "cal_dp_clk_src", - "perf_cnt_clk", - "npu_core_cti_clk", - "npu_core_apb_clk", - "npu_core_atb_clk" -}; - static const char * const npu_exclude_rate_clocks[] = { + "qdss_clk", + "at_clk", + "trig_clk", "sleep_clk", "xo_clk", "conf_noc_ahb_clk", + "comp_noc_axi_clk", "npu_core_cti_clk", "npu_core_apb_clk", "npu_core_atb_clk", @@ -304,8 +299,7 @@ void npu_disable_core_power(struct npu_device *npu_dev) return; pwr->pwr_vote_num--; if (!pwr->pwr_vote_num) { - if (npu_dev->host_ctx.fw_state == FW_DISABLED) - npu_suspend_devbw(npu_dev); + npu_suspend_devbw(npu_dev); npu_disable_core_clocks(npu_dev); npu_disable_regulators(npu_dev); } @@ -354,7 +348,6 @@ static int npu_set_power_level(struct npu_device *npu_dev) struct npu_pwrctrl *pwr = &npu_dev->pwrctrl; struct npu_pwrlevel *pwrlevel; int i, ret = 0; - long clk_rate = 0; uint32_t pwr_level_to_set; if (!pwr->pwr_vote_num) { @@ -388,13 +381,12 @@ static int npu_set_power_level(struct npu_device *npu_dev) pr_debug("requested rate of clock [%s] to [%ld]\n", npu_dev->core_clks[i].clk_name, pwrlevel->clk_freq[i]); - pr_debug("actual round clk rate [%ld]\n", clk_rate); - - ret = clk_set_rate(npu_dev->core_clks[i].clk, clk_rate); + ret = clk_set_rate(npu_dev->core_clks[i].clk, + pwrlevel->clk_freq[i]); if (ret) { pr_debug("clk_set_rate %s to %ld failed with %d\n", npu_dev->core_clks[i].clk_name, - clk_rate, ret); + pwrlevel->clk_freq[i], ret); break; } } @@ -491,20 +483,6 @@ static bool npu_is_post_clock(const char *clk_name) return ret; } -static bool npu_is_exclude_clock(const char *clk_name) -{ - int ret = false; - int i; - - for (i = 0; i < ARRAY_SIZE(npu_exclude_clocks); i++) { - if (!strcmp(clk_name, npu_exclude_clocks[i])) { - ret = true; - break; - } - } - return ret; -} - static bool 
npu_is_exclude_rate_clock(const char *clk_name) { int ret = false; @@ -536,10 +514,7 @@ static int npu_enable_core_clocks(struct npu_device *npu_dev, bool post_pil) continue; } - if (npu_is_exclude_clock(core_clks[i].clk_name)) - continue; - - pr_debug("enabling clock [%s]\n", core_clks[i].clk_name); + pr_debug("enabling clock %s\n", core_clks[i].clk_name); rc = clk_prepare_enable(core_clks[i].clk); if (rc) { @@ -551,16 +526,31 @@ static int npu_enable_core_clocks(struct npu_device *npu_dev, bool post_pil) if (npu_is_exclude_rate_clock(core_clks[i].clk_name)) continue; - pr_debug("setting rate of clock [%s] to [%ld]\n", + pr_debug("setting rate of clock %s to %ld\n", core_clks[i].clk_name, pwrlevel->clk_freq[i]); rc = clk_set_rate(core_clks[i].clk, pwrlevel->clk_freq[i]); + /* not fatal error, keep using previous clk rate */ if (rc) { - pr_debug("clk_set_rate %s to %ld failed\n", + pr_err("clk_set_rate %s to %ld failed\n", core_clks[i].clk_name, pwrlevel->clk_freq[i]); - break; + rc = 0; + } + } + + if (rc) { + for (i--; i >= 0; i--) { + if (post_pil) { + if (!npu_is_post_clock(core_clks[i].clk_name)) + continue; + } else { + if (npu_is_post_clock(core_clks[i].clk_name)) + continue; + } + pr_debug("disabling clock %s\n", core_clks[i].clk_name); + clk_disable_unprepare(core_clks[i].clk); } } @@ -573,14 +563,12 @@ static void npu_disable_core_clocks(struct npu_device *npu_dev) struct npu_clk *core_clks = npu_dev->core_clks; for (i = (npu_dev->core_clk_num)-1; i >= 0 ; i--) { - if (npu_is_exclude_clock(core_clks[i].clk_name)) - continue; if (npu_dev->host_ctx.fw_state == FW_DISABLED) { if (npu_is_post_clock(npu_dev->core_clks[i].clk_name)) continue; } - pr_debug("disabling clock [%s]\n", core_clks[i].clk_name); + pr_debug("disabling clock %s\n", core_clks[i].clk_name); clk_disable_unprepare(core_clks[i].clk); } } @@ -1437,8 +1425,8 @@ static int npu_remove(struct platform_device *pdev) thermal_cooling_device_unregister(npu_dev->tcdev); npu_debugfs_deinit(npu_dev); npu_host_deinit(npu_dev); - arm_iommu_release_mapping(npu_dev->smmu_ctx.mmu_mapping); arm_iommu_detach_device(&(npu_dev->pdev->dev)); + arm_iommu_release_mapping(npu_dev->smmu_ctx.mmu_mapping); sysfs_remove_group(&npu_dev->device->kobj, &npu_fs_attr_group); cdev_del(&npu_dev->cdev); device_destroy(npu_dev->class, npu_dev->dev_num); diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.h b/drivers/media/platform/msm/npu/npu_host_ipc.h index 2acdced5550a058e9ffcec1a442e58e373c6b36f..e15db0c3e8fb43d2560c358c289977dd464809a7 100644 --- a/drivers/media/platform/msm/npu/npu_host_ipc.h +++ b/drivers/media/platform/msm/npu/npu_host_ipc.h @@ -32,6 +32,8 @@ #define NPU_IPC_CMD_CONFIG_PERFORMANCE 0x00000005 #define NPU_IPC_CMD_CONFIG_DEBUG 0x00000006 #define NPU_IPC_CMD_SHUTDOWN 0x00000007 +/* npu_ipc_cmd_loopback_packet_t */ +#define NPU_IPC_CMD_LOOPBACK 0x00000008 /* Messages sent **from** NPU */ /* IPC Message Response -- uint32_t */ @@ -45,6 +47,8 @@ #define NPU_IPC_MSG_EXECUTE_DONE 0x00010003 /* ipc_msg_event_notify_pkt */ #define NPU_IPC_MSG_EVENT_NOTIFY 0x00010004 +/* npu_ipc_msg_loopback_packet_t */ +#define NPU_IPC_MSG_LOOPBACK_DONE 0x00010005 /* Logging message size */ /* Number 32-bit elements for the maximum log message size */ @@ -191,6 +195,14 @@ struct ipc_cmd_execute_pkt { uint32_t aco_hdl; }; +/* + * Loopback packet definition + */ +struct ipc_cmd_loopback_pkt { + struct ipc_cmd_header_pkt header; + uint32_t loopbackParams; +}; + /* * LOAD response packet definition */ @@ -257,6 +269,14 @@ struct ipc_msg_execute_pkt { 
struct ipc_execute_stats stats; }; +/* + * LOOPBACK response packet definition + */ +struct ipc_msg_loopback_pkt { + struct ipc_msg_header_pkt header; + uint32_t loopbackParams; +}; + /* Logging Related */ /* diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c index 0348b3ef9788bf08645e17d2a6352e08fed3d654..0041a85e071e345a06eea9a691a1d61aeb280431 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.c +++ b/drivers/media/platform/msm/npu/npu_mgr.c @@ -43,7 +43,10 @@ static void host_irq_wq(struct work_struct *work); static void turn_off_fw_logging(struct npu_device *npu_dev); static int wait_for_fw_ready(struct npu_device *npu_dev); static struct npu_network *alloc_network(struct npu_host_ctx *ctx); -static struct npu_network *get_network(struct npu_host_ctx *ctx, int64_t id); +static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx, + uint32_t hdl); +static struct npu_network *get_network_by_id(struct npu_host_ctx *ctx, + int64_t id); static void free_network(struct npu_host_ctx *ctx, int64_t id); static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg); static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg); @@ -208,6 +211,7 @@ int npu_host_init(struct npu_device *npu_dev) init_completion(&host_ctx->exec_done); init_completion(&host_ctx->load_done); init_completion(&host_ctx->unload_done); + init_completion(&host_ctx->loopback_done); host_ctx->sys_cache_disable = 0; spin_lock_init(&host_ctx->lock); @@ -284,6 +288,7 @@ static int host_error_hdlr(struct npu_device *npu_dev) complete_all(&host_ctx->exec_done); complete_all(&host_ctx->load_done); complete_all(&host_ctx->unload_done); + complete_all(&host_ctx->loopback_done); return 1; } @@ -361,6 +366,7 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx) * by 1 for the next IPC cmd on the same network */ network->ipc_trans_id = 1; + network->network_hdl = 0; break; } network++; @@ -374,7 +380,28 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx) return network; } -static struct npu_network *get_network(struct npu_host_ctx *ctx, int64_t id) +static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx, + uint32_t hdl) +{ + int32_t i; + struct npu_network *network = ctx->networks; + + for (i = 0; i < MAX_LOADED_NETWORK; i++) { + if (network->network_hdl == hdl) + break; + + network++; + } + if (i == MAX_LOADED_NETWORK) { + pr_err("network hdl invalid %d\n", hdl); + network = NULL; + } + + return network; +} + +static struct npu_network *get_network_by_id(struct npu_host_ctx *ctx, + int64_t id) { if (id >= 1 && id <= MAX_LOADED_NETWORK) return &ctx->networks[id - 1]; @@ -384,7 +411,7 @@ static struct npu_network *get_network(struct npu_host_ctx *ctx, int64_t id) static void free_network(struct npu_host_ctx *ctx, int64_t id) { - struct npu_network *network = get_network(ctx, id); + struct npu_network *network = get_network_by_id(ctx, id); unsigned long flags; if (network) { @@ -405,6 +432,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) struct ipc_msg_header_pkt *resp_pkt; struct ipc_msg_load_pkt *load_rsp_pkt; struct ipc_msg_execute_pkt *exe_rsp_pkt; + struct ipc_msg_loopback_pkt *lb_rsp_pkt; msg_id = msg[1]; switch (msg_id) { @@ -440,7 +468,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) * the network ID on the way back */ network_id = load_rsp_pkt->header.flags; - network = get_network(host_ctx, network_id); + network = get_network_by_id(host_ctx, 
network_id); if (!network) { pr_err("can't find network %d\n", network_id); break; @@ -456,6 +484,14 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg) resp_pkt->status, resp_pkt->trans_id); complete_all(&host_ctx->unload_done); break; + + case NPU_IPC_MSG_LOOPBACK_DONE: + lb_rsp_pkt = (struct ipc_msg_loopback_pkt *)msg; + pr_debug("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n", + lb_rsp_pkt->loopbackParams); + complete_all(&host_ctx->loopback_done); + break; + default: pr_err("Not supported apps response received %d\n", msg_id); @@ -600,7 +636,6 @@ int32_t npu_host_load_network(struct npu_device *npu_dev, network->first_block_size = load_ioctl->first_block_size; network->priority = load_ioctl->priority; network->perf_mode = load_ioctl->perf_mode; - load_ioctl->network_hdl = network->id; networks_perf_mode = find_networks_perf_mode(host_ctx); @@ -646,6 +681,8 @@ int32_t npu_host_load_network(struct npu_device *npu_dev, goto error_free_network; } + load_ioctl->network_hdl = network->network_hdl; + return ret; error_free_network: @@ -664,7 +701,7 @@ int32_t npu_host_unload_network(struct npu_device *npu_dev, struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; /* get the corresponding network for ipc trans id purpose */ - network = get_network(host_ctx, (int64_t)unload->network_hdl); + network = get_network_by_hdl(host_ctx, unload->network_hdl); if (!network) return -EINVAL; @@ -698,7 +735,7 @@ int32_t npu_host_unload_network(struct npu_device *npu_dev, * free the network on the kernel if the corresponding ACO * handle is unloaded on the firmware side */ - free_network(host_ctx, (int64_t)unload->network_hdl); + free_network(host_ctx, network->id); fw_deinit(npu_dev, true); } @@ -718,7 +755,7 @@ int32_t npu_host_exec_network(struct npu_device *npu_dev, struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; int i = 0; - network = get_network(host_ctx, (int64_t)exec_ioctl->network_hdl); + network = get_network_by_hdl(host_ctx, exec_ioctl->network_hdl); if (!network) return -EINVAL; @@ -781,3 +818,36 @@ int32_t npu_host_exec_network(struct npu_device *npu_dev, return ret; } + +int32_t npu_host_loopback_test(struct npu_device *npu_dev) +{ + struct ipc_cmd_loopback_pkt loopback_packet; + struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; + int32_t ret; + + ret = fw_init(npu_dev); + if (ret) + return ret; + + loopback_packet.header.cmd_type = NPU_IPC_CMD_LOOPBACK; + loopback_packet.header.size = sizeof(struct ipc_cmd_loopback_pkt); + loopback_packet.header.trans_id = 0; + loopback_packet.header.flags = 0; + loopback_packet.loopbackParams = 15; + + reinit_completion(&host_ctx->loopback_done); + ret = npu_host_ipc_send_cmd(npu_dev, + IPC_QUEUE_APPS_EXEC, &loopback_packet); + + if (ret) { + pr_err("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret); + } else if (!wait_for_completion_interruptible_timeout( + &host_ctx->loopback_done, NW_LOAD_TIMEOUT)) { + pr_err_ratelimited("npu: NPU_IPC_CMD_LOOPBACK time out\n"); + ret = -ETIMEDOUT; + } + + fw_deinit(npu_dev, true); + + return ret; +} diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h index 80eaca5285870411b0da53be6f286f8471431496..ade06bbf7de200bbedf1c604a9e10f2d15beda27 100644 --- a/drivers/media/platform/msm/npu/npu_mgr.h +++ b/drivers/media/platform/msm/npu/npu_mgr.h @@ -73,6 +73,7 @@ struct npu_host_ctx { struct completion exec_done; struct completion load_done; struct completion unload_done; + struct completion loopback_done; int32_t network_num; struct npu_network 
networks[MAX_LOADED_NETWORK]; bool sys_cache_disable; @@ -111,6 +112,7 @@ int32_t npu_host_unload_network(struct npu_device *npu_dev, struct msm_npu_unload_network_ioctl *unload); int32_t npu_host_exec_network(struct npu_device *npu_dev, struct msm_npu_exec_network_ioctl *exec_ioctl); +int32_t npu_host_loopback_test(struct npu_device *npu_dev); void npu_dump_debug_timeout_stats(struct npu_device *npu_dev); void npu_dump_cal_state(struct npu_device *npu_dev); diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h index c5702e5603e08302ad2a98a487edd0cc7b3d47d9..c6e125986705d49386fe8f130d1638b163e6c929 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h @@ -128,6 +128,7 @@ enum sde_rot_type { * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2 * @SDE_CAPS_PARTIALWR: partial write override * @SDE_CAPS_HW_TIMESTAMP: rotator has hw timestamp support + * @SDE_CAPS_UBWC_3: universal bandwidth compression version 3 */ enum sde_caps_settings { SDE_CAPS_R1_WB, @@ -138,6 +139,7 @@ enum sde_caps_settings { SDE_CAPS_UBWC_2, SDE_CAPS_PARTIALWR, SDE_CAPS_HW_TIMESTAMP, + SDE_CAPS_UBWC_3, SDE_CAPS_MAX, }; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c index 585582deeee961bb258c9d478b6ebef3c8c94c87..28bcb18135948bbb062bfb6ae7972415b4adf98a 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c @@ -418,9 +418,6 @@ int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable) if (ret) goto error_rot_sub; - /* reinitialize static vbif setting */ - sde_mdp_init_vbif(); - /* Active+Sleep */ msm_bus_scale_client_update_context( mgr->data_bus.bus_hdl, false, @@ -587,17 +584,21 @@ static int sde_rotator_import_buffer(struct sde_layer_buffer *buffer, static int sde_rotator_secure_session_ctrl(bool enable) { struct sde_rot_data_type *mdata = sde_rot_get_mdata(); - uint32_t sid_info; + uint32_t *sid_info = NULL; struct scm_desc desc = {0}; unsigned int resp = 0; int ret = 0; - if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, - mdata->sde_caps_map)) { - sid_info = mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_SECURE].sid; + if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map)) { + + sid_info = kzalloc(sizeof(uint32_t), GFP_KERNEL); + if (!sid_info) + return -ENOMEM; + + sid_info[0] = mdata->sde_smmu[SDE_IOMMU_DOMAIN_ROT_SECURE].sid; desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL); desc.args[0] = SDE_ROTATOR_DEVICE; - desc.args[1] = SCM_BUFFER_PHYS(&sid_info); + desc.args[1] = SCM_BUFFER_PHYS(sid_info); desc.args[2] = sizeof(uint32_t); if (!mdata->sec_cam_en && enable) { @@ -611,7 +612,7 @@ static int sde_rotator_secure_session_ctrl(bool enable) mdata->sec_cam_en = 1; sde_smmu_secure_ctrl(0); - dmac_flush_range(&sid_info, &sid_info + 1); + dmac_flush_range(sid_info, sid_info + 1); ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, MEM_PROTECT_SD_CTRL_SWITCH), &desc); resp = desc.ret[0]; @@ -621,14 +622,16 @@ static int sde_rotator_secure_session_ctrl(bool enable) /* failure, attach smmu */ mdata->sec_cam_en = 0; sde_smmu_secure_ctrl(1); - return -EINVAL; + ret = -EINVAL; + goto end; } SDEROT_DBG( "scm(1) sid0x%x dev0x%llx vmid0x%llx ret%d resp%x\n", - sid_info, desc.args[0], desc.args[3], + sid_info[0], desc.args[0], desc.args[3], ret, resp); - SDEROT_EVTLOG(1, sid_info, desc.args[0], 
desc.args[3], + SDEROT_EVTLOG(1, sid_info, sid_info[0], + desc.args[0], desc.args[3], ret, resp); } else if (mdata->sec_cam_en && !enable) { /* @@ -639,25 +642,30 @@ static int sde_rotator_secure_session_ctrl(bool enable) desc.args[3] = VMID_CP_PIXEL; mdata->sec_cam_en = 0; - dmac_flush_range(&sid_info, &sid_info + 1); + dmac_flush_range(sid_info, sid_info + 1); ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, MEM_PROTECT_SD_CTRL_SWITCH), &desc); resp = desc.ret[0]; SDEROT_DBG( "scm(0) sid0x%x dev0x%llx vmid0x%llx ret%d resp%d\n", - sid_info, desc.args[0], desc.args[3], + sid_info[0], desc.args[0], desc.args[3], ret, resp); /* force smmu to reattach */ sde_smmu_secure_ctrl(1); - SDEROT_EVTLOG(0, sid_info, desc.args[0], desc.args[3], + SDEROT_EVTLOG(0, sid_info, sid_info[0], + desc.args[0], desc.args[3], ret, resp); } } else { return 0; } + +end: + kfree(sid_info); + if (ret) return ret; @@ -3342,6 +3350,7 @@ int sde_rotator_pm_resume(struct device *dev) */ pm_runtime_disable(dev); pm_runtime_set_suspended(dev); + pm_runtime_set_active(dev); pm_runtime_enable(dev); sde_rot_mgr_lock(mgr); diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c index 7d8f5fc9caeb1c4c90ded1ccd63c39c794e41db8..83a79139af467ccbb77fb75488fa2413f9cad78b 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c @@ -1077,6 +1077,7 @@ struct sde_rotator_ctx *sde_rotator_ctx_open( mutex_unlock(&rot_dev->lock); error_lock: kfree(ctx); + ctx = NULL; return ERR_PTR(ret); } @@ -1089,10 +1090,18 @@ struct sde_rotator_ctx *sde_rotator_ctx_open( static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx, struct file *file) { - struct sde_rotator_device *rot_dev = ctx->rot_dev; - u32 session_id = ctx->session_id; + struct sde_rotator_device *rot_dev; + u32 session_id; struct list_head *curr, *next; + if (!ctx) { + SDEROT_DBG("ctx is NULL\n"); + return -EINVAL; + } + + rot_dev = ctx->rot_dev; + session_id = ctx->session_id; + ATRACE_END(ctx->kobj.name); SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id); @@ -3707,6 +3716,7 @@ static struct platform_driver rotator_driver = { .name = SDE_ROTATOR_DRV_NAME, .of_match_table = sde_rotator_dt_match, .pm = &sde_rotator_pm_ops, + .suppress_bind_attrs = true, }, }; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index d9f3fdc012cd850208565c74ff2f9bb9ec5b4061..faa2d91a151b0ae52b839ced7eac183252929e56 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -1492,6 +1492,8 @@ static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx, ((ctx->rot->ubwc_malsize & 0x3) << 8) | ((ctx->rot->highest_bank & 0x3) << 4) | ((ctx->rot->ubwc_swizzle & 0x1) << 0)); + else if (test_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map)) + SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(30)); /* setup source buffer plane security status */ if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION | @@ -2795,6 +2797,9 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw, item->input.format, item->output.format, entry->perf->config.frame_rate); + /* initialize static vbif setting */ + sde_mdp_init_vbif(); + if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) { struct sde_mdp_set_ot_params ot_params; @@ -3075,7 +3080,7 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot) if 
(IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_500)) { SDEROT_DBG("Supporting sys cache inline rotation\n"); set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map); - set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map); + set_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map); set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map); set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map); rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] = diff --git a/drivers/media/platform/msm/vidc/governors/Makefile b/drivers/media/platform/msm/vidc/governors/Makefile index 695a3ae4053a01676c736b9d7963e999d14b5bf6..ee4b29298a680ba2bb3ce1ad23a674c0911fbaa1 100644 --- a/drivers/media/platform/msm/vidc/governors/Makefile +++ b/drivers/media/platform/msm/vidc/governors/Makefile @@ -4,4 +4,6 @@ ccflags-y := -I$(srctree)/drivers/devfreq/ \ msm-vidc-dyn-gov-objs := msm_vidc_dyn_gov.o -obj-$(CONFIG_MSM_VIDC_GOVERNORS) := msm-vidc-dyn-gov.o +msm-vidc-ar50-dyn-gov-objs := msm_vidc_ar50_dyn_gov.o + +obj-$(CONFIG_MSM_VIDC_GOVERNORS) := msm-vidc-dyn-gov.o msm-vidc-ar50-dyn-gov.o diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_ar50_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_ar50_dyn_gov.c new file mode 100644 index 0000000000000000000000000000000000000000..7fd073a140bf002aff1497781a9e3cb5e648f294 --- /dev/null +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_ar50_dyn_gov.c @@ -0,0 +1,982 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "governor.h" +#include "fixedpoint.h" +#include "msm_vidc_internal.h" +#include "msm_vidc_debug.h" +#include "vidc_hfi_api.h" +#define COMPRESSION_RATIO_MAX 5 + +static bool debug; +module_param(debug, bool, 0644); + +enum governor_mode { + GOVERNOR_DDR, + GOVERNOR_LLCC, +}; + +struct governor { + enum governor_mode mode; + struct devfreq_governor devfreq_gov; +}; + +/* + * Minimum dimensions that the governor is willing to calculate + * bandwidth for. This means that anything bandwidth(0, 0) == + * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height) + */ +static const struct { + int height, width; +} BASELINE_DIMENSIONS = { + .width = 1280, + .height = 720, +}; + +/* + * These are hardcoded AB values that the governor votes for in certain + * situations, where a certain bus frequency is desired. It isn't exactly + * scalable since different platforms have different bus widths, but we'll + * deal with that in the future. 
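Put differently, the governor never costs anything below 720p as cheaper than 720p; the calculators later in this file clamp the input dimensions before using them, along the lines of the following (a paraphrase of the max() calls that appear further down, not new behaviour):

    /* Requests smaller than the baseline are costed as if they were 1280x720. */
    width  = max(d->input_width,  BASELINE_DIMENSIONS.width);
    height = max(d->input_height, BASELINE_DIMENSIONS.height);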
+ */ +static const unsigned long NOMINAL_BW_MBPS = 6000 /* ideally 320 Mhz */, + SVS_BW_MBPS = 2000 /* ideally 100 Mhz */; + +/* converts Mbps to bps (the "b" part can be bits or bytes based on context) */ +#define kbps(__mbps) ((__mbps) * 1000) +#define bps(__mbps) (kbps(__mbps) * 1000) + +#define GENERATE_COMPRESSION_PROFILE(__bpp, __worst) { \ + .bpp = __bpp, \ + .ratio = __worst, \ +} + +/* + * The below table is a structural representation of the following table: + * Resolution | Bitrate | Compression Ratio | + * ............|............|.........................................| + * Width Height|Average High|Avg_8bpc Worst_8bpc Avg_10bpc Worst_10bpc| + * 1280 720| 7 14| 1.69 1.28 1.49 1.23| + * 1920 1080| 20 40| 1.69 1.28 1.49 1.23| + * 2560 1440| 32 64| 2.2 1.26 1.97 1.22| + * 3840 2160| 42 84| 2.2 1.26 1.97 1.22| + * 4096 2160| 44 88| 2.2 1.26 1.97 1.22| + * 4096 2304| 48 96| 2.2 1.26 1.97 1.22| + */ +static struct lut { + int frame_size; /* width x height */ + int frame_rate; + unsigned long bitrate; + struct { + int bpp; + fp_t ratio; + } compression_ratio[COMPRESSION_RATIO_MAX]; +} const LUT[] = { + { + .frame_size = 1280 * 720, + .frame_rate = 30, + .bitrate = 14, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 1280 * 720, + .frame_rate = 60, + .bitrate = 22, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 1920 * 1088, + .frame_rate = 30, + .bitrate = 40, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 1920 * 1088, + .frame_rate = 60, + .bitrate = 64, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 28, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 23, 100)), + } + }, + { + .frame_size = 2560 * 1440, + .frame_rate = 30, + .bitrate = 64, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 2560 * 1440, + .frame_rate = 60, + .bitrate = 102, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 3840 * 2160, + .frame_rate = 30, + .bitrate = 84, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 3840 * 2160, + .frame_rate = 60, + .bitrate = 134, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2160, + .frame_rate = 30, + .bitrate = 88, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2160, + .frame_rate = 60, + .bitrate = 141, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2304, + .frame_rate = 30, + .bitrate = 96, + .compression_ratio = { + GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, + { + .frame_size = 4096 * 2304, + .frame_rate = 60, + .bitrate = 154, + .compression_ratio = { + 
GENERATE_COMPRESSION_PROFILE(8, + FP(1, 26, 100)), + GENERATE_COMPRESSION_PROFILE(10, + FP(1, 22, 100)), + } + }, +}; + +static struct lut const *__lut(int width, int height, int fps) +{ + int frame_size = height * width, c = 0; + + do { + if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps) + return &LUT[c]; + } while (++c < ARRAY_SIZE(LUT)); + + return &LUT[ARRAY_SIZE(LUT) - 1]; +} + +static fp_t __compression_ratio(struct lut const *entry, int bpp) +{ + int c = 0; + + for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) { + if (entry->compression_ratio[c].bpp == bpp) + return entry->compression_ratio[c].ratio; + } + + WARN(true, "Shouldn't be here, LUT possibly corrupted?\n"); + return FP_ZERO; /* impossible */ +} + +#define DUMP_HEADER_MAGIC 0xdeadbeef +#define DUMP_FP_FMT "%FP" /* special format for fp_t */ +struct dump { + char *key; + char *format; + size_t val; +}; + +static void __dump(struct dump dump[], int len) +{ + int c = 0; + + for (c = 0; c < len; ++c) { + char format_line[128] = "", formatted_line[128] = ""; + + if (dump[c].val == DUMP_HEADER_MAGIC) { + snprintf(formatted_line, sizeof(formatted_line), "%s\n", + dump[c].key); + } else { + bool fp_format = !strcmp(dump[c].format, DUMP_FP_FMT); + + if (!fp_format) { + snprintf(format_line, sizeof(format_line), + " %-35s: %s\n", dump[c].key, + dump[c].format); + snprintf(formatted_line, sizeof(formatted_line), + format_line, dump[c].val); + } else { + size_t integer_part, fractional_part; + + integer_part = fp_int(dump[c].val); + fractional_part = fp_frac(dump[c].val); + snprintf(formatted_line, sizeof(formatted_line), + " %-35s: %zd + %zd/%zd\n", + dump[c].key, integer_part, + fractional_part, + fp_frac_base()); + + + } + } + + dprintk(VIDC_DBG, "%s", formatted_line); + } +} + +static unsigned long __calculate_vpe(struct vidc_bus_vote_data *d, + enum governor_mode gm) +{ + return 0; +} + +static bool __ubwc(enum hal_uncompressed_format f) +{ + switch (f) { + case HAL_COLOR_FORMAT_NV12_UBWC: + case HAL_COLOR_FORMAT_NV12_TP10_UBWC: + return true; + default: + return false; + } +} + +static int __bpp(enum hal_uncompressed_format f) +{ + switch (f) { + case HAL_COLOR_FORMAT_NV12: + case HAL_COLOR_FORMAT_NV21: + case HAL_COLOR_FORMAT_NV12_UBWC: + return 8; + case HAL_COLOR_FORMAT_NV12_TP10_UBWC: + case HAL_COLOR_FORMAT_P010: + return 10; + default: + dprintk(VIDC_ERR, + "What's this? We don't support this colorformat (%x)", + f); + return INT_MAX; + } +} + +static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, + enum governor_mode gm) +{ + /* + * XXX: Don't fool around with any of the hardcoded numbers unless you + * know /exactly/ what you're doing. Many of these numbers are + * measured heuristics and hardcoded numbers taken from the firmware. 
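A note on how the table above is consulted: __lut() walks the LUT in order and returns the first row whose frame_size and frame_rate both cover the request, falling back to the largest row. For instance, with values read off the table above (the caller shown is hypothetical, standing in for the decoder/encoder paths below):

    /* 1920x1080 @ 30 fps matches the 1920*1088 @ 30 row -> 40 Mbit/s,
     * with an 8 bpc worst-case compression ratio of FP(1, 28, 100), i.e. 1.28. */
    struct lut const *entry = __lut(1920, 1080, 30);
    unsigned long bitrate = entry->bitrate;          /* 40 */
    fp_t ratio = __compression_ratio(entry, 8);      /* FP(1, 28, 100) */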
+ */ + /* Decoder parameters */ + int width, height, lcu_size, dpb_bpp, opb_bpp, fps, opb_factor; + bool unified_dpb_opb, dpb_compression_enabled, opb_compression_enabled, + llc_ref_read_l2_cache_enabled = false, + llc_vpss_ds_line_buf_enabled = false; + fp_t dpb_opb_scaling_ratio, dpb_read_compression_factor, + dpb_write_compression_factor, opb_compression_factor, + qsmmu_bw_overhead_factor, height_ratio; + + /* Derived parameters */ + int lcu_per_frame, tnbr_per_lcu, colocated_bytes_per_lcu; + unsigned long bitrate; + + fp_t bins_to_bit_factor, dpb_write_factor, ten_bpc_packing_factor, + ten_bpc_bpp_factor, vsp_read_factor, vsp_write_factor, + bw_for_1x_8bpc, dpb_bw_for_1x, + motion_vector_complexity = 0, row_cache_penalty = 0, opb_bw = 0, + dpb_total = 0; + + /* Output parameters */ + struct { + fp_t vsp_read, vsp_write, collocated_read, collocated_write, + line_buffer_read, line_buffer_write, recon_read, + recon_write, opb_read, opb_write, dpb_read, dpb_write, + total; + } ddr = {0}; + + struct { + fp_t dpb_read, opb_read, total; + } llc = {0}; + + unsigned long ret = 0; + unsigned int integer_part, frac_part; + + width = max(d->input_width, BASELINE_DIMENSIONS.width); + height = max(d->input_height, BASELINE_DIMENSIONS.height); + + lcu_size = d->lcu_size; + + dpb_bpp = d->num_formats >= 1 ? __bpp(d->color_formats[0]) : INT_MAX; + opb_bpp = d->num_formats >= 2 ? __bpp(d->color_formats[1]) : dpb_bpp; + + fps = d->fps; + + unified_dpb_opb = d->num_formats == 1; + + dpb_opb_scaling_ratio = fp_div(FP_INT( + (int)(d->input_width * d->input_height)), + FP_INT((int)(d->output_width * d->output_height))); + height_ratio = fp_div(d->input_height, d->output_height); + + dpb_compression_enabled = d->num_formats >= 1 && + __ubwc(d->color_formats[0]); + opb_compression_enabled = d->num_formats >= 2 && + __ubwc(d->color_formats[1]); + + /* + * Convert Q16 number into Integer and Fractional part upto 2 places. + * Ex : 105752 / 65536 = 1.61; 1.61 in Q16 = 105752; + * Integer part = 105752 / 65536 = 1; + * Reminder = 105752 - 1 * 65536 = 40216; + * Fractional part = 40216 * 100 / 65536 = 61; + * Now converto to FP(1, 61, 100) for below code. + */ + + integer_part = d->compression_ratio >> 16; + frac_part = + ((d->compression_ratio - (integer_part << 16)) * 100) >> 16; + + dpb_read_compression_factor = FP(integer_part, frac_part, 100); + + integer_part = d->complexity_factor >> 16; + frac_part = + ((d->complexity_factor - (integer_part << 16)) * 100) >> 16; + + motion_vector_complexity = FP(integer_part, frac_part, 100); + + dpb_write_compression_factor = !dpb_compression_enabled ? FP_ONE : + __compression_ratio(__lut(width, height, fps), opb_bpp); + + dpb_write_compression_factor = d->use_dpb_read ? + dpb_read_compression_factor : + dpb_write_compression_factor; + + opb_compression_factor = !opb_compression_enabled ? FP_ONE : + __compression_ratio(__lut(width, height, fps), opb_bpp); + + llc_ref_read_l2_cache_enabled = llc_vpss_ds_line_buf_enabled = false; + if (d->use_sys_cache) { + llc_ref_read_l2_cache_enabled = true; + llc_vpss_ds_line_buf_enabled = true; + } + + /* Derived parameters setup */ + lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * + DIV_ROUND_UP(height, lcu_size); + + bitrate = __lut(width, height, fps)->bitrate; + + bins_to_bit_factor = d->work_mode == VIDC_WORK_MODE_1 ? 
+ FP_INT(0) : FP_INT(4); + + vsp_read_factor = bins_to_bit_factor + FP_INT(2); + + dpb_write_factor = FP(1, 5, 100); + + ten_bpc_packing_factor = FP(1, 67, 1000); + ten_bpc_bpp_factor = FP(1, 1, 4); + + vsp_write_factor = bins_to_bit_factor; + + tnbr_per_lcu = lcu_size == 16 ? 128 : + lcu_size == 32 ? 64 : 128; + + colocated_bytes_per_lcu = lcu_size == 16 ? 16 : + lcu_size == 32 ? 64 : 256; + + /* ........................................ for DDR */ + ddr.vsp_read = fp_div(fp_mult(FP_INT(bitrate), + vsp_read_factor), FP_INT(8)); + ddr.vsp_write = fp_div(fp_mult(FP_INT(bitrate), + vsp_write_factor), FP_INT(8)); + + ddr.collocated_read = FP_INT(lcu_per_frame * + colocated_bytes_per_lcu * fps / bps(1)); + ddr.collocated_write = FP_INT(lcu_per_frame * + colocated_bytes_per_lcu * fps / bps(1)); + + ddr.line_buffer_read = FP_INT(tnbr_per_lcu * + lcu_per_frame * fps / bps(1)); + ddr.line_buffer_write = ddr.line_buffer_read; + + bw_for_1x_8bpc = fp_div(FP_INT((int)(width * height)), FP_INT(32 * 8)); + + bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc, + fp_div(FP_INT(((int)(256 * fps))), FP_INT(1000 * 1000))); + + dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc : + fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor, + ten_bpc_bpp_factor)); + + ddr.dpb_read = fp_div(fp_mult(fp_mult(dpb_bw_for_1x, + motion_vector_complexity), dpb_write_factor), + dpb_read_compression_factor); + + ddr.dpb_write = fp_div(fp_mult(dpb_bw_for_1x, dpb_write_factor), + dpb_write_compression_factor); + dpb_total = ddr.dpb_read + ddr.dpb_write; + if (llc_ref_read_l2_cache_enabled) { + row_cache_penalty = FP(1, 30, 100); + ddr.dpb_read = fp_div(ddr.dpb_read, row_cache_penalty); + llc.dpb_read = dpb_total - ddr.dpb_read; + } + + opb_factor = dpb_bpp == 8 ? 8 : 4; + + ddr.opb_read = unified_dpb_opb ? 0 : opb_compression_enabled ? + fp_div(fp_mult(fp_div(dpb_bw_for_1x, dpb_opb_scaling_ratio), + FP_INT(opb_factor)), height_ratio) : 0; + ddr.opb_write = unified_dpb_opb ? 0 : opb_compression_enabled ? 
+ ddr.dpb_read : fp_div(fp_div(fp_mult(dpb_bw_for_1x, + FP(1, 50, 100)), dpb_opb_scaling_ratio), + opb_compression_factor); + + if (llc_vpss_ds_line_buf_enabled) { + llc.opb_read = ddr.opb_read; + ddr.opb_write -= ddr.opb_read; + ddr.opb_read = 0; + } + ddr.total = ddr.vsp_read + ddr.vsp_write + + ddr.collocated_read + ddr.collocated_write + + ddr.opb_read + ddr.opb_write + + ddr.dpb_read + ddr.dpb_write; + + qsmmu_bw_overhead_factor = FP(1, 3, 100); + + ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor); + llc.total = llc.dpb_read + llc.opb_read; + + /* Dump all the variables for easier debugging */ + if (debug) { + struct dump dump[] = { + {"DECODER PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"LCU size", "%d", lcu_size}, + {"DPB bitdepth", "%d", dpb_bpp}, + {"frame rate", "%d", fps}, + {"DPB/OPB unified", "%d", unified_dpb_opb}, + {"DPB/OPB downscaling ratio", DUMP_FP_FMT, + dpb_opb_scaling_ratio}, + {"DPB compression", "%d", dpb_compression_enabled}, + {"OPB compression", "%d", opb_compression_enabled}, + {"DPB Read compression factor", DUMP_FP_FMT, + dpb_read_compression_factor}, + {"DPB Write compression factor", DUMP_FP_FMT, + dpb_write_compression_factor}, + {"OPB compression factor", DUMP_FP_FMT, + opb_compression_factor}, + {"frame width", "%d", width}, + {"frame height", "%d", height}, + + {"DERIVED PARAMETERS (1)", "", DUMP_HEADER_MAGIC}, + {"LCUs/frame", "%d", lcu_per_frame}, + {"bitrate (Mbit/sec)", "%d", bitrate}, + {"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor}, + {"DPB write factor", DUMP_FP_FMT, dpb_write_factor}, + {"10bpc packing factor", DUMP_FP_FMT, + ten_bpc_packing_factor}, + {"10bpc,BPP factor", DUMP_FP_FMT, ten_bpc_bpp_factor}, + {"VSP read factor", DUMP_FP_FMT, vsp_read_factor}, + {"VSP write factor", DUMP_FP_FMT, vsp_write_factor}, + {"TNBR/LCU", "%d", tnbr_per_lcu}, + {"colocated bytes/LCU", "%d", colocated_bytes_per_lcu}, + {"B/W for 1x (NV12 8bpc)", DUMP_FP_FMT, bw_for_1x_8bpc}, + {"DPB B/W For 1x (NV12)", DUMP_FP_FMT, dpb_bw_for_1x}, + + {"DERIVED PARAMETERS (2)", "", DUMP_HEADER_MAGIC}, + {"MV complexity", DUMP_FP_FMT, motion_vector_complexity}, + {"row cache penalty", DUMP_FP_FMT, row_cache_penalty}, + {"qsmmu_bw_overhead_factor", DUMP_FP_FMT, + qsmmu_bw_overhead_factor}, + {"OPB B/W (single instance)", DUMP_FP_FMT, opb_bw}, + + {"INTERMEDIATE DDR B/W", "", DUMP_HEADER_MAGIC}, + {"VSP read", DUMP_FP_FMT, ddr.vsp_read}, + {"VSP write", DUMP_FP_FMT, ddr.vsp_write}, + {"collocated read", DUMP_FP_FMT, ddr.collocated_read}, + {"collocated write", DUMP_FP_FMT, ddr.collocated_write}, + {"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read}, + {"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write}, + {"recon read", DUMP_FP_FMT, ddr.recon_read}, + {"recon write", DUMP_FP_FMT, ddr.recon_write}, + {"OPB read", DUMP_FP_FMT, ddr.opb_read}, + {"OPB write", DUMP_FP_FMT, ddr.opb_write}, + {"DPB read", DUMP_FP_FMT, ddr.dpb_read}, + {"DPB write", DUMP_FP_FMT, ddr.dpb_write}, + {"LLC DPB read", DUMP_FP_FMT, llc.dpb_read}, + {"LLC OPB read", DUMP_FP_FMT, llc.opb_read}, + + }; + __dump(dump, ARRAY_SIZE(dump)); + } + + switch (gm) { + case GOVERNOR_DDR: + ret = kbps(fp_round(ddr.total)); + break; + case GOVERNOR_LLCC: + ret = kbps(fp_round(llc.total)); + break; + default: + dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__); + } + + return ret; +} + +static unsigned long __calculate_encoder(struct vidc_bus_vote_data *d, + enum governor_mode gm) +{ + /* + * XXX: Don't fool around with any of the hardcoded numbers unless you + * know /exactly/ what you're doing. 
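On units: the intermediate ddr.* and llc.* terms are accumulated in MB/s (the kbps()/bps() macros deliberately leave bits-versus-bytes to context), and the value handed back to devfreq is produced by rounding the fixed-point total and scaling it with kbps(), as in the switch statement just above. A hypothetical total, to make the conversion concrete (the 1500.25 figure is invented purely for illustration):

    fp_t total = FP(1500, 25, 100);              /* ~1500.25 MB/s */
    unsigned long vote = kbps(fp_round(total));  /* 1500 * 1000 = 1500000 */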
Many of these numbers are + * measured heuristics and hardcoded numbers taken from the firmware. + */ + /* Encoder Parameters */ + + int width, height, fps, dpb_bpp, lcu_per_frame, lcu_size, + vertical_tile_width, colocated_bytes_per_lcu, bitrate, + ref_overlap_bw_factor; + enum hal_uncompressed_format dpb_color_format, original_color_format; + bool dpb_compression_enabled, original_compression_enabled, + work_mode_1, low_power, rotation, cropping_or_scaling, + b_frames_enabled = false, + llc_dual_core_ref_read_buf_enabled = false, + llc_top_line_buf_enabled = false, + llc_ref_chroma_cache_enabled = false; + fp_t dpb_compression_factor, original_compression_factor, + input_compression_factor, qsmmu_bw_overhead_factor, + ref_y_bw_factor, ref_cb_cr_bw_factor, ten_bpc_bpp_factor, + bw_for_1x_8bpc, dpb_bw_for_1x, ref_cb_cr_read, + bins_to_bit_factor, ref_y_read, ten_bpc_packing_factor, + dpb_write_factor, ref_overlap_bw, llc_ref_y_read, + llc_ref_cb_cr_read; + fp_t integer_part, frac_part; + unsigned long ret = 0; + + /* Output parameters */ + struct { + fp_t vsp_read, vsp_write, collocated_read, collocated_write, + line_buffer_read, line_buffer_write, original_read, + original_write, dpb_read, dpb_write, total; + } ddr = {0}; + + struct { + fp_t dpb_read, line_buffer, total; + } llc = {0}; + + /* Encoder Parameters setup */ + ten_bpc_packing_factor = FP(1, 67, 1000); + ten_bpc_bpp_factor = FP(1, 1, 4); + rotation = false; + cropping_or_scaling = false; + vertical_tile_width = 960; + ref_y_bw_factor = FP(1, 30, 100); + ref_cb_cr_bw_factor = FP(1, 50, 100); + dpb_write_factor = FP(1, 8, 100); + + + /* Derived Parameters */ + lcu_size = d->lcu_size; + fps = d->fps; + b_frames_enabled = d->b_frames_enabled; + width = max(d->input_width, BASELINE_DIMENSIONS.width); + height = max(d->input_height, BASELINE_DIMENSIONS.height); + bitrate = __lut(width, height, fps)->bitrate; + lcu_per_frame = DIV_ROUND_UP(width, lcu_size) * + DIV_ROUND_UP(height, lcu_size); + + dpb_color_format = HAL_COLOR_FORMAT_NV12_UBWC; + original_color_format = d->num_formats >= 1 ? + d->color_formats[0] : HAL_UNUSED_COLOR; + + dpb_bpp = d->num_formats >= 1 ? __bpp(d->color_formats[0]) : INT_MAX; + + dpb_compression_enabled = __ubwc(dpb_color_format); + original_compression_enabled = __ubwc(original_color_format); + + work_mode_1 = d->work_mode == VIDC_WORK_MODE_1; + low_power = d->power_mode == VIDC_POWER_LOW; + bins_to_bit_factor = work_mode_1 ? + FP_INT(0) : FP_INT(4); + + if (d->use_sys_cache) { + llc_dual_core_ref_read_buf_enabled = true; + llc_ref_chroma_cache_enabled = true; + } + + /* + * Convert Q16 number into Integer and Fractional part upto 2 places. + * Ex : 105752 / 65536 = 1.61; 1.61 in Q16 = 105752; + * Integer part = 105752 / 65536 = 1; + * Reminder = 105752 - 1 * 65536 = 40216; + * Fractional part = 40216 * 100 / 65536 = 61; + * Now converto to FP(1, 61, 100) for below code. + */ + + integer_part = d->compression_ratio >> 16; + frac_part = + ((d->compression_ratio - (integer_part * 65536)) * 100) >> 16; + + dpb_compression_factor = FP(integer_part, frac_part, 100); + + integer_part = d->input_cr >> 16; + frac_part = + ((d->input_cr - (integer_part * 65536)) * 100) >> 16; + + input_compression_factor = FP(integer_part, frac_part, 100); + + original_compression_factor = + original_compression_enabled ? d->use_dpb_read ? 
+ dpb_compression_factor : input_compression_factor : + FP_ONE; + + ddr.vsp_read = fp_mult(fp_div(FP_INT(bitrate), FP_INT(8)), + bins_to_bit_factor); + ddr.vsp_write = ddr.vsp_read + fp_div(FP_INT(bitrate), FP_INT(8)); + + colocated_bytes_per_lcu = lcu_size == 16 ? 16 : + lcu_size == 32 ? 64 : 256; + + ddr.collocated_read = FP_INT(lcu_per_frame * + colocated_bytes_per_lcu * fps / bps(1)); + + ddr.collocated_write = ddr.collocated_read; + + ddr.line_buffer_read = FP_INT(16 * lcu_per_frame * fps / bps(1)); + + ddr.line_buffer_write = ddr.line_buffer_read; + + llc.line_buffer = ddr.line_buffer_read + ddr.line_buffer_write; + if (llc_top_line_buf_enabled) + ddr.line_buffer_read = ddr.line_buffer_write = FP_INT(0); + + llc.line_buffer -= (ddr.line_buffer_read + ddr.line_buffer_write); + + bw_for_1x_8bpc = fp_div(FP_INT((int)(width * height)), FP_INT(32 * 8)); + + bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc, + fp_div(FP_INT(((int)(256 * fps))), FP_INT(1000 * 1000))); + + dpb_bw_for_1x = dpb_bpp == 8 ? bw_for_1x_8bpc : + fp_mult(bw_for_1x_8bpc, fp_mult(ten_bpc_packing_factor, + ten_bpc_bpp_factor)); + + ddr.original_read = fp_div(fp_mult(FP(1, 50, 100), dpb_bw_for_1x), + input_compression_factor); + + ddr.original_write = FP_ZERO; + + ref_y_bw_factor = + width == vertical_tile_width ? FP_INT(1) : ref_y_bw_factor; + + ref_y_read = fp_mult(ref_y_bw_factor, dpb_bw_for_1x); + + ref_y_read = fp_div(ref_y_read, dpb_compression_factor); + + ref_y_read = + b_frames_enabled ? fp_mult(ref_y_read, FP_INT(2)) : ref_y_read; + + llc_ref_y_read = ref_y_read; + if (llc_dual_core_ref_read_buf_enabled) + ref_y_read = fp_div(ref_y_read, FP_INT(2)); + + llc_ref_y_read -= ref_y_read; + + ref_cb_cr_read = fp_mult(ref_cb_cr_bw_factor, dpb_bw_for_1x); + + ref_cb_cr_read = fp_div(ref_cb_cr_read, dpb_compression_factor); + + ref_cb_cr_read = + b_frames_enabled ? fp_mult(ref_cb_cr_read, FP_INT(2)) : + ref_cb_cr_read; + + llc_ref_cb_cr_read = ref_cb_cr_read; + + if (llc_ref_chroma_cache_enabled) + ref_cb_cr_read = fp_div(ref_cb_cr_read, ref_cb_cr_bw_factor); + + if (llc_dual_core_ref_read_buf_enabled) + ref_cb_cr_read = fp_div(ref_cb_cr_read, FP_INT(2)); + + llc_ref_cb_cr_read -= ref_cb_cr_read; + + ddr.dpb_write = fp_mult(dpb_write_factor, dpb_bw_for_1x); + + ddr.dpb_write = fp_mult(ddr.dpb_write, FP(1, 50, 100)); + + ddr.dpb_write = fp_div(ddr.dpb_write, input_compression_factor); + + ref_overlap_bw_factor = + width <= vertical_tile_width ? 
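/*
 * Worked note, not part of this patch: the LLC bookkeeping above always
 * follows the same shape -- record the full demand, shrink what DDR has to
 * serve, and keep the difference as LLC traffic. For the luma reference
 * reads with the dual-core read buffer cached:
 *   llc_ref_y_read = ref_y_read;       // total demand
 *   ref_y_read    /= 2;                // DDR now serves half
 *   llc_ref_y_read -= ref_y_read;      // the other half is absorbed by LLC
 */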
FP_INT(0) : FP_INT(1); + + ref_overlap_bw = fp_mult(ddr.dpb_write, ref_overlap_bw_factor); + + ref_overlap_bw = fp_div(ref_overlap_bw, dpb_write_factor); + + ref_overlap_bw = fp_mult(ref_overlap_bw, + (dpb_write_factor - FP_INT(1))); + + ddr.dpb_read = ref_y_read + ref_cb_cr_read + ref_overlap_bw; + + llc.dpb_read = llc_ref_y_read + llc_ref_cb_cr_read; + + ddr.total = ddr.vsp_read + ddr.vsp_write + + ddr.collocated_read + ddr.collocated_write + + ddr.line_buffer_read + ddr.line_buffer_write + + ddr.original_read + ddr.original_write + + ddr.dpb_read + ddr.dpb_write; + + llc.total = llc.dpb_read + llc.line_buffer; + + qsmmu_bw_overhead_factor = FP(1, 3, 100); + ddr.total = fp_mult(ddr.total, qsmmu_bw_overhead_factor); + + if (debug) { + struct dump dump[] = { + {"ENCODER PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"width", "%d", width}, + {"height", "%d", height}, + {"DPB format", "%#x", dpb_color_format}, + {"original frame format", "%#x", original_color_format}, + {"fps", "%d", fps}, + {"DPB compression enable", "%d", dpb_compression_enabled}, + {"original compression enable", "%d", + original_compression_enabled}, + {"low power mode", "%d", low_power}, + {"Work Mode", "%d", work_mode_1}, + {"DPB compression factor", DUMP_FP_FMT, + dpb_compression_factor}, + {"original compression factor", DUMP_FP_FMT, + original_compression_factor}, + {"rotation", "%d", rotation}, + {"cropping or scaling", "%d", cropping_or_scaling}, + + {"DERIVED PARAMETERS", "", DUMP_HEADER_MAGIC}, + {"LCU size", "%d", lcu_size}, + {"bitrate (Mbit/sec)", "%lu", bitrate}, + {"bins to bit factor", DUMP_FP_FMT, bins_to_bit_factor}, + {"qsmmu_bw_overhead_factor", + DUMP_FP_FMT, qsmmu_bw_overhead_factor}, + + {"INTERMEDIATE B/W DDR", "", DUMP_HEADER_MAGIC}, + {"ref_y_read", DUMP_FP_FMT, ref_y_read}, + {"ref_cb_cr_read", DUMP_FP_FMT, ref_cb_cr_read}, + {"ref_overlap_bw", DUMP_FP_FMT, ref_overlap_bw}, + {"VSP read", DUMP_FP_FMT, ddr.vsp_read}, + {"VSP write", DUMP_FP_FMT, ddr.vsp_write}, + {"collocated read", DUMP_FP_FMT, ddr.collocated_read}, + {"collocated write", DUMP_FP_FMT, ddr.collocated_write}, + {"line buffer read", DUMP_FP_FMT, ddr.line_buffer_read}, + {"line buffer write", DUMP_FP_FMT, ddr.line_buffer_write}, + {"original read", DUMP_FP_FMT, ddr.original_read}, + {"original write", DUMP_FP_FMT, ddr.original_write}, + {"DPB read", DUMP_FP_FMT, ddr.dpb_read}, + {"DPB write", DUMP_FP_FMT, ddr.dpb_write}, + {"LLC DPB read", DUMP_FP_FMT, llc.dpb_read}, + {"LLC Line buffer", DUMP_FP_FMT, llc.line_buffer}, + }; + __dump(dump, ARRAY_SIZE(dump)); + } + + switch (gm) { + case GOVERNOR_DDR: + ret = kbps(fp_round(ddr.total)); + break; + case GOVERNOR_LLCC: + ret = kbps(fp_round(llc.total)); + break; + default: + dprintk(VIDC_ERR, "%s - Unknown governor\n", __func__); + } + + return ret; +} + +static unsigned long __calculate(struct vidc_bus_vote_data *d, + enum governor_mode gm) +{ + unsigned long (*calc[])(struct vidc_bus_vote_data *, + enum governor_mode) = { + [HAL_VIDEO_DOMAIN_VPE] = __calculate_vpe, + [HAL_VIDEO_DOMAIN_ENCODER] = __calculate_encoder, + [HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder, + }; + + if (d->domain >= ARRAY_SIZE(calc)) { + dprintk(VIDC_ERR, "%s: invalid domain %d\n", + __func__, d->domain); + return 0; + } + return calc[d->domain](d, gm); +} + + +static int __get_target_freq(struct devfreq *dev, unsigned long *freq) +{ + unsigned long ab_kbps = 0, c = 0; + struct devfreq_dev_status stats = {0}; + struct msm_vidc_gov_data *vidc_data = NULL; + struct governor *gov = NULL; + + if (!dev || !freq) + return 
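/*
 * Illustrative sketch, not part of this patch: the table-driven dispatch
 * used by __calculate() above, reduced to its essentials. The bounds check
 * is what keeps an unexpected domain value from indexing past the table; a
 * defensive variant also rejects unset (NULL) slots. The helper name is
 * hypothetical.
 */
typedef unsigned long (*bw_calc_fn)(struct vidc_bus_vote_data *,
		enum governor_mode);

static unsigned long dispatch_bw_calc(bw_calc_fn *table, size_t n,
		unsigned int domain, struct vidc_bus_vote_data *d,
		enum governor_mode gm)
{
	if (domain >= n || !table[domain])
		return 0;
	return table[domain](d, gm);
}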
-EINVAL; + + gov = container_of(dev->governor, + struct governor, devfreq_gov); + dev->profile->get_dev_status(dev->dev.parent, &stats); + vidc_data = (struct msm_vidc_gov_data *)stats.private_data; + + if (!vidc_data || !vidc_data->data_count) + goto exit; + + for (c = 0; c < vidc_data->data_count; ++c) { + if (vidc_data->data->power_mode == VIDC_POWER_TURBO) { + ab_kbps = INT_MAX; + goto exit; + } + } + + for (c = 0; c < vidc_data->data_count; ++c) + ab_kbps += __calculate(&vidc_data->data[c], gov->mode); + +exit: + *freq = clamp(ab_kbps, dev->min_freq, dev->max_freq ?: UINT_MAX); + trace_msm_vidc_perf_bus_vote(gov->devfreq_gov.name, *freq); + return 0; +} + +static int __event_handler(struct devfreq *devfreq, unsigned int event, + void *data) +{ + int rc = 0; + + if (!devfreq) + return -EINVAL; + + switch (event) { + case DEVFREQ_GOV_START: + case DEVFREQ_GOV_RESUME: + case DEVFREQ_GOV_SUSPEND: + mutex_lock(&devfreq->lock); + rc = update_devfreq(devfreq); + mutex_unlock(&devfreq->lock); + break; + } + + return rc; +} + +static struct governor governors[] = { + { + .mode = GOVERNOR_DDR, + .devfreq_gov = { + .name = "vidc-ar50-ddr", + .get_target_freq = __get_target_freq, + .event_handler = __event_handler, + }, + }, + { + .mode = GOVERNOR_LLCC, + .devfreq_gov = { + .name = "vidc-ar50-llcc", + .get_target_freq = __get_target_freq, + .event_handler = __event_handler, + }, + }, +}; + +static int __init msm_vidc_ar50_bw_gov_init(void) +{ + int c = 0, rc = 0; + + for (c = 0; c < ARRAY_SIZE(governors); ++c) { + dprintk(VIDC_DBG, "Adding governor %s\n", + governors[c].devfreq_gov.name); + + rc = devfreq_add_governor(&governors[c].devfreq_gov); + if (rc) { + dprintk(VIDC_ERR, "Error adding governor %s: %d\n", + governors[c].devfreq_gov.name, rc); + break; + } + } + + return rc; +} +module_init(msm_vidc_ar50_bw_gov_init); + +static void __exit msm_vidc_ar50_bw_gov_exit(void) +{ + int c = 0; + + for (c = 0; c < ARRAY_SIZE(governors); ++c) { + dprintk(VIDC_DBG, "Removing governor %s\n", + governors[c].devfreq_gov.name); + devfreq_remove_governor(&governors[c].devfreq_gov); + } +} +module_exit(msm_vidc_ar50_bw_gov_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c index 5dbed452206c91b3bf2f23fc4c4329d4be5a8d87..611c52cfe9d0d7c17dc8d826919ded5c39339acd 100644 --- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c @@ -36,7 +36,7 @@ struct governor { * bandwidth for. 
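/*
 * Worked note, not part of this patch: the final clamp above leans on the
 * GCC "?:" extension -- `x ?: y` is x when x is non-zero, otherwise y -- so
 * a devfreq device with max_freq left at 0 falls back to UINT_MAX instead
 * of clamping every vote down to zero. A hypothetical stand-alone
 * equivalent:
 */
static unsigned long clamp_bus_vote(unsigned long ab_kbps,
		unsigned long min_freq, unsigned long max_freq)
{
	return clamp(ab_kbps, min_freq, max_freq ?: UINT_MAX);
}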
This means that anything bandwidth(0, 0) == * bandwidth(BASELINE_DIMENSIONS.width, BASELINE_DIMENSIONS.height) */ -const struct { +static const struct { int height, width; } BASELINE_DIMENSIONS = { .width = 1280, diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index f1ba2f85a4152080e2af7f6ff27f6427c0fb9aa7..4be2369ac7389d307d47950d7623aec7b42990e8 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -1001,6 +1001,18 @@ int create_pkt_cmd_session_set_property( pkt->size += sizeof(struct hfi_frame_rate); break; } + case HAL_CONFIG_OPERATING_RATE: + { + struct hfi_operating_rate *hfi; + struct hal_operating_rate *prop = + (struct hal_operating_rate *) pdata; + + pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_OPERATING_RATE; + hfi = (struct hfi_operating_rate *) &pkt->rg_property_data[1]; + hfi->operating_rate = prop->operating_rate; + pkt->size += sizeof(struct hfi_operating_rate); + break; + } case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT: { u32 buffer_type; @@ -1025,7 +1037,35 @@ int create_pkt_cmd_session_set_property( break; } case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO: + { + struct hfi_uncompressed_plane_actual_constraints_info *hfi; + struct hal_uncompressed_plane_actual_constraints_info *prop = + (struct hal_uncompressed_plane_actual_constraints_info *) pdata; + u32 buffer_type; + u32 num_plane = prop->num_planes; + u32 hfi_pkt_size = + 2 * sizeof(u32) + + num_plane + * sizeof(struct hal_uncompressed_plane_constraints); + + pkt->rg_property_data[0] = + HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO; + + hfi = (struct hfi_uncompressed_plane_actual_constraints_info *) + &pkt->rg_property_data[1]; + buffer_type = get_hfi_buffer(prop->buffer_type); + if (buffer_type) + hfi->buffer_type = buffer_type; + else + return -EINVAL; + + hfi->num_planes = prop->num_planes; + memcpy(hfi->rg_plane_format, prop->rg_plane_format, + hfi->num_planes + *sizeof(struct hal_uncompressed_plane_constraints)); + pkt->size += hfi_pkt_size; break; + } case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: break; case HAL_PARAM_FRAME_SIZE: diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c index ca658a6ea288cdb4a4c04b545c698993c09b74d2..94a79ad672d3952b2cce48fc1b66773fc4518eeb 100644 --- a/drivers/media/platform/msm/vidc/msm_smem.c +++ b/drivers/media/platform/msm/vidc/msm_smem.c @@ -78,6 +78,13 @@ static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align, * Mapping of sg is taken care by map attachment */ attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP; + /* + * We do not need dma_map function to perform cache operations + * on the whole buffer size and hence pass skip sync flag. 
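/*
 * Illustrative sketch, not part of this patch: the packing pattern shared by
 * the set-property cases above. The property ID goes into
 * rg_property_data[0], the HFI payload struct is overlaid starting at
 * rg_property_data[1], and pkt->size grows by the payload size. The helper
 * below is hypothetical and shown only for the operating-rate case.
 */
static void pack_operating_rate(u32 *rg_property_data, u32 *pkt_size, u32 rate)
{
	struct hfi_operating_rate *hfi;

	rg_property_data[0] = HFI_PROPERTY_CONFIG_OPERATING_RATE;
	hfi = (struct hfi_operating_rate *)&rg_property_data[1];
	hfi->operating_rate = rate;
	*pkt_size += sizeof(struct hfi_operating_rate);
}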
+ * We do the required cache operations separately for the + * required buffer size + */ + attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; if (res->sys_cache_present) attach->dma_map_attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT; @@ -511,6 +518,44 @@ int msm_smem_free(struct msm_smem *smem) return rc; }; +int msm_smem_cache_operations(struct dma_buf *dbuf, + enum smem_cache_ops cache_op, unsigned long offset, unsigned long size) +{ + int rc = 0; + + if (!dbuf) { + dprintk(VIDC_ERR, "%s: Invalid params\n", __func__); + return -EINVAL; + } + + switch (cache_op) { + case SMEM_CACHE_CLEAN: + case SMEM_CACHE_CLEAN_INVALIDATE: + rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE, + offset, size); + if (rc) + break; + rc = dma_buf_end_cpu_access_partial(dbuf, DMA_TO_DEVICE, + offset, size); + break; + case SMEM_CACHE_INVALIDATE: + rc = dma_buf_begin_cpu_access_partial(dbuf, DMA_TO_DEVICE, + offset, size); + if (rc) + break; + rc = dma_buf_end_cpu_access_partial(dbuf, DMA_FROM_DEVICE, + offset, size); + break; + default: + dprintk(VIDC_ERR, "%s: cache (%d) operation not supported\n", + __func__, cache_op); + rc = -EINVAL; + break; + } + + return rc; +} + struct context_bank_info *msm_smem_get_context_bank(u32 session_type, bool is_secure, struct msm_vidc_platform_resources *res, enum hal_buffer buffer_type) diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index 2262ae6dc3a14ff307f81ecdb79e906fe26a873c..376f204815cc27393b3b0b18b751ef9b66c47fe6 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -364,13 +364,6 @@ static int msm_vidc_initialize_core(struct platform_device *pdev, INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler); INIT_WORK(&core->ssr_work, msm_vidc_ssr_handler); - mutex_lock(&core->lock); - core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES, - sizeof(*core->vote_data), GFP_KERNEL); - if (!core->vote_data) - dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__); - mutex_unlock(&core->lock); - msm_vidc_init_core_clk_ops(core); return rc; } @@ -764,7 +757,6 @@ static int msm_vidc_remove(struct platform_device *pdev) v4l2_device_unregister(&core->v4l2_dev); msm_vidc_free_platform_resources(&core->resources); - kfree(core->vote_data); sysfs_remove_group(&pdev->dev.kobj, &msm_vidc_core_attr_group); dev_set_drvdata(&pdev->dev, NULL); mutex_destroy(&core->lock); diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index 5044fbf7c2ce4841c53c29180eb988b101ca9670..6f6c2fe32acc46444c78937a30f53c57a36c1c4a 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -394,19 +394,33 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = { static u32 get_frame_size_compressed_full_yuv(int plane, u32 max_mbs_per_frame, u32 size_per_mb) { + u32 frame_size; + if (max_mbs_per_frame > MAX_4K_MBPF) - return (max_mbs_per_frame * size_per_mb * 3 / 2) / 4; + frame_size = (max_mbs_per_frame * size_per_mb * 3 / 2) / 4; else - return (max_mbs_per_frame * size_per_mb * 3 / 2); + frame_size = (max_mbs_per_frame * size_per_mb * 3 / 2); + + /* multiply by 10/8 (1.25) to get size for 10 bit case */ + frame_size = frame_size + (frame_size >> 2); + + return frame_size; } static u32 get_frame_size_compressed(int plane, u32 max_mbs_per_frame, u32 size_per_mb) { + u32 frame_size; + if (max_mbs_per_frame > MAX_4K_MBPF) - return (max_mbs_per_frame * size_per_mb * 3 / 2) / 4; + 
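/*
 * Worked note, not part of this patch: the "+ (frame_size >> 2)" used in the
 * two helpers here scales by 5/4 = 1.25 without a division, which is exactly
 * the 10/8 growth of a 10-bit bitstream buffer over the 8-bit size,
 * e.g. 1000 -> 1000 + 250 = 1250.
 */
static inline u32 scale_for_10bit(u32 size_8bit)
{
	return size_8bit + (size_8bit >> 2);
}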
frame_size = (max_mbs_per_frame * size_per_mb * 3 / 2) / 4; else - return (max_mbs_per_frame * size_per_mb * 3/2)/2; + frame_size = (max_mbs_per_frame * size_per_mb * 3/2)/2; + + /* multiply by 10/8 (1.25) to get size for 10 bit case */ + frame_size = frame_size + (frame_size >> 2); + + return frame_size; } static u32 get_frame_size(struct msm_vidc_inst *inst, @@ -476,6 +490,8 @@ struct msm_vidc_format vdec_formats[] = { .get_frame_size = get_frame_size_compressed, .type = OUTPUT_PORT, .defer_outputs = false, + .input_min_count = 4, + .output_min_count = 6, }, { .name = "H264", @@ -484,6 +500,8 @@ struct msm_vidc_format vdec_formats[] = { .get_frame_size = get_frame_size_compressed, .type = OUTPUT_PORT, .defer_outputs = false, + .input_min_count = 4, + .output_min_count = 8, }, { .name = "HEVC", @@ -492,6 +510,8 @@ struct msm_vidc_format vdec_formats[] = { .get_frame_size = get_frame_size_compressed, .type = OUTPUT_PORT, .defer_outputs = false, + .input_min_count = 4, + .output_min_count = 8, }, { .name = "VP8", @@ -500,6 +520,8 @@ struct msm_vidc_format vdec_formats[] = { .get_frame_size = get_frame_size_compressed_full_yuv, .type = OUTPUT_PORT, .defer_outputs = false, + .input_min_count = 4, + .output_min_count = 6, }, { .name = "VP9", @@ -508,6 +530,23 @@ struct msm_vidc_format vdec_formats[] = { .get_frame_size = get_frame_size_compressed_full_yuv, .type = OUTPUT_PORT, .defer_outputs = true, + .input_min_count = 4, + .output_min_count = 11, + }, +}; + +struct msm_vidc_format_constraint dec_pix_format_constraints[] = { + { + .fourcc = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS, + .num_planes = 2, + .y_stride_multiples = 256, + .y_max_stride = 8192, + .y_min_plane_buffer_height_multiple = 32, + .y_buffer_alignment = 256, + .uv_stride_multiples = 256, + .uv_max_stride = 8192, + .uv_min_plane_buffer_height_multiple = 16, + .uv_buffer_alignment = 256, }, }; @@ -532,6 +571,7 @@ static bool msm_vidc_check_for_vp9d_overload(struct msm_vidc_core *core) int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) { struct msm_vidc_format *fmt = NULL; + struct msm_vidc_format_constraint *fmt_constraint = NULL; struct hal_frame_size frame_sz; unsigned int extra_idx = 0; int rc = 0; @@ -579,6 +619,28 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) msm_comm_get_hal_output_buffer(inst), f->fmt.pix_mp.pixelformat); + fmt_constraint = + msm_comm_get_pixel_fmt_constraints(dec_pix_format_constraints, + ARRAY_SIZE(dec_pix_format_constraints), + f->fmt.pix_mp.pixelformat); + + if (!fmt_constraint) { + dprintk(VIDC_INFO, + "Format constraint not required for %d on CAPTURE port\n", + f->fmt.pix_mp.pixelformat); + } else { + rc = msm_comm_set_color_format_constraints(inst, + msm_comm_get_hal_output_buffer(inst), + fmt_constraint); + if (rc) { + dprintk(VIDC_ERR, + "Set constraint for %d failed on CAPTURE port\n", + f->fmt.pix_mp.pixelformat); + rc = -EINVAL; + goto err_invalid_fmt; + } + } + inst->clk_data.opb_fourcc = f->fmt.pix_mp.pixelformat; if (msm_comm_get_stream_output_mode(inst) == HAL_VIDEO_DECODER_SECONDARY) { @@ -769,6 +831,16 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst) inst->buff_req.buffer[3].buffer_count_min_host = inst->buff_req.buffer[3].buffer_count_actual = MIN_NUM_DEC_CAPTURE_BUFFERS; + inst->buff_req.buffer[4].buffer_type = HAL_BUFFER_EXTRADATA_INPUT; + inst->buff_req.buffer[5].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT; + inst->buff_req.buffer[6].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT2; + inst->buff_req.buffer[7].buffer_type = 
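/*
 * Illustrative sketch, not part of this patch: one plausible way the P010
 * constraint entries above translate into a luma stride. P010 keeps every
 * sample in a 16-bit container, so width * 2 bytes is rounded up to the
 * 256-byte stride multiple and checked against the 8192-byte ceiling. The
 * real application of these fields happens in
 * msm_comm_set_color_format_constraints(), which is not shown here, so treat
 * this purely as an interpretation of the table values.
 */
static int p010_y_stride_example(u32 width, u32 *stride)
{
	u32 s = ALIGN(width * 2, 256);	/* e.g. 1080 * 2 = 2160 -> 2304 */

	if (s > 8192)
		return -EINVAL;
	*stride = s;
	return 0;
}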
HAL_BUFFER_INTERNAL_SCRATCH; + inst->buff_req.buffer[8].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_1; + inst->buff_req.buffer[9].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_2; + inst->buff_req.buffer[10].buffer_type = HAL_BUFFER_INTERNAL_PERSIST; + inst->buff_req.buffer[11].buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1; + inst->buff_req.buffer[12].buffer_type = HAL_BUFFER_INTERNAL_CMD_QUEUE; + inst->buff_req.buffer[13].buffer_type = HAL_BUFFER_INTERNAL_RECON; /* By default, initialize OUTPUT port to H264 decoder */ fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats, @@ -811,6 +883,7 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) struct hal_profile_level profile_level; struct hal_frame_size frame_sz; struct hal_buffer_requirements *bufreq; + struct hal_buffer_requirements *bufreq_out2; if (!inst || !inst->core || !inst->core->device) { dprintk(VIDC_ERR, "%s invalid parameters\n", __func__); @@ -1013,6 +1086,16 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) dprintk(VIDC_ERR, "Failed:Disabling OUTPUT2 port : %d\n", rc); + + bufreq_out2 = get_buff_req_buffer(inst, + HAL_BUFFER_OUTPUT2); + if (!bufreq_out2) + break; + + bufreq_out2->buffer_count_min = + bufreq_out2->buffer_count_min_host = + bufreq_out2->buffer_count_actual = 0; + break; case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY: switch (inst->bit_depth) { @@ -1079,21 +1162,43 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) dprintk(VIDC_ERR, "Failed setting OUTPUT2 size : %d\n", rc); + /* Populate output2 bufreqs with output bufreqs */ + bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT); + if (!bufreq) + break; - rc = msm_comm_try_get_bufreqs(inst); - if (rc) { - dprintk(VIDC_ERR, - "%s Failed to get buffer reqs : %d\n", - __func__, rc); + bufreq_out2 = get_buff_req_buffer(inst, + HAL_BUFFER_OUTPUT2); + if (!bufreq_out2) break; - } - rc = msm_vidc_update_host_buff_counts(inst); + memcpy(bufreq_out2, bufreq, + sizeof(struct hal_buffer_requirements)); + bufreq_out2->buffer_type = HAL_BUFFER_OUTPUT2; + rc = msm_comm_set_buffer_count(inst, + bufreq_out2->buffer_count_min_host, + bufreq_out2->buffer_count_actual, + HAL_BUFFER_OUTPUT2); if (rc) { dprintk(VIDC_ERR, - "%s Failed: update buff counts : %d\n", - __func__, rc); + "%s: Failed to set opb buffer count to FW\n"); + return -EINVAL; } + /* Do the same for extradata but no set is required */ + bufreq = get_buff_req_buffer(inst, + HAL_BUFFER_EXTRADATA_OUTPUT); + if (!bufreq) + break; + + bufreq_out2 = get_buff_req_buffer(inst, + HAL_BUFFER_EXTRADATA_OUTPUT2); + if (!bufreq_out2) + break; + + memcpy(bufreq_out2, bufreq, + sizeof(struct hal_buffer_requirements)); + bufreq_out2->buffer_type = + HAL_BUFFER_EXTRADATA_OUTPUT2; break; default: dprintk(VIDC_ERR, @@ -1228,7 +1333,6 @@ int msm_vdec_s_ext_ctrl(struct msm_vidc_inst *inst, dprintk(VIDC_ERR, "%s Failed setting stream output mode : %d\n", __func__, rc); - rc = msm_vidc_update_host_buff_counts(inst); break; case V4L2_CID_MPEG_VIDC_VIDEO_CONCEAL_COLOR_8BIT: conceal_color.conceal_color_8bit = ext_control[i].value; diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index 405575f8f904f07b1953d600fc5a6abeccc70a92..fdf6b4cfda20b53e4b7cb1f77f6f795fbb385e93 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -1160,6 +1160,8 @@ static struct msm_vidc_format venc_formats[] = { .fourcc = V4L2_PIX_FMT_H264, .get_frame_size = get_frame_size_compressed, .type = 
CAPTURE_PORT, + .input_min_count = 4, + .output_min_count = 4, }, { .name = "VP8", @@ -1167,6 +1169,8 @@ static struct msm_vidc_format venc_formats[] = { .fourcc = V4L2_PIX_FMT_VP8, .get_frame_size = get_frame_size_compressed, .type = CAPTURE_PORT, + .input_min_count = 4, + .output_min_count = 4, }, { .name = "HEVC", @@ -1174,6 +1178,8 @@ static struct msm_vidc_format venc_formats[] = { .fourcc = V4L2_PIX_FMT_HEVC, .get_frame_size = get_frame_size_compressed, .type = CAPTURE_PORT, + .input_min_count = 4, + .output_min_count = 4, }, { .name = "YCrCb Semiplanar 4:2:0", @@ -1195,6 +1201,8 @@ static struct msm_vidc_format venc_formats[] = { .fourcc = V4L2_PIX_FMT_TME, .get_frame_size = get_frame_size_compressed, .type = CAPTURE_PORT, + .input_min_count = 4, + .output_min_count = 4, }, { .name = "YCbCr Semiplanar 4:2:0 10bit", @@ -1205,6 +1213,22 @@ static struct msm_vidc_format venc_formats[] = { }, }; +struct msm_vidc_format_constraint enc_pix_format_constraints[] = { + { + .fourcc = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS, + .num_planes = 2, + .y_stride_multiples = 256, + .y_max_stride = 8192, + .y_min_plane_buffer_height_multiple = 32, + .y_buffer_alignment = 256, + .uv_stride_multiples = 256, + .uv_max_stride = 8192, + .uv_min_plane_buffer_height_multiple = 16, + .uv_buffer_alignment = 256, + }, +}; + + static int msm_venc_set_csc(struct msm_vidc_inst *inst, u32 color_primaries, u32 custom_matrix); @@ -1224,6 +1248,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) int rc = 0; struct hal_request_iframe request_iframe; struct hal_bitrate bitrate; + struct hal_operating_rate operating_rate; struct hal_profile_level profile_level; enum hal_h264_entropy h264_entropy; struct hal_intra_period intra_period; @@ -1506,15 +1531,16 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) } case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: { int temp = 0; - - enable.enable = false; + if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC && + inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264) { + return rc; + } switch (ctrl->val) { case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB: temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB; break; case V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES: temp = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES; - enable.enable = true; break; case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE: default: @@ -1522,19 +1548,6 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) break; } - temp_ctrl = - TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE); - if (!temp_ctrl->val) { - rc = msm_comm_try_set_prop(inst, - HAL_PARAM_VENC_LOW_LATENCY, &enable.enable); - if (rc) - dprintk(VIDC_ERR, - "SliceMode Low Latency enable fail\n"); - else - inst->clk_data.low_latency_mode = - (bool) enable.enable; - } - if (temp) temp_ctrl = TRY_GET_CTRL(temp); @@ -1547,6 +1560,10 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) } case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: + if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC && + inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264) { + return rc; + } temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); property_id = HAL_PARAM_VENC_MULTI_SLICE_CONTROL; @@ -1909,6 +1926,10 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) ctrl->val >> 16); inst->clk_data.operating_rate = ctrl->val; inst->clk_data.turbo_mode = false; + property_id = HAL_CONFIG_OPERATING_RATE; + 
operating_rate.operating_rate = + inst->clk_data.operating_rate; + pdata = &operating_rate; } break; case V4L2_CID_MPEG_VIDC_VIDEO_VENC_BITRATE_TYPE: @@ -2414,6 +2435,20 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst) inst->buff_req.buffer[2].buffer_count_min_host = inst->buff_req.buffer[2].buffer_count_actual = MIN_NUM_ENC_CAPTURE_BUFFERS; + inst->buff_req.buffer[3].buffer_type = HAL_BUFFER_OUTPUT2; + inst->buff_req.buffer[3].buffer_count_min_host = + inst->buff_req.buffer[3].buffer_count_actual = + MIN_NUM_ENC_CAPTURE_BUFFERS; + inst->buff_req.buffer[4].buffer_type = HAL_BUFFER_EXTRADATA_INPUT; + inst->buff_req.buffer[5].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT; + inst->buff_req.buffer[6].buffer_type = HAL_BUFFER_EXTRADATA_OUTPUT2; + inst->buff_req.buffer[7].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH; + inst->buff_req.buffer[8].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_1; + inst->buff_req.buffer[9].buffer_type = HAL_BUFFER_INTERNAL_SCRATCH_2; + inst->buff_req.buffer[10].buffer_type = HAL_BUFFER_INTERNAL_PERSIST; + inst->buff_req.buffer[11].buffer_type = HAL_BUFFER_INTERNAL_PERSIST_1; + inst->buff_req.buffer[12].buffer_type = HAL_BUFFER_INTERNAL_CMD_QUEUE; + inst->buff_req.buffer[13].buffer_type = HAL_BUFFER_INTERNAL_RECON; /* By default, initialize OUTPUT port to UBWC YUV format */ fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats, @@ -2521,6 +2556,7 @@ static int msm_venc_set_csc(struct msm_vidc_inst *inst, int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) { struct msm_vidc_format *fmt = NULL; + struct msm_vidc_format_constraint *fmt_constraint = NULL; int rc = 0; struct hfi_device *hdev; int extra_idx = 0, i = 0; @@ -2708,6 +2744,29 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) } msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc); + + fmt_constraint = + msm_comm_get_pixel_fmt_constraints(enc_pix_format_constraints, + ARRAY_SIZE(enc_pix_format_constraints), + f->fmt.pix_mp.pixelformat); + + if (!fmt_constraint) { + dprintk(VIDC_INFO, + "Format constraint not required for %d on OUTPUT port\n", + f->fmt.pix_mp.pixelformat); + } else { + rc = msm_comm_set_color_format_constraints(inst, + HAL_BUFFER_INPUT, + fmt_constraint); + if (rc) { + dprintk(VIDC_ERR, + "Set constraint for %d failed on CAPTURE port\n", + f->fmt.pix_mp.pixelformat); + rc = -EINVAL; + goto exit; + } + } + } else { dprintk(VIDC_ERR, "%s - Unsupported buf type: %d\n", __func__, f->type); diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 3b8e85fe700112c8a880149d811fa5d181cb7d28..d540fad258c4a8b7ea3c7ef04fc368d41e892a3d 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -30,8 +30,6 @@ static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl); -static int msm_vidc_get_count(struct msm_vidc_inst *inst, - struct v4l2_ctrl *ctrl); static int get_poll_flags(void *instance) { @@ -376,7 +374,6 @@ int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *control) { struct msm_vidc_inst *inst = instance; struct v4l2_ext_control *ext_control; - struct v4l2_ctrl ctrl; int i = 0, rc = 0; if (!inst || !control) @@ -386,19 +383,10 @@ int msm_vidc_g_ext_ctrl(void *instance, struct v4l2_ext_controls *control) for (i = 0; i < control->count; i++) { switch (ext_control[i].id) { - case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: - case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: - ctrl.id = ext_control[i].id; - ctrl.val = ext_control[i].value; - - 
msm_vidc_get_count(inst, &ctrl); - ext_control->value = ctrl.val; - break; default: dprintk(VIDC_ERR, "This control %x is not supported yet\n", ext_control[i].id); - rc = -EINVAL; break; } } @@ -742,29 +730,6 @@ static void msm_vidc_cleanup_buffer(struct vb2_buffer *vb) __func__, rc); } -static int set_buffer_count(struct msm_vidc_inst *inst, - int host_count, int act_count, enum hal_buffer type) -{ - int rc = 0; - struct hfi_device *hdev; - struct hal_buffer_count_actual buf_count; - - hdev = inst->core->device; - - buf_count.buffer_type = type; - buf_count.buffer_count_actual = act_count; - buf_count.buffer_count_min_host = host_count; - dprintk(VIDC_DBG, "%s : Act count = %d Host count = %d\n", - __func__, act_count, host_count); - rc = call_hfi_op(hdev, session_set_property, - inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count); - if (rc) - dprintk(VIDC_ERR, - "Failed to set actual buffer count %d for buffer type %d\n", - act_count, type); - return rc; -} - static int msm_vidc_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) @@ -811,7 +776,8 @@ static int msm_vidc_queue_setup(struct vb2_queue *q, sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i]; bufreq->buffer_count_actual = *num_buffers; - rc = set_buffer_count(inst, bufreq->buffer_count_min_host, + rc = msm_comm_set_buffer_count(inst, + bufreq->buffer_count_min_host, bufreq->buffer_count_actual, HAL_BUFFER_INPUT); } break; @@ -844,7 +810,8 @@ static int msm_vidc_queue_setup(struct vb2_queue *q, sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i]; bufreq->buffer_count_actual = *num_buffers; - rc = set_buffer_count(inst, bufreq->buffer_count_min_host, + rc = msm_comm_set_buffer_count(inst, + bufreq->buffer_count_min_host, bufreq->buffer_count_actual, buffer_type); } break; @@ -913,7 +880,11 @@ int msm_vidc_set_internal_config(struct msm_vidc_inst *inst) struct hal_vbv_hdr_buf_size hrd_buf_size; struct hal_enable latency; struct hfi_device *hdev; + struct hal_multi_slice_control multi_slice_control; u32 codec; + u32 mbps, mb_per_frame, fps, bitrate; + u32 slice_val, slice_mode, max_avg_slicesize; + u32 output_width, output_height; if (!inst || !inst->core || !inst->core->device) { dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__); @@ -965,6 +936,61 @@ int msm_vidc_set_internal_config(struct msm_vidc_inst *inst) inst->clk_data.low_latency_mode = latency.enable; } + /* Update Slice Config */ + slice_mode = msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); + + if ((codec == V4L2_PIX_FMT_H264 || codec == V4L2_PIX_FMT_HEVC) && + slice_mode != V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE) { + + output_height = inst->prop.height[CAPTURE_PORT]; + output_width = inst->prop.width[CAPTURE_PORT]; + fps = inst->prop.fps; + bitrate = inst->clk_data.bitrate; + mb_per_frame = NUM_MBS_PER_FRAME(output_height, output_width); + mbps = NUM_MBS_PER_SEC(output_height, output_width, fps); + + if (rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_RC_OFF && + rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR_VFR && + rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) { + slice_mode = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE; + slice_val = 0; + } else if (slice_mode == + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) { + if (output_width > 3840 || output_height > 3840 || + mb_per_frame > NUM_MBS_PER_FRAME(3840, 2160) || + fps > 60) { + slice_mode = + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE; + slice_val = 0; + } else { + slice_val = msm_comm_g_ctrl_for_id(inst, + 
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB); + slice_val = max(slice_val, mb_per_frame / 10); + } + } else { + if (output_width > 1920 || output_height > 1920 || + mb_per_frame > NUM_MBS_PER_FRAME(1920, 1088) || + fps > 30) { + slice_mode = + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE; + slice_val = 0; + } else { + slice_val = msm_comm_g_ctrl_for_id(inst, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES); + max_avg_slicesize = ((bitrate / fps) / 8) / 10; + slice_val = + max(slice_val, max_avg_slicesize); + } + } + + multi_slice_control.multi_slice = slice_mode; + multi_slice_control.slice_size = slice_val; + + rc = call_hfi_op(hdev, session_set_property, + (void *)inst->session, HAL_PARAM_VENC_MULTI_SLICE_CONTROL, + (void *)&multi_slice_control); + } return rc; } @@ -1555,114 +1581,6 @@ static int msm_vidc_op_s_ctrl(struct v4l2_ctrl *ctrl) inst, v4l2_ctrl_get_name(ctrl->id)); return rc; } - -static int msm_vidc_get_count(struct msm_vidc_inst *inst, - struct v4l2_ctrl *ctrl) -{ - int rc = 0; - struct hal_buffer_requirements *bufreq; - enum hal_buffer buffer_type; - - if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT) { - bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed to find bufreqs for buffer type = %d\n", - HAL_BUFFER_INPUT); - return 0; - } - if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming) { - ctrl->val = bufreq->buffer_count_min_host; - return 0; - } - if (ctrl->val > bufreq->buffer_count_min_host && - ctrl->val <= MAX_NUM_OUTPUT_BUFFERS) { - dprintk(VIDC_DBG, - "Buffer count Host changed from %d to %d\n", - bufreq->buffer_count_min_host, - ctrl->val); - bufreq->buffer_count_actual = - bufreq->buffer_count_min = - bufreq->buffer_count_min_host = - ctrl->val; - } else { - ctrl->val = bufreq->buffer_count_min_host; - } - rc = set_buffer_count(inst, - bufreq->buffer_count_min_host, - bufreq->buffer_count_actual, - HAL_BUFFER_INPUT); - - msm_vidc_update_host_buff_counts(inst); - ctrl->val = bufreq->buffer_count_min_host; - dprintk(VIDC_DBG, - "g_count: %x : OUTPUT: min %d min_host %d actual %d\n", - hash32_ptr(inst->session), - bufreq->buffer_count_min, - bufreq->buffer_count_min_host, - bufreq->buffer_count_actual); - return rc; - - } else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) { - - buffer_type = msm_comm_get_hal_output_buffer(inst); - bufreq = get_buff_req_buffer(inst, - buffer_type); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed to find bufreqs for buffer type = %d\n", - buffer_type); - return 0; - } - if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming) { - if (ctrl->val != bufreq->buffer_count_min_host) - return -EINVAL; - else - return 0; - } - - if (inst->session_type == MSM_VIDC_DECODER && - !inst->in_reconfig && - inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) { - dprintk(VIDC_DBG, - "Clients updates Buffer count from %d to %d\n", - bufreq->buffer_count_min_host, ctrl->val); - bufreq->buffer_count_actual = - bufreq->buffer_count_min = - bufreq->buffer_count_min_host = - ctrl->val; - } - if (ctrl->val > bufreq->buffer_count_min_host && - ctrl->val <= MAX_NUM_CAPTURE_BUFFERS) { - dprintk(VIDC_DBG, - "Buffer count Host changed from %d to %d\n", - bufreq->buffer_count_min_host, - ctrl->val); - bufreq->buffer_count_actual = - bufreq->buffer_count_min = - bufreq->buffer_count_min_host = - ctrl->val; - } else { - ctrl->val = bufreq->buffer_count_min_host; - } - rc = set_buffer_count(inst, - bufreq->buffer_count_min_host, - bufreq->buffer_count_actual, - HAL_BUFFER_OUTPUT); - - msm_vidc_update_host_buff_counts(inst); - ctrl->val = 
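/*
 * Worked example, not part of this patch: for a 1080p30 encode at 6 Mbit/s
 * in MAX_BYTES mode (assuming clk_data.bitrate is in bits per second), the
 * floor applied above is
 *   max_avg_slicesize = ((6000000 / 30) / 8) / 10 = 2500 bytes,
 * i.e. the requested slice size is never allowed to cut a frame into more
 * than roughly ten slices. The MAX_MB branch applies the same one-tenth
 * rule in macroblocks:
 *   NUM_MBS_PER_FRAME(1088, 1920) / 10 = 8160 / 10 = 816 MBs.
 */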
bufreq->buffer_count_min_host; - dprintk(VIDC_DBG, - "g_count: %x : CAPTURE: min %d min_host %d actual %d\n", - hash32_ptr(inst->session), - bufreq->buffer_count_min, - bufreq->buffer_count_min_host, - bufreq->buffer_count_actual); - return rc; - } - return -EINVAL; -} - static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) { int rc = 0; @@ -1703,8 +1621,6 @@ static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) break; case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: - if (inst->in_reconfig) - msm_vidc_update_host_buff_counts(inst); buffer_type = msm_comm_get_hal_output_buffer(inst); bufreq = get_buff_req_buffer(inst, buffer_type); diff --git a/drivers/media/platform/msm/vidc/msm_vidc.h b/drivers/media/platform/msm/vidc/msm_vidc.h index 776c74357cd2d717ed8629d0788c9d05d29342df..c22970833f83392515a8ae36282a274ef5c66d77 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.h +++ b/drivers/media/platform/msm/vidc/msm_vidc.h @@ -21,7 +21,7 @@ #include #include -#define HAL_BUFFER_MAX 0xd +#define HAL_BUFFER_MAX 0xe enum smem_type { SMEM_DMA = 1, @@ -133,5 +133,4 @@ int msm_vidc_g_crop(void *instance, struct v4l2_crop *a); int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize); int msm_vidc_private(void *vidc_inst, unsigned int cmd, struct msm_vidc_arg *arg); - #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index 1a258c452820e5676dea6f313d3ef89c2b6067c3..a5b3013c16c868bdb93d12b98a362689c294ab42 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -100,6 +100,19 @@ int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst) return NUM_MBS_PER_FRAME(height, width); } +static int msm_vidc_get_fps(struct msm_vidc_inst *inst) +{ + int fps; + + if ((inst->clk_data.operating_rate >> 16) > inst->prop.fps) + fps = (inst->clk_data.operating_rate >> 16) ? + (inst->clk_data.operating_rate >> 16) : 1; + else + fps = inst->prop.fps; + + return fps; +} + void update_recon_stats(struct msm_vidc_inst *inst, struct recon_stats_type *recon_stats) { @@ -194,19 +207,23 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core); return -EINVAL; } - hdev = core->device; - mutex_lock(&core->lock); - vote_data = core->vote_data; + vote_data = kzalloc(sizeof(struct vidc_bus_vote_data) * + MAX_SUPPORTED_INSTANCES, GFP_ATOMIC); if (!vote_data) { - dprintk(VIDC_PROF, - "Failed to get vote_data for inst %pK\n", - inst); - mutex_unlock(&core->lock); - return -EINVAL; + dprintk(VIDC_DBG, + "vote_data allocation with GFP_ATOMIC failed\n"); + vote_data = kzalloc(sizeof(struct vidc_bus_vote_data) * + MAX_SUPPORTED_INSTANCES, GFP_KERNEL); + if (!vote_data) { + dprintk(VIDC_DBG, + "vote_data allocation failed\n"); + return -EINVAL; + } } + mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { int codec = 0; struct msm_vidc_buffer *temp, *next; @@ -280,12 +297,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES) != 0; - if (inst->clk_data.operating_rate) - vote_data[i].fps = - (inst->clk_data.operating_rate >> 16) ? 
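/*
 * Worked example, not part of this patch: operating_rate is carried as a Q16
 * value, so 60 fps arrives as 60 << 16 = 3932160. msm_vidc_get_fps() above
 * effectively returns max(operating_rate >> 16, prop.fps):
 *   operating_rate = 60 << 16, prop.fps = 30  ->  60
 *   operating_rate = 0,        prop.fps = 30  ->  30
 */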
- inst->clk_data.operating_rate >> 16 : 1; - else - vote_data[i].fps = inst->prop.fps; + vote_data[i].fps = msm_vidc_get_fps(inst); vote_data[i].power_mode = 0; if (msm_vidc_clock_voting || is_turbo || @@ -328,6 +340,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data, vote_data_count); + kfree(vote_data); return rc; } @@ -604,7 +617,7 @@ static unsigned long msm_vidc_calc_freq_ar50(struct msm_vidc_inst *inst, struct msm_vidc_core *core = NULL; int i = 0; struct allowed_clock_rates_table *allowed_clks_tbl = NULL; - u64 rate = 0; + u64 rate = 0, fps; struct clock_data *dcvs = NULL; core = inst->core; @@ -613,6 +626,8 @@ static unsigned long msm_vidc_calc_freq_ar50(struct msm_vidc_inst *inst, mbs_per_second = msm_comm_get_inst_load_per_core(inst, LOAD_CALC_NO_QUIRKS); + fps = msm_vidc_get_fps(inst); + /* * Calculate vpp, vsp cycles separately for encoder and decoder. * Even though, most part is common now, in future it may change @@ -635,7 +650,7 @@ static unsigned long msm_vidc_calc_freq_ar50(struct msm_vidc_inst *inst, vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; /* 10 / 7 is overhead factor */ - vsp_cycles += ((inst->prop.fps * filled_len * 8) * 10) / 7; + vsp_cycles += ((fps * filled_len * 8) * 10) / 7; } else { dprintk(VIDC_ERR, "Unknown session type = %s\n", __func__); @@ -676,7 +691,7 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, struct msm_vidc_core *core = NULL; int i = 0; struct allowed_clock_rates_table *allowed_clks_tbl = NULL; - u64 rate = 0; + u64 rate = 0, fps; struct clock_data *dcvs = NULL; u32 operating_rate, vsp_factor_num = 10, vsp_factor_den = 7; @@ -686,6 +701,8 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, mbs_per_second = msm_comm_get_inst_load_per_core(inst, LOAD_CALC_NO_QUIRKS); + fps = msm_vidc_get_fps(inst); + /* * Calculate vpp, vsp cycles separately for encoder and decoder. 
* Even though, most part is common now, in future it may change @@ -721,7 +738,7 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; /* 10 / 7 is overhead factor */ - vsp_cycles += ((inst->prop.fps * filled_len * 8) * 10) / 7; + vsp_cycles += ((fps * filled_len * 8) * 10) / 7; } else { dprintk(VIDC_ERR, "Unknown session type = %s\n", __func__); @@ -971,18 +988,12 @@ int msm_dcvs_try_enable(struct msm_vidc_inst *inst) inst->clk_data.low_latency_mode || inst->batch.enable) { dprintk(VIDC_PROF, "DCVS disabled: %pK\n", inst); - inst->clk_data.extra_capture_buffer_count = 0; - inst->clk_data.extra_output_buffer_count = 0; inst->clk_data.dcvs_mode = false; return false; } inst->clk_data.dcvs_mode = true; dprintk(VIDC_PROF, "DCVS enabled: %pK\n", inst); - inst->clk_data.extra_capture_buffer_count = - DCVS_DEC_EXTRA_OUTPUT_BUFFERS; - inst->clk_data.extra_output_buffer_count = - DCVS_DEC_EXTRA_OUTPUT_BUFFERS; return true; } @@ -1090,6 +1101,17 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst) __func__); } +static bool is_output_buffer(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type) +{ + if (msm_comm_get_stream_output_mode(inst) == + HAL_VIDEO_DECODER_SECONDARY) { + return buffer_type == HAL_BUFFER_OUTPUT2; + } else { + return buffer_type == HAL_BUFFER_OUTPUT; + } +} + int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst, enum hal_buffer buffer_type) { @@ -1106,15 +1128,22 @@ int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst, if (is_thumbnail_session(inst)) return 0; - count = buffer_type == HAL_BUFFER_INPUT ? - inst->clk_data.extra_output_buffer_count : - inst->clk_data.extra_capture_buffer_count; + /* Add DCVS extra buffer count */ + if (inst->core->resources.dcvs) { + if (is_decode_session(inst) && + is_output_buffer(inst, buffer_type)) { + count += DCVS_DEC_EXTRA_OUTPUT_BUFFERS; + } else if ((is_encode_session(inst) && + buffer_type == HAL_BUFFER_INPUT)) { + count += DCVS_ENC_EXTRA_INPUT_BUFFERS; + } + } /* * if platform supports decode batching ensure minimum * batch size count of extra buffers added on output port */ - if (buffer_type == HAL_BUFFER_OUTPUT) { + if (is_output_buffer(inst, buffer_type)) { if (inst->core->resources.decode_batching && is_decode_session(inst) && count < inst->batch.size) @@ -1266,12 +1295,11 @@ int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst) hdev = inst->core->device; - if (inst->clk_data.low_latency_mode) { - pdata.video_work_mode = VIDC_WORK_MODE_1; - goto decision_done; - } - if (inst->session_type == MSM_VIDC_DECODER) { + if (inst->clk_data.low_latency_mode) { + pdata.video_work_mode = VIDC_WORK_MODE_1; + goto decision_done; + } pdata.video_work_mode = VIDC_WORK_MODE_2; switch (inst->fmts[OUTPUT_PORT].fourcc) { case V4L2_PIX_FMT_MPEG2: @@ -1295,6 +1323,14 @@ int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst) pdata.video_work_mode = VIDC_WORK_MODE_2; + if (codec == V4L2_PIX_FMT_H264 && width > 3840) + goto decision_done; + + if (inst->clk_data.low_latency_mode) { + pdata.video_work_mode = VIDC_WORK_MODE_1; + goto decision_done; + } + switch (codec) { case V4L2_PIX_FMT_VP8: { @@ -1356,7 +1392,7 @@ static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst, } mbs_per_frame = msm_vidc_get_mbs_per_frame(inst); if (mbs_per_frame > inst->core->resources.max_hq_mbs_per_frame || - inst->prop.fps > inst->core->resources.max_hq_fps) { + msm_vidc_get_fps(inst) > inst->core->resources.max_hq_fps) { enable = true; } diff 
--git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h index 73dcc1670f6d2cc36698eeebe4d2bf20733a5eae..c25bf8103765c217c369e240b8dba51f826f4e4e 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h @@ -16,7 +16,7 @@ #include "msm_vidc_internal.h" /* extra o/p buffers in case of encoder dcvs */ -#define DCVS_ENC_EXTRA_OUTPUT_BUFFERS 2 +#define DCVS_ENC_EXTRA_INPUT_BUFFERS 4 /* extra o/p buffers in case of decoder dcvs */ #define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4 diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 79b7c665d4f70d03db3504340543e58e08d09316..7e5657cf6dcdc3e70f2dcd83f61d5ace86b3e3a3 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -733,18 +733,13 @@ static int msm_comm_get_mbs_per_sec(struct msm_vidc_inst *inst) capture_port_mbs = NUM_MBS_PER_FRAME(inst->prop.width[CAPTURE_PORT], inst->prop.height[CAPTURE_PORT]); - if (inst->clk_data.operating_rate) { + if ((inst->clk_data.operating_rate >> 16) > inst->prop.fps) fps = (inst->clk_data.operating_rate >> 16) ? inst->clk_data.operating_rate >> 16 : 1; - /* - * Check if operating rate is less than fps. - * If Yes, then use fps to scale clocks - */ - fps = fps > inst->prop.fps ? fps : inst->prop.fps; - return max(output_port_mbs, capture_port_mbs) * fps; - } else { - return max(output_port_mbs, capture_port_mbs) * inst->prop.fps; - } + else + fps = inst->prop.fps; + + return max(output_port_mbs, capture_port_mbs) * fps; } int msm_comm_get_inst_load(struct msm_vidc_inst *inst, @@ -991,6 +986,26 @@ struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc( return &fmt[i]; } +struct msm_vidc_format_constraint *msm_comm_get_pixel_fmt_constraints( + struct msm_vidc_format_constraint fmt[], int size, int fourcc) +{ + int i; + + if (!fmt) { + dprintk(VIDC_ERR, "Invalid inputs, fmt = %pK\n", fmt); + return NULL; + } + for (i = 0; i < size; i++) { + if (fmt[i].fourcc == fourcc) + break; + } + if (i == size) { + dprintk(VIDC_INFO, "Format constraint not found.\n"); + return NULL; + } + return &fmt[i]; +} + struct buf_queue *msm_comm_get_vb2q( struct msm_vidc_inst *inst, enum v4l2_buf_type type) { @@ -1567,6 +1582,7 @@ static void handle_event_change(enum hal_command_response cmd, void *data) struct hfi_device *hdev; u32 *ptr = NULL; struct hal_buffer_requirements *bufreq; + int extra_buff_count = 0; if (!event_notify) { dprintk(VIDC_WARN, "Got an empty event from hfi\n"); @@ -1669,17 +1685,17 @@ static void handle_event_change(enum hal_command_response cmd, void *data) event_notify->level); dprintk(VIDC_DBG, - "Event payload: height = %d width = %d profile = %d level = %d\n", + "Event payload: height = %u width = %u profile = %u level = %u\n", event_notify->height, event_notify->width, ptr[9], ptr[10]); dprintk(VIDC_DBG, - "Event payload: bit_depth = %d pic_struct = %d colour_space = %d\n", + "Event payload: bit_depth = %u pic_struct = %u colour_space = %u\n", event_notify->bit_depth, event_notify->pic_struct, event_notify->colour_space); dprintk(VIDC_DBG, - "Event payload: CROP top = %d left = %d Height = %d Width = %d\n", + "Event payload: CROP top = %u left = %u Height = %u Width = %u\n", event_notify->crop_data.top, event_notify->crop_data.left, event_notify->crop_data.height, @@ -1693,39 +1709,37 @@ static void handle_event_change(enum hal_command_response cmd, void *data) if 
(msm_comm_get_stream_output_mode(inst) == HAL_VIDEO_DECODER_SECONDARY) { - bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_OUTPUT); + if (!bufreq) return; - } + /* No need to add extra buffers to DPBs */ bufreq->buffer_count_min = event_notify->capture_buf_count; + bufreq->buffer_count_min_host = bufreq->buffer_count_min; bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT2); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_OUTPUT2); + if (!bufreq) return; - } + extra_buff_count = msm_vidc_get_extra_buff_count(inst, + HAL_BUFFER_OUTPUT2); bufreq->buffer_count_min = event_notify->capture_buf_count; + bufreq->buffer_count_min_host = bufreq->buffer_count_min + + extra_buff_count; } else { bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_OUTPUT); + if (!bufreq) return; - } + + extra_buff_count = msm_vidc_get_extra_buff_count(inst, + HAL_BUFFER_OUTPUT); bufreq->buffer_count_min = event_notify->capture_buf_count; + bufreq->buffer_count_min_host = bufreq->buffer_count_min + + extra_buff_count; } mutex_unlock(&inst->lock); @@ -2417,6 +2431,11 @@ static void handle_ebd(enum hal_command_response cmd, void *data) update_recon_stats(inst, &empty_buf_done->recon_stats); msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr); + /* + * dma cache operations need to be performed before dma_unmap + * which is done inside msm_comm_put_vidc_buffer() + */ + msm_comm_dqbuf_cache_operations(inst, mbuf); /* * put_buffer should be done before vb2_buffer_done else * client might queue the same buffer before it is unmapped @@ -2602,6 +2621,11 @@ static void handle_fbd(enum hal_command_response cmd, void *data) } mutex_unlock(&inst->registeredbufs.lock); + /* + * dma cache operations need to be performed before dma_unmap + * which is done inside msm_comm_put_vidc_buffer() + */ + msm_comm_dqbuf_cache_operations(inst, mbuf); /* * put_buffer should be done before vb2_buffer_done else * client might queue the same buffer before it is unmapped @@ -2752,16 +2776,19 @@ bool is_batching_allowed(struct msm_vidc_inst *inst) /* * Enable decode batching based on below conditions - * - decode session * - platform supports batching + * - decode session and H264/HEVC/VP9 format * - session resolution <= 1080p * - low latency not enabled * - not a thumbnail session * - UBWC color format */ - if (is_decode_session(inst) && inst->core->resources.decode_batching && + if (inst->core->resources.decode_batching && is_decode_session(inst) && + (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264 || + inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC || + inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9) && (msm_vidc_get_mbs_per_frame(inst) <= - MAX_DEC_BATCH_WIDTH * MAX_DEC_BATCH_HEIGHT) && + NUM_MBS_PER_FRAME(MAX_DEC_BATCH_HEIGHT, MAX_DEC_BATCH_WIDTH)) && !inst->clk_data.low_latency_mode && !is_thumbnail_session(inst) && (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_NV12_UBWC || @@ -3038,6 +3065,86 @@ static int msm_comm_session_init_done(int flipped_state, return rc; } +static int msm_comm_init_buffer_count(struct msm_vidc_inst *inst) +{ + int extra_buff_count = 0; + struct hal_buffer_requirements *bufreq; + int rc = 0; + int port; + + if (!is_decode_session(inst) && !is_encode_session(inst)) + return 0; + + if (is_decode_session(inst)) + port = OUTPUT_PORT; + else + port = 
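/*
 * Worked example, not part of this patch: with the counts introduced in this
 * patch, an H264 decode session in the default (combined) output mode with
 * DCVS on and batching off ends up with
 *   input:  input_min_count 4  + 0 extra                         -> 4
 *   output: output_min_count 8 + DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4 -> 12
 * where buffer_count_min_host == buffer_count_actual is what
 * msm_comm_set_buffer_count() reports to firmware (see
 * msm_comm_init_buffer_count() below).
 */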
CAPTURE_PORT; + + /* Update input buff counts */ + bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT); + if (!bufreq) + return -EINVAL; + + extra_buff_count = msm_vidc_get_extra_buff_count(inst, + HAL_BUFFER_INPUT); + bufreq->buffer_count_min = inst->fmts[port].input_min_count; + /* batching needs minimum batch size count of input buffers */ + if (inst->core->resources.decode_batching && + is_decode_session(inst) && + bufreq->buffer_count_min < inst->batch.size) + bufreq->buffer_count_min = inst->batch.size; + bufreq->buffer_count_min_host = bufreq->buffer_count_actual = + bufreq->buffer_count_min + extra_buff_count; + + rc = msm_comm_set_buffer_count(inst, + bufreq->buffer_count_min_host, + bufreq->buffer_count_actual, HAL_BUFFER_INPUT); + if (rc) { + dprintk(VIDC_ERR, + "%s: Failed to set in buffer count to FW\n", + __func__); + return -EINVAL; + } + + bufreq = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT); + if (!bufreq) + return -EINVAL; + + bufreq->buffer_count_min = inst->fmts[port].input_min_count; + bufreq->buffer_count_min_host = bufreq->buffer_count_actual = + bufreq->buffer_count_min + extra_buff_count; + + /* Update output buff count */ + bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT); + if (!bufreq) + return -EINVAL; + + extra_buff_count = msm_vidc_get_extra_buff_count(inst, + HAL_BUFFER_OUTPUT); + bufreq->buffer_count_min = inst->fmts[port].output_min_count; + bufreq->buffer_count_min_host = bufreq->buffer_count_actual = + bufreq->buffer_count_min + extra_buff_count; + + rc = msm_comm_set_buffer_count(inst, + bufreq->buffer_count_min_host, + bufreq->buffer_count_actual, HAL_BUFFER_OUTPUT); + if (rc) { + dprintk(VIDC_ERR, + "%s: Failed to set out buffer count to FW\n"); + return -EINVAL; + } + + bufreq = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT); + if (!bufreq) + return -EINVAL; + + bufreq->buffer_count_min = inst->fmts[port].output_min_count; + bufreq->buffer_count_min_host = bufreq->buffer_count_actual = + bufreq->buffer_count_min + extra_buff_count; + + return 0; +} + static int msm_comm_session_init(int flipped_state, struct msm_vidc_inst *inst) { @@ -3083,7 +3190,14 @@ static int msm_comm_session_init(int flipped_state, rc = -EINVAL; goto exit; } + + rc = msm_comm_init_buffer_count(inst); + if (rc) { + dprintk(VIDC_ERR, "Failed to initialize buff counts\n"); + goto exit; + } change_inst_state(inst, MSM_VIDC_OPEN); + exit: return rc; } @@ -3091,10 +3205,11 @@ static int msm_comm_session_init(int flipped_state, static void msm_vidc_print_running_insts(struct msm_vidc_core *core) { struct msm_vidc_inst *temp; + int op_rate = 0; dprintk(VIDC_ERR, "Running instances:\n"); - dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%4s\n", - "type", "w", "h", "fps", "prop"); + dprintk(VIDC_ERR, "%4s|%4s|%4s|%4s|%4s|%4s\n", + "type", "w", "h", "fps", "opr", "prop"); mutex_lock(&core->lock); list_for_each_entry(temp, &core->instances, list) { @@ -3108,13 +3223,21 @@ static void msm_vidc_print_running_insts(struct msm_vidc_core *core) if (is_turbo_session(temp)) strlcat(properties, "T", sizeof(properties)); - dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%4s\n", + if (is_realtime_session(temp)) + strlcat(properties, "R", sizeof(properties)); + + if (temp->clk_data.operating_rate) + op_rate = temp->clk_data.operating_rate >> 16; + else + op_rate = temp->prop.fps; + + dprintk(VIDC_ERR, "%4d|%4d|%4d|%4d|%4d|%4s\n", temp->session_type, max(temp->prop.width[CAPTURE_PORT], temp->prop.width[OUTPUT_PORT]), max(temp->prop.height[CAPTURE_PORT], temp->prop.height[OUTPUT_PORT]), - temp->prop.fps, 
properties); + temp->prop.fps, op_rate, properties); } } mutex_unlock(&core->lock); @@ -3359,6 +3482,7 @@ struct hal_buffer_requirements *get_buff_req_buffer( if (inst->buff_req.buffer[i].buffer_type == buffer_type) return &inst->buff_req.buffer[i]; } + dprintk(VIDC_ERR, "Failed to get buff req for : %x", buffer_type); return NULL; } @@ -4174,7 +4298,6 @@ int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, count = num_pending_qbufs(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (count < inst->batch.size) { - mbuf->flags |= MSM_VIDC_FLAG_DEFERRED; print_vidc_buffer(VIDC_DBG, "batch-qbuf deferred", inst, mbuf); return 0; @@ -4215,122 +4338,6 @@ int msm_comm_qbuf_decode_batch(struct msm_vidc_inst *inst, return rc; } -int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst) -{ - int extra_buffers; - struct hal_buffer_requirements *bufreq; - struct hal_buffer_requirements *bufreq_extra; - - bufreq = get_buff_req_buffer(inst, - HAL_BUFFER_INPUT); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_INPUT); - return -EINVAL; - } - extra_buffers = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT); - bufreq->buffer_count_min_host = bufreq->buffer_count_min + - extra_buffers; - - /* decode batching needs minimum batch size count of input buffers */ - if (is_decode_session(inst) && !is_thumbnail_session(inst) && - inst->core->resources.decode_batching && - bufreq->buffer_count_min_host < inst->batch.size) - bufreq->buffer_count_min_host = inst->batch.size; - - /* adjust min_host count for VP9 decoder */ - if (is_decode_session(inst) && !is_thumbnail_session(inst) && - inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9 && - bufreq->buffer_count_min_host < MIN_NUM_OUTPUT_BUFFERS_VP9) - bufreq->buffer_count_min_host = MIN_NUM_OUTPUT_BUFFERS_VP9; - - bufreq_extra = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT); - if (bufreq_extra) { - if (bufreq_extra->buffer_count_min) - bufreq_extra->buffer_count_min_host = - bufreq->buffer_count_min_host; - } - - if (msm_comm_get_stream_output_mode(inst) == - HAL_VIDEO_DECODER_SECONDARY) { - - bufreq = get_buff_req_buffer(inst, - HAL_BUFFER_OUTPUT); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_OUTPUT); - return -EINVAL; - } - - /* For DPB buffers, no need to add Extra buffers */ - bufreq->buffer_count_min_host = bufreq->buffer_count_actual = - bufreq->buffer_count_min; - - bufreq = get_buff_req_buffer(inst, - HAL_BUFFER_OUTPUT2); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_OUTPUT2); - return -EINVAL; - } - - extra_buffers = msm_vidc_get_extra_buff_count(inst, - HAL_BUFFER_OUTPUT); - - bufreq->buffer_count_min_host = bufreq->buffer_count_actual = - bufreq->buffer_count_min + extra_buffers; - - bufreq = get_buff_req_buffer(inst, - HAL_BUFFER_EXTRADATA_OUTPUT2); - if (!bufreq) { - dprintk(VIDC_DBG, - "No buffer requirements : %x\n", - HAL_BUFFER_EXTRADATA_OUTPUT2); - } else { - if (bufreq->buffer_count_min) { - bufreq->buffer_count_min_host = - bufreq->buffer_count_actual = - bufreq->buffer_count_min + extra_buffers; - } - } - } else { - - bufreq = get_buff_req_buffer(inst, - HAL_BUFFER_OUTPUT); - if (!bufreq) { - dprintk(VIDC_ERR, - "Failed : No buffer requirements : %x\n", - HAL_BUFFER_OUTPUT); - return -EINVAL; - } - - extra_buffers = msm_vidc_get_extra_buff_count(inst, - HAL_BUFFER_OUTPUT); - - bufreq->buffer_count_min_host = bufreq->buffer_count_actual = - bufreq->buffer_count_min + extra_buffers; - - 
bufreq = get_buff_req_buffer(inst, - HAL_BUFFER_EXTRADATA_OUTPUT); - if (!bufreq) { - dprintk(VIDC_DBG, - "No buffer requirements : %x\n", - HAL_BUFFER_EXTRADATA_OUTPUT); - } else { - if (bufreq->buffer_count_min) { - bufreq->buffer_count_min_host = - bufreq->buffer_count_actual = - bufreq->buffer_count_min + extra_buffers; - } - } - } - - return 0; -} - int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst) { int rc = 0, i = 0; @@ -4350,8 +4357,32 @@ int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst) "buffer type", "count", "mincount_host", "mincount_fw", "size"); for (i = 0; i < HAL_BUFFER_MAX; i++) { struct hal_buffer_requirements req = hprop.buf_req.buffer[i]; + struct hal_buffer_requirements *curr_req; + + /* + * For decoder we can ignore the buffer counts that firmware + * sends for inp/out buffers. + * FW buffer counts for these are used only in reconfig + */ + curr_req = get_buff_req_buffer(inst, req.buffer_type); + if (!curr_req) + return -EINVAL; + + if (req.buffer_type == HAL_BUFFER_INPUT || + req.buffer_type == HAL_BUFFER_OUTPUT || + req.buffer_type == HAL_BUFFER_OUTPUT2 || + req.buffer_type == HAL_BUFFER_EXTRADATA_INPUT || + req.buffer_type == HAL_BUFFER_EXTRADATA_OUTPUT || + req.buffer_type == HAL_BUFFER_EXTRADATA_OUTPUT2) { + curr_req->buffer_size = req.buffer_size; + curr_req->buffer_region_size = req.buffer_region_size; + curr_req->contiguous = req.contiguous; + curr_req->buffer_alignment = req.buffer_alignment; + } else { + memcpy(curr_req, &req, + sizeof(struct hal_buffer_requirements)); + } - inst->buff_req.buffer[i] = req; if (req.buffer_type != HAL_BUFFER_NONE) { dprintk(VIDC_DBG, "%15s %8d %8d %8d %8d\n", get_buffer_name(req.buffer_type), @@ -4360,10 +4391,8 @@ int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst) req.buffer_count_min, req.buffer_size); } } - if (inst->session_type == MSM_VIDC_ENCODER) - rc = msm_vidc_update_host_buff_counts(inst); - dprintk(VIDC_DBG, "Buffer requirements host adjusted:\n"); + dprintk(VIDC_DBG, "Buffer requirements driver adjusted:\n"); dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n", "buffer type", "count", "mincount_host", "mincount_fw", "size"); for (i = 0; i < HAL_BUFFER_MAX; i++) { @@ -4783,6 +4812,29 @@ int msm_comm_try_set_prop(struct msm_vidc_inst *inst, return rc; } +int msm_comm_set_buffer_count(struct msm_vidc_inst *inst, + int host_count, int act_count, enum hal_buffer type) +{ + int rc = 0; + struct hfi_device *hdev; + struct hal_buffer_count_actual buf_count; + + hdev = inst->core->device; + + buf_count.buffer_type = type; + buf_count.buffer_count_actual = act_count; + buf_count.buffer_count_min_host = host_count; + dprintk(VIDC_DBG, "%s : Act count = %d Host count = %d\n", + __func__, act_count, host_count); + rc = call_hfi_op(hdev, session_set_property, + inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count); + if (rc) + dprintk(VIDC_ERR, + "Failed to set actual buffer count %d for buffer type %d\n", + act_count, type); + return rc; +} + int msm_comm_set_output_buffers(struct msm_vidc_inst *inst) { int rc = 0; @@ -6080,6 +6132,137 @@ int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, return rc; } +int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0, i; + struct vb2_buffer *vb; + bool skip; + + if (!inst || !mbuf) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + vb = &mbuf->vvb.vb2_buf; + + for (i = 0; i < vb->num_planes; i++) { + unsigned long offset, size; + enum smem_cache_ops cache_op; 
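
The per-plane selection that follows only touches plane 0 and picks the cache operation from the session type and queue direction: data written by the CPU (decoder bitstream, encoder YUV) gets a clean plus invalidate over just the filled bytes, while data the hardware will fill gets an invalidate over the whole plane. A hedged usage sketch of the msm_smem_cache_operations() helper this patch exports (declared later in the header change); the dma-buf argument is hypothetical and assumed to be already mapped:

    /* illustrative only: flush the filled region of a CPU-written plane */
    static int example_flush_written_plane(struct dma_buf *dbuf,
                                           unsigned long data_offset,
                                           unsigned long bytesused)
    {
            /* CPU wrote the data, the hardware reads it next */
            return msm_smem_cache_operations(dbuf, SMEM_CACHE_CLEAN_INVALIDATE,
                                             data_offset, bytesused);
    }
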
+ + skip = true; + if (inst->session_type == MSM_VIDC_DECODER) { + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!i) { /* bitstream */ + skip = false; + offset = vb->planes[i].data_offset; + size = vb->planes[i].bytesused; + cache_op = SMEM_CACHE_CLEAN_INVALIDATE; + } + } else if (vb->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) { /* yuv */ + skip = false; + offset = 0; + size = vb->planes[i].length; + cache_op = SMEM_CACHE_INVALIDATE; + } + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!i) { /* yuv */ + skip = false; + offset = vb->planes[i].data_offset; + size = vb->planes[i].bytesused; + cache_op = SMEM_CACHE_CLEAN_INVALIDATE; + } + } else if (vb->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) { /* bitstream */ + skip = false; + offset = 0; + size = vb->planes[i].length; + cache_op = SMEM_CACHE_INVALIDATE; + } + } + } + + if (!skip) { + rc = msm_smem_cache_operations(mbuf->smem[i].dma_buf, + cache_op, offset, size); + if (rc) + print_vidc_buffer(VIDC_ERR, + "qbuf cache ops failed", inst, mbuf); + } + } + + return rc; +} + +int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0, i; + struct vb2_buffer *vb; + bool skip; + + if (!inst || !mbuf) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + vb = &mbuf->vvb.vb2_buf; + + for (i = 0; i < vb->num_planes; i++) { + unsigned long offset, size; + enum smem_cache_ops cache_op; + + skip = true; + if (inst->session_type == MSM_VIDC_DECODER) { + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + /* bitstream and extradata */ + /* we do not need cache operations */ + } else if (vb->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) { /* yuv */ + skip = false; + offset = vb->planes[i].data_offset; + size = vb->planes[i].bytesused; + cache_op = SMEM_CACHE_INVALIDATE; + } + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + /* yuv and extradata */ + /* we do not need cache operations */ + } else if (vb->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) { /* bitstream */ + skip = false; + /* + * Include vp8e header bytes as well + * by making offset equal to zero + */ + offset = 0; + size = vb->planes[i].bytesused + + vb->planes[i].data_offset; + cache_op = SMEM_CACHE_INVALIDATE; + } + } + } + + if (!skip) { + rc = msm_smem_cache_operations(mbuf->smem[i].dma_buf, + cache_op, offset, size); + if (rc) + print_vidc_buffer(VIDC_ERR, + "dqbuf cache ops failed", inst, mbuf); + } + } + + return rc; +} + struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, struct vb2_buffer *vb2) { @@ -6168,6 +6351,8 @@ struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, goto exit; } } + /* dma cache operations need to be performed after dma_map */ + msm_comm_qbuf_cache_operations(inst, mbuf); /* special handling for decoder */ if (inst->session_type == MSM_VIDC_DECODER) { @@ -6291,6 +6476,7 @@ void handle_release_buffer_reference(struct msm_vidc_inst *inst, struct msm_vidc_buffer *temp; bool found = false; int i = 0; + u32 planes[VIDEO_MAX_PLANES] = {0}; mutex_lock(&inst->flush_lock); mutex_lock(&inst->registeredbufs.lock); @@ -6304,6 +6490,10 @@ void handle_release_buffer_reference(struct msm_vidc_inst *inst, } } if (found) { + /* save device_addr */ + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) + planes[i] = 
mbuf->smem[i].device_addr; + /* send RBR event to client */ msm_vidc_queue_rbr_event(inst, mbuf->vvb.vb2_buf.planes[0].m.fd, @@ -6321,6 +6511,7 @@ void handle_release_buffer_reference(struct msm_vidc_inst *inst, if (!mbuf->smem[0].refcount) { list_del(&mbuf->list); kref_put_mbuf(mbuf); + mbuf = NULL; } } else { print_vidc_buffer(VIDC_ERR, "mbuf not found", inst, mbuf); @@ -6338,8 +6529,8 @@ void handle_release_buffer_reference(struct msm_vidc_inst *inst, */ found = false; list_for_each_entry(temp, &inst->registeredbufs.list, list) { - if (msm_comm_compare_vb2_plane(inst, mbuf, - &temp->vvb.vb2_buf, 0)) { + if (msm_comm_compare_device_plane(temp, planes, 0)) { + mbuf = temp; found = true; break; } @@ -6359,9 +6550,11 @@ void handle_release_buffer_reference(struct msm_vidc_inst *inst, /* don't queue the buffer */ found = false; } - /* clear DEFERRED flag, if any, as the buffer is going to be queued */ - if (found) + /* clear required flags as the buffer is going to be queued */ + if (found) { mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED; + mbuf->flags &= ~MSM_VIDC_FLAG_RBR_PENDING; + } unlock: mutex_unlock(&inst->registeredbufs.lock); @@ -6533,3 +6726,73 @@ int msm_comm_release_mark_data(struct msm_vidc_inst *inst) return 0; } +int msm_comm_set_color_format_constraints(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, + struct msm_vidc_format_constraint *pix_constraint) +{ + struct hal_uncompressed_plane_actual_constraints_info + *pconstraint = NULL; + u32 num_planes = 2; + u32 size = 0; + int rc = 0; + struct hfi_device *hdev; + + if (!inst || !inst->core || !inst->core->device) { + dprintk(VIDC_ERR, "%s - invalid param\n", __func__); + return -EINVAL; + } + + hdev = inst->core->device; + + size = sizeof(buffer_type) + + sizeof(u32) + + num_planes + * sizeof(struct hal_uncompressed_plane_constraints); + + pconstraint = kzalloc(size, GFP_KERNEL); + if (!pconstraint) { + dprintk(VIDC_ERR, "No memory cannot alloc constrain\n"); + rc = -ENOMEM; + goto exit; + } + + pconstraint->buffer_type = buffer_type; + pconstraint->num_planes = pix_constraint->num_planes; + //set Y plan constraints + dprintk(VIDC_INFO, "Set Y plan constraints.\n"); + pconstraint->rg_plane_format[0].stride_multiples = + pix_constraint->y_stride_multiples; + pconstraint->rg_plane_format[0].max_stride = + pix_constraint->y_max_stride; + pconstraint->rg_plane_format[0].min_plane_buffer_height_multiple = + pix_constraint->y_min_plane_buffer_height_multiple; + pconstraint->rg_plane_format[0].buffer_alignment = + pix_constraint->y_buffer_alignment; + + //set UV plan constraints + dprintk(VIDC_INFO, "Set UV plan constraints.\n"); + pconstraint->rg_plane_format[1].stride_multiples = + pix_constraint->uv_stride_multiples; + pconstraint->rg_plane_format[1].max_stride = + pix_constraint->uv_max_stride; + pconstraint->rg_plane_format[1].min_plane_buffer_height_multiple = + pix_constraint->uv_min_plane_buffer_height_multiple; + pconstraint->rg_plane_format[1].buffer_alignment = + pix_constraint->uv_buffer_alignment; + + rc = call_hfi_op(hdev, + session_set_property, + inst->session, + HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO, + pconstraint); + if (rc) + dprintk(VIDC_ERR, + "Failed to set input color format constraint\n"); + else + dprintk(VIDC_DBG, "Set color format constraint success\n"); + +exit: + if (!pconstraint) + kfree(pconstraint); + return rc; +} diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 
d73da96f89b66879f231660a22268943a762b1a2..ecadf76b101d7cbb4c0362a49ca0d09eb1fe072c 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -92,6 +92,11 @@ const struct msm_vidc_format *msm_comm_get_pixel_fmt_index( const struct msm_vidc_format fmt[], int size, int index, int fmt_type); struct msm_vidc_format *msm_comm_get_pixel_fmt_fourcc( struct msm_vidc_format fmt[], int size, int fourcc, int fmt_type); +struct msm_vidc_format_constraint *msm_comm_get_pixel_fmt_constraints( + struct msm_vidc_format_constraint fmt[], int size, int fourcc); +int msm_comm_set_color_format_constraints(struct msm_vidc_inst *inst, + enum hal_buffer buffer_type, + struct msm_vidc_format_constraint *pix_constraint); struct buf_queue *msm_comm_get_vb2q( struct msm_vidc_inst *inst, enum v4l2_buf_type type); int msm_comm_try_state(struct msm_vidc_inst *inst, int state); @@ -103,6 +108,8 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, int msm_comm_set_recon_buffers(struct msm_vidc_inst *inst); int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst); int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst); +int msm_comm_set_buffer_count(struct msm_vidc_inst *inst, + int host_count, int act_count, enum hal_buffer type); int msm_comm_set_output_buffers(struct msm_vidc_inst *inst); int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst); int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); @@ -119,7 +126,6 @@ int msm_comm_release_output_buffers(struct msm_vidc_inst *inst, void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst); int msm_comm_force_cleanup(struct msm_vidc_inst *inst); int msm_comm_suspend(int core_id); -int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst); enum hal_extradata_id msm_comm_get_hal_extradata_index( enum v4l2_mpeg_vidc_extradata index); struct hal_buffer_requirements *get_buff_req_buffer( @@ -203,9 +209,9 @@ bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf, bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf, u32 *planes); int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, - struct v4l2_buffer *b); + struct msm_vidc_buffer *mbuf); int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst, - struct v4l2_buffer *b); + struct msm_vidc_buffer *mbuf); void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index 3b3882026bd4d53ddcecc0241ef2fb6dca386d21..455fa0dbbc1e55726abfad3e4247f881aed8739a 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -253,6 +253,21 @@ struct msm_vidc_format { int type; u32 (*get_frame_size)(int plane, u32 height, u32 width); bool defer_outputs; + u32 input_min_count; + u32 output_min_count; +}; + +struct msm_vidc_format_constraint { + u32 fourcc; + u32 num_planes; + u32 y_stride_multiples; + u32 y_max_stride; + u32 y_min_plane_buffer_height_multiple; + u32 y_buffer_alignment; + u32 uv_stride_multiples; + u32 uv_max_stride; + u32 uv_min_plane_buffer_height_multiple; + u32 uv_buffer_alignment; }; struct msm_vidc_drv { @@ -320,8 +335,6 @@ struct clock_data { int load_high; int min_threshold; int max_threshold; - unsigned int extra_capture_buffer_count; - unsigned int 
extra_output_buffer_count; enum hal_buffer buffer_type; bool dcvs_mode; unsigned long bitrate; @@ -396,7 +409,6 @@ struct msm_vidc_core { bool trigger_ssr; unsigned long min_freq; unsigned long curr_freq; - struct vidc_bus_vote_data *vote_data; struct msm_vidc_core_ops *core_ops; }; @@ -514,6 +526,8 @@ int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); struct dma_buf *msm_smem_get_dma_buf(int fd); void msm_smem_put_dma_buf(void *dma_buf); +int msm_smem_cache_operations(struct dma_buf *dbuf, + enum smem_cache_ops cache_op, unsigned long offset, unsigned long size); void msm_vidc_fw_unload_handler(struct work_struct *work); void msm_vidc_ssr_handler(struct work_struct *work); /* diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c index 8281ea7fec3dea54fedb88844eea633dea7da17d..a1df5b17a5aa0608a11a3afae50665ba3aa56abb 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c @@ -196,7 +196,12 @@ static struct msm_vidc_common_data sm8150_common_data[] = { }, { .key = "qcom,max-hw-load", - .value = 4147200, /* 4096x2160/256 MBs@120fps */ + .value = 3916800, /* + * 1920x1088/256 MBs@480fps. It is less + * any other usecases (ex: + * 3840x2160@120fps, 4096x2160@96ps, + * 7680x4320@30fps) + */ }, { .key = "qcom,max-hq-mbs-per-frame", @@ -234,6 +239,10 @@ static struct msm_vidc_common_data sm8150_common_data[] = { .key = "qcom,decode-batching", .value = 1, }, + { + .key = "qcom,dcvs", + .value = 1, + }, }; static struct msm_vidc_common_data sdm845_common_data[] = { @@ -289,6 +298,10 @@ static struct msm_vidc_common_data sdm845_common_data[] = { .key = "qcom,debug-timeout", .value = 0, }, + { + .key = "qcom,dcvs", + .value = 1, + }, }; static struct msm_vidc_common_data sdm670_common_data_v0[] = { @@ -336,6 +349,10 @@ static struct msm_vidc_common_data sdm670_common_data_v0[] = { .key = "qcom,hw-resp-timeout", .value = 250, }, + { + .key = "qcom,dcvs", + .value = 1, + }, }; static struct msm_vidc_common_data sdm670_common_data_v1[] = { @@ -383,6 +400,10 @@ static struct msm_vidc_common_data sdm670_common_data_v1[] = { .key = "qcom,hw-resp-timeout", .value = 250, }, + { + .key = "qcom,dcvs", + .value = 1, + }, }; static struct msm_vidc_efuse_data sdm670_efuse_data[] = { diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index 8ff33e8a025751c4e2a01153042966fcc261164a..c006fdd473b750f9dc354f115a4a6d8210595d3d 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -789,6 +789,8 @@ int read_platform_resources_from_drv_data( "qcom,domain-attr-cache-pagetables"); res->decode_batching = find_key_value(platform_data, "qcom,decode-batching"); + res->dcvs = find_key_value(platform_data, + "qcom,dcvs"); res->csc_coeff_data = &platform_data->csc_data; @@ -979,6 +981,17 @@ static int msm_vidc_setup_context_bank(struct msm_vidc_platform_resources *res, goto release_mapping; } + /* + * configure device segment size and segment boundary to ensure + * iommu mapping returns one mapping (which is required for partial + * cache operations) + */ + if (!dev->dma_parms) + dev->dma_parms = + devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, DMA_BIT_MASK(64)); + 
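
A sanity-check sketch of the assumption stated in the comment above (not part of the patch): with the segment size and boundary opened up, dma_map_sg() on the context-bank device is expected to return a single IOVA segment, which is what makes the offset/size based (partial) cache maintenance added elsewhere in this patch well defined.

    /* illustrative only: verify the single-segment assumption */
    static bool example_single_iova_segment(struct sg_table *sgt)
    {
            /* assumes nents was updated from dma_map_sg()'s return value */
            return sgt->nents == 1;
    }
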
dprintk(VIDC_DBG, "Attached %s and created mapping\n", dev_name(dev)); dprintk(VIDC_DBG, "Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, mapping: %pK", diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h index 981aafaa8fe6761f563ce63f2c597544309968bf..ab7271ff512d5129a8ef129d03d6be82cf985629 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h @@ -193,6 +193,7 @@ struct msm_vidc_platform_resources { bool non_fatal_pagefaults; bool cache_pagetables; bool decode_batching; + bool dcvs; struct msm_vidc_codec_data *codec_data; int codec_data_count; struct msm_vidc_csc_coeff *csc_coeff_data; diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index 1776769e34bbc6a3a5279deefdf508f1138a0076..424d80d3d5b7becd1f18e87991235be04b71ec5d 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -4071,7 +4071,6 @@ static int __init_bus(struct venus_hfi_device *device) devfreq_suspend_device(bus->devfreq); } - device->bus_vote = DEFAULT_BUS_VOTE; return 0; err_add_dev: diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 65e1545927db6410c6c6846dc5cb55f8025166f7..fdcc9664c6dda2f59dc3fd138b57759070834bb0 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -118,6 +118,7 @@ enum hal_extradata_id { enum hal_property { HAL_CONFIG_FRAME_RATE = 0x04000001, + HAL_CONFIG_OPERATING_RATE, HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT, HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO, HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO, @@ -408,6 +409,10 @@ struct hal_frame_rate { u32 frame_rate; }; +struct hal_operating_rate { + u32 operating_rate; +}; + enum hal_uncompressed_format { HAL_COLOR_FORMAT_MONOCHROME = 0x00000001, HAL_COLOR_FORMAT_NV12 = 0x00000002, diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index c16a407a48334d00e5fa0f0bb9bfbab261aea82e..42210f980da1a64f33915ef00f0231c293250513 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -247,6 +247,8 @@ struct hfi_buffer_info { (HFI_PROPERTY_CONFIG_COMMON_START + 0x001) #define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE \ (HFI_PROPERTY_CONFIG_COMMON_START + 0x002) +#define HFI_PROPERTY_CONFIG_OPERATING_RATE \ + (HFI_PROPERTY_CONFIG_COMMON_START + 0x003) #define HFI_PROPERTY_PARAM_VDEC_COMMON_START \ (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000) @@ -479,6 +481,10 @@ struct hfi_frame_rate { u32 frame_rate; }; +struct hfi_operating_rate { + u32 operating_rate; +}; + #define HFI_INTRA_REFRESH_NONE (HFI_COMMON_BASE + 0x1) #define HFI_INTRA_REFRESH_CYCLIC (HFI_COMMON_BASE + 0x2) #define HFI_INTRA_REFRESH_RANDOM (HFI_COMMON_BASE + 0x5) diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index c2d3b8f0f4874766abd34975371184a0697c5a29..93f69b3ac9111233e999138851574e5c336f56bb 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -849,9 +849,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe) return 0; } -static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) +static void 
vsp1_video_release_buffers(struct vsp1_video *video) { - struct vsp1_video *video = pipe->output->video; struct vsp1_vb2_buffer *buffer; unsigned long flags; @@ -861,12 +860,18 @@ static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR); INIT_LIST_HEAD(&video->irqqueue); spin_unlock_irqrestore(&video->irqlock, flags); +} + +static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) +{ + lockdep_assert_held(&pipe->lock); /* Release our partition table allocation */ - mutex_lock(&pipe->lock); kfree(pipe->part_table); pipe->part_table = NULL; - mutex_unlock(&pipe->lock); + + vsp1_dl_list_put(pipe->dl); + pipe->dl = NULL; } static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) @@ -881,8 +886,9 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) if (pipe->stream_count == pipe->num_inputs) { ret = vsp1_video_setup_pipeline(pipe); if (ret < 0) { - mutex_unlock(&pipe->lock); + vsp1_video_release_buffers(video); vsp1_video_cleanup_pipeline(pipe); + mutex_unlock(&pipe->lock); return ret; } @@ -932,13 +938,12 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) if (ret == -ETIMEDOUT) dev_err(video->vsp1->dev, "pipeline stop timeout\n"); - vsp1_dl_list_put(pipe->dl); - pipe->dl = NULL; + vsp1_video_cleanup_pipeline(pipe); } mutex_unlock(&pipe->lock); media_pipeline_stop(&video->video.entity); - vsp1_video_cleanup_pipeline(pipe); + vsp1_video_release_buffers(video); vsp1_video_pipeline_put(pipe); } diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 9b742d569fb511db5d4de93538205d41f6ff42ea..c30cb0fb165d183f339309cae45f60746c7fc637 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -918,6 +918,9 @@ struct usb_device_id cx231xx_id_table[] = { .driver_info = CX231XX_BOARD_CNXT_RDE_250}, {USB_DEVICE(0x0572, 0x58A0), .driver_info = CX231XX_BOARD_CNXT_RDU_250}, + /* AverMedia DVD EZMaker 7 */ + {USB_DEVICE(0x07ca, 0xc039), + .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER}, {USB_DEVICE(0x2040, 0xb110), .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL}, {USB_DEVICE(0x2040, 0xb111), diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 20397aba6849e01bf4e0d08604e1f48322cabb1f..9f2a64cb691df7492ecce533b094c77cc5ef7a44 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -1992,6 +1992,9 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, if (!found) return -ENOENT; + if (ctrl->info.size < mapping->size) + return -EINVAL; + if (mutex_lock_interruptible(&chain->ctrl_mutex)) return -ERESTARTSYS; diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index dae6273017ef4f0b147a810715897742dbb37045..8a8da81b50e7092cdecc84945c95a97919a15f01 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -877,7 +877,7 @@ static int put_v4l2_ext_controls32(struct file *file, get_user(kcontrols, &kp->controls)) return -EFAULT; - if (!count) + if (!count || count > (U32_MAX/sizeof(*ucontrols))) return 0; if (get_user(p, &up->controls)) return -EFAULT; diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index d1c46de89eb49ac21329fbd3e7c053089fd6edc0..d9ae983095c5447a0a20bcf0636aaf7feee3655d 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -124,6 
+124,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = { .properties = apl_i2c_properties, }; +static const struct intel_lpss_platform_info cnl_i2c_info = { + .clk_rate = 216000000, + .properties = spt_i2c_properties, +}; + static const struct pci_device_id intel_lpss_pci_ids[] = { /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, @@ -207,13 +212,13 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info }, - { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info }, { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info }, /* SPT-H */ { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info }, @@ -240,10 +245,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info }, { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info }, - { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info }, { } }; MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 0e0ab9bb15305d895e082e479ef3f86922c71d33..40e8d9b59d07761e56f47011ab48fee4a3ceb1c2 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss) intel_lpss_deassert_reset(lpss); + intel_lpss_set_remap_addr(lpss); + if (!intel_lpss_has_idma(lpss)) return; - intel_lpss_set_remap_addr(lpss); - /* Make sure that SPI multiblock DMA transfers are re-enabled */ if (lpss->type == LPSS_DEV_SPI) writel(value, lpss->priv + LPSS_PRIV_SSP_REG); diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index a8b6d6a635e962b057325d9d5d6af96382474eba..393a80bdb846af0e6fafced9ca3e7ca528e6f767 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -331,12 +331,20 @@ static ssize_t prefault_mode_store(struct device *device, struct cxl_afu *afu = to_cxl_afu(device); enum prefault_modes mode = -1; - if (!strncmp(buf, "work_element_descriptor", 23)) - mode = CXL_PREFAULT_WED; - if (!strncmp(buf, "all", 3)) - mode = 
CXL_PREFAULT_ALL; if (!strncmp(buf, "none", 4)) mode = CXL_PREFAULT_NONE; + else { + if (!radix_enabled()) { + + /* only allowed when not in radix mode */ + if (!strncmp(buf, "work_element_descriptor", 23)) + mode = CXL_PREFAULT_WED; + if (!strncmp(buf, "all", 3)) + mode = CXL_PREFAULT_ALL; + } else { + dev_err(device, "Cannot prefault with radix enabled\n"); + } + } if (mode == -1) return -EINVAL; diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c index 87f1b338106c55aa6ef5f8c757b2bb45bf07d24e..fa5528cff904195f3fe272767219da46622fd2cf 100644 --- a/drivers/misc/hdcp_qseecom.c +++ b/drivers/misc/hdcp_qseecom.c @@ -33,8 +33,9 @@ #include "qseecom_kernel.h" -#define TZAPP_NAME "hdcp2p2" +#define HDCP2P2_APP_NAME "hdcp2p2" #define HDCP1_APP_NAME "hdcp1" +#define HDCPSRM_APP_NAME "hdcpsrm" #define QSEECOM_SBUFF_SIZE 0x1000 #define MAX_TX_MESSAGE_SIZE 129 @@ -71,6 +72,9 @@ */ #define SLEEP_SET_HW_KEY_MS 300 +/* Wait 200ms after authentication */ +#define SLEEP_FORCE_ENCRYPTION_MS 200 + /* hdcp command status */ #define HDCP_SUCCESS 0 #define HDCP_FAIL 1 @@ -110,6 +114,7 @@ #define HDCP_KEY_ALREADY_PROVISIONED 35 #define HDCP_KEY_NOT_PROVISIONED 36 #define HDCP_CALL_TOO_SOON 37 +#define HDCP_FORCE_ENCRYPTION_FAILED 38 /* flags set by tz in response message */ #define HDCP_TXMTR_SUBSTATE_INIT 0 @@ -141,7 +146,8 @@ struct hdcp_##x##_rsp *rsp_buf = NULL; \ if (!handle->qseecom_handle) { \ pr_err("invalid qseecom_handle while processing %s\n", #x); \ - return -EINVAL; \ + rc = -EINVAL; \ + goto error; \ } \ req_buf = (struct hdcp_##x##_req *) handle->qseecom_handle->sbuf; \ rsp_buf = (struct hdcp_##x##_rsp *) (handle->qseecom_handle->sbuf + \ @@ -177,6 +183,7 @@ enum { hdcp_cmd_session_init = SERVICE_CREATE_CMD(16), hdcp_cmd_session_deinit = SERVICE_CREATE_CMD(17), hdcp_cmd_start_auth = SERVICE_CREATE_CMD(18), + hdcp_cmd_force_encryption = SERVICE_CREATE_CMD(22), }; enum hdcp_state { @@ -448,12 +455,24 @@ struct __attribute__ ((__packed__)) hdcp_start_auth_rsp { uint8_t message[MAX_TX_MESSAGE_SIZE]; }; +struct __attribute__ ((__packed__)) hdcp_force_encryption_req { + uint32_t commandid; + uint32_t ctxhandle; + uint32_t enable; +}; + +struct __attribute__ ((__packed__)) hdcp_force_encryption_rsp { + uint32_t status; + uint32_t commandid; +}; + struct hdcp2_handle { struct hdcp2_app_data app_data; uint32_t tz_ctxhandle; bool feature_supported; enum hdcp_state hdcp_state; struct qseecom_handle *qseecom_handle; + struct qseecom_handle *hdcpsrm_qseecom_handle; uint32_t session_id; bool legacy_app; uint32_t device_type; @@ -552,6 +571,8 @@ static const char *hdcp_cmd_status_to_str(uint32_t err) return HDCP_CMD_STATUS_TO_STR(HDCP_KEY_NOT_PROVISIONED); case HDCP_CALL_TOO_SOON: return HDCP_CMD_STATUS_TO_STR(HDCP_CALL_TOO_SOON); + case HDCP_FORCE_ENCRYPTION_FAILED: + return HDCP_CMD_STATUS_TO_STR(HDCP_FORCE_ENCRYPTION_FAILED); default: return ""; } @@ -566,12 +587,12 @@ static int hdcp_get_version(struct hdcp2_handle *handle) if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { pr_err("library already loaded\n"); - return rc; + goto error; } rc = hdcp2_app_process_cmd(version); if (rc) - goto exit; + goto error; app_major_version = HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion); @@ -580,7 +601,7 @@ static int hdcp_get_version(struct hdcp2_handle *handle) if (app_major_version == 1) handle->legacy_app = true; -exit: +error: return rc; } @@ -592,20 +613,21 @@ static int hdcp2_app_init_legacy(struct hdcp2_handle *handle) if (!handle->legacy_app) { pr_err("wrong init function\n"); - goto exit; 
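
The fix being applied at this point, and repeated throughout the rest of this file, is worth calling out: the old paths jumped to the common label without setting rc, so a failed precondition returned success. A minimal sketch of the corrected pattern (illustrative, not code from the patch):

    static int example_precondition_check(bool app_loaded)
    {
            int rc = 0;

            if (!app_loaded) {
                    pr_err("app not loaded\n");
                    rc = -EINVAL;   /* carry the error to the common label */
                    goto error;
            }
            /* ... normal processing ... */
    error:
            return rc;
    }
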
+ rc = -EINVAL; + goto error; } if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { pr_err("library already loaded\n"); - goto exit; + goto error; } rc = hdcp2_app_process_cmd(init_v1); if (rc) - goto exit; + goto error; pr_debug("success\n"); -exit: +error: return rc; } @@ -618,12 +640,13 @@ static int hdcp2_app_init(struct hdcp2_handle *handle) if (handle->legacy_app) { pr_err("wrong init function\n"); - goto exit; + rc = -EINVAL; + goto error; } if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { pr_err("library already loaded\n"); - goto exit; + goto error; } req_buf->clientversion = @@ -633,7 +656,7 @@ static int hdcp2_app_init(struct hdcp2_handle *handle) rc = hdcp2_app_process_cmd(init); if (rc) - goto exit; + goto error; app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion); if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) { @@ -641,7 +664,7 @@ static int hdcp2_app_init(struct hdcp2_handle *handle) ("client-app minor version mismatch app(%d), client(%d)\n", app_minor_version, HDCP_CLIENT_MINOR_VERSION); rc = -1; - goto exit; + goto error; } pr_debug("success\n"); @@ -654,7 +677,7 @@ static int hdcp2_app_init(struct hdcp2_handle *handle) HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion), HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion), HCDP_TXMTR_GET_PATCH_VERSION(rsp_buf->appversion)); -exit: +error: return rc; } @@ -666,25 +689,26 @@ static int hdcp2_app_tx_init(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { pr_err("session not initialized\n"); - goto exit; + rc = -EINVAL; + goto error; } if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) { pr_err("txmtr already initialized\n"); - goto exit; + goto error; } req_buf->sessionid = handle->session_id; rc = hdcp2_app_process_cmd(tx_init); if (rc) - goto exit; + goto error; handle->tz_ctxhandle = rsp_buf->ctxhandle; handle->hdcp_state |= HDCP_STATE_TXMTR_INIT; pr_debug("success\n"); -exit: +error: return rc; } @@ -696,17 +720,18 @@ static int hdcp2_app_tx_init_legacy(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); - goto exit; + rc = -EINVAL; + goto error; } if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) { pr_err("txmtr already initialized\n"); - goto exit; + goto error; } rc = hdcp2_app_process_cmd(tx_init_v1); if (rc) - goto exit; + goto error; handle->app_data.response.data = rsp_buf->message; handle->app_data.response.length = rsp_buf->msglen; @@ -716,7 +741,7 @@ static int hdcp2_app_tx_init_legacy(struct hdcp2_handle *handle) handle->hdcp_state |= HDCP_STATE_TXMTR_INIT; pr_debug("success\n"); -exit: +error: return rc; } @@ -726,19 +751,27 @@ static int hdcp2_app_load(struct hdcp2_handle *handle) if (!handle) { pr_err("invalid input\n"); - goto exit; + rc = -EINVAL; + goto error; } if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { pr_err("library already loaded\n"); - goto exit; + goto error; } rc = qseecom_start_app(&handle->qseecom_handle, - TZAPP_NAME, QSEECOM_SBUFF_SIZE); + HDCP2P2_APP_NAME, QSEECOM_SBUFF_SIZE); if (rc) { - pr_err("qseecom_start_app failed %d\n", rc); - goto exit; + pr_err("qseecom_start_app failed for HDCP2P2 (%d)\n", rc); + goto error; + } + + rc = qseecom_start_app(&handle->hdcpsrm_qseecom_handle, + HDCPSRM_APP_NAME, QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("qseecom_start_app failed for HDCPSRM (%d)\n", rc); + goto hdcpsrm_error; } pr_debug("qseecom_start_app success\n"); @@ -746,7 +779,7 @@ static int hdcp2_app_load(struct hdcp2_handle *handle) rc = hdcp_get_version(handle); if (rc) { 
pr_err("library get version failed\n"); - goto exit; + goto get_version_error; } if (handle->legacy_app) { @@ -757,19 +790,20 @@ static int hdcp2_app_load(struct hdcp2_handle *handle) handle->tx_init = hdcp2_app_tx_init; } - if (handle->app_init == NULL) { - pr_err("invalid app init function pointer\n"); - goto exit; - } - rc = handle->app_init(handle); if (rc) { pr_err("app init failed\n"); - goto exit; + goto get_version_error; } handle->hdcp_state |= HDCP_STATE_APP_LOADED; -exit: + + return rc; +get_version_error: + qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle); +hdcpsrm_error: + qseecom_shutdown_app(&handle->qseecom_handle); +error: return rc; } @@ -779,20 +813,26 @@ static int hdcp2_app_unload(struct hdcp2_handle *handle) hdcp2_app_init_var(deinit); - rc = hdcp2_app_process_cmd(deinit); + hdcp2_app_process_cmd(deinit); + + /* deallocate the resources for qseecom HDCPSRM handle */ + rc = qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle); if (rc) - goto exit; + pr_err("qseecom_shutdown_app failed for HDCPSRM (%d)\n", rc); - /* deallocate the resources for qseecom handle */ + /* deallocate the resources for qseecom HDCP2P2 handle */ rc = qseecom_shutdown_app(&handle->qseecom_handle); if (rc) { - pr_err("qseecom_shutdown_app failed err: %d\n", rc); - goto exit; + pr_err("qseecom_shutdown_app failed for HDCP2P2 (%d)\n", rc); + return rc; } handle->hdcp_state &= ~HDCP_STATE_APP_LOADED; pr_debug("success\n"); -exit: + + return rc; +error: + qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle); return rc; } @@ -804,12 +844,13 @@ bool hdcp2_feature_supported(void *data) if (!handle) { pr_err("invalid input\n"); - goto exit; + rc = -EINVAL; + goto error; } if (handle->feature_supported) { supported = true; - goto exit; + goto error; } rc = hdcp2_app_load(handle); @@ -821,7 +862,7 @@ bool hdcp2_feature_supported(void *data) hdcp2_app_unload(handle); supported = true; } -exit: +error: return supported; } @@ -834,25 +875,25 @@ static int hdcp2_app_session_init(struct hdcp2_handle *handle) if (!handle->qseecom_handle || !handle->qseecom_handle->sbuf) { pr_err("invalid handle\n"); rc = -EINVAL; - goto exit; + goto error; } if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); rc = -EINVAL; - goto exit; + goto error; } if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) { pr_err("session already initialized\n"); - goto exit; + goto error; } req_buf->deviceid = handle->device_type; rc = hdcp2_app_process_cmd(session_init); if (rc) - goto exit; + goto error; pr_debug("session id %d\n", rsp_buf->sessionid); @@ -860,7 +901,7 @@ static int hdcp2_app_session_init(struct hdcp2_handle *handle) handle->hdcp_state |= HDCP_STATE_SESSION_INIT; pr_debug("success\n"); -exit: +error: return rc; } @@ -873,25 +914,24 @@ static int hdcp2_app_session_deinit(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); rc = -EINVAL; - goto exit; + goto error; } if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { - /* unload library here */ pr_err("session not initialized\n"); rc = -EINVAL; - goto exit; + goto error; } req_buf->sessionid = handle->session_id; rc = hdcp2_app_process_cmd(session_deinit); if (rc) - goto exit; + goto error; handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT; pr_debug("success\n"); -exit: +error: return rc; } @@ -904,25 +944,24 @@ static int hdcp2_app_tx_deinit(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { pr_err("app not loaded\n"); rc = -EINVAL; - goto exit; + goto error; 
} if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { - /* unload library here */ pr_err("txmtr not initialized\n"); rc = -EINVAL; - goto exit; + goto error; } req_buf->ctxhandle = handle->tz_ctxhandle; rc = hdcp2_app_process_cmd(tx_deinit); if (rc) - goto exit; + goto error; handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT; pr_debug("success\n"); -exit: +error: return rc; } @@ -935,20 +974,20 @@ static int hdcp2_app_start_auth(struct hdcp2_handle *handle) if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { pr_err("session not initialized\n"); rc = -EINVAL; - goto exit; + goto error; } if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { pr_err("txmtr not initialized\n"); rc = -EINVAL; - goto exit; + goto error; } req_buf->ctxHandle = handle->tz_ctxhandle; rc = hdcp2_app_process_cmd(start_auth); if (rc) - goto exit; + goto error; handle->app_data.response.data = rsp_buf->message; handle->app_data.response.length = rsp_buf->msglen; @@ -957,7 +996,7 @@ static int hdcp2_app_start_auth(struct hdcp2_handle *handle) handle->tz_ctxhandle = rsp_buf->ctxhandle; pr_debug("success\n"); -exit: +error: return rc; } @@ -967,30 +1006,27 @@ static int hdcp2_app_start(struct hdcp2_handle *handle) rc = hdcp2_app_load(handle); if (rc) - goto exit; + goto error; if (!handle->legacy_app) { rc = hdcp2_app_session_init(handle); if (rc) - goto exit; + goto error; } if (handle->tx_init == NULL) { pr_err("invalid txmtr init function pointer\n"); rc = -EINVAL; - goto exit; + goto error; } rc = handle->tx_init(handle); if (rc) - goto exit; + goto error; - if (!handle->legacy_app) { + if (!handle->legacy_app) rc = hdcp2_app_start_auth(handle); - if (rc) - goto exit; - } -exit: +error: return rc; } @@ -1021,7 +1057,8 @@ static int hdcp2_app_process_msg(struct hdcp2_handle *handle) if (!handle->app_data.request.data) { pr_err("invalid request buffer\n"); - return -EINVAL; + rc = -EINVAL; + goto error; } req_buf->msglen = handle->app_data.request.length; @@ -1029,7 +1066,7 @@ static int hdcp2_app_process_msg(struct hdcp2_handle *handle) rc = hdcp2_app_process_cmd(rcvd_msg); if (rc) - goto exit; + goto error; /* check if it's a repeater */ if (rsp_buf->flag == HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST) @@ -1040,7 +1077,7 @@ static int hdcp2_app_process_msg(struct hdcp2_handle *handle) handle->app_data.response.data = rsp_buf->msg; handle->app_data.response.length = rsp_buf->msglen; handle->app_data.timeout = rsp_buf->timeout; -exit: +error: return rc; } @@ -1052,12 +1089,12 @@ static int hdcp2_app_timeout(struct hdcp2_handle *handle) rc = hdcp2_app_process_cmd(send_timeout); if (rc) - goto exit; + goto error; handle->app_data.response.data = rsp_buf->message; handle->app_data.response.length = rsp_buf->msglen; handle->app_data.timeout = rsp_buf->timeout; -exit: +error: return rc; } @@ -1069,7 +1106,7 @@ static int hdcp2_app_enable_encryption(struct hdcp2_handle *handle) /* * wait at least 200ms before enabling encryption - * as per hdcp2p2 sepcifications. + * as per hdcp2p2 specifications. 
*/ msleep(SLEEP_SET_HW_KEY_MS); @@ -1077,13 +1114,54 @@ static int hdcp2_app_enable_encryption(struct hdcp2_handle *handle) rc = hdcp2_app_process_cmd(set_hw_key); if (rc) - goto exit; + goto error; handle->hdcp_state |= HDCP_STATE_AUTHENTICATED; + pr_debug("success\n"); + return rc; +error: + return rc; +} + +int hdcp2_force_encryption_utility(struct hdcp2_handle *handle, uint32_t enable) +{ + int rc = 0; + + hdcp2_app_init_var(force_encryption); + if (handle->hdcp_state == HDCP_STATE_AUTHENTICATED) + msleep(SLEEP_FORCE_ENCRYPTION_MS); + + req_buf->ctxhandle = handle->tz_ctxhandle; + req_buf->enable = enable; + + rc = hdcp2_app_process_cmd(force_encryption); + if (rc || (rsp_buf->commandid != hdcp_cmd_force_encryption)) + goto error; + + return 0; +error: + return rc; +} + +int hdcp2_force_encryption(void *ctx, uint32_t enable) +{ + int rc = 0; + struct hdcp2_handle *handle = NULL; + + if (!ctx) { + pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + rc = hdcp2_force_encryption_utility(handle, enable); + if (rc) + goto error; + pr_debug("success\n"); return 0; -exit: +error: pr_err("failed, rc=%d\n", rc); return rc; } @@ -1098,12 +1176,12 @@ static int hdcp2_app_query_stream(struct hdcp2_handle *handle) rc = hdcp2_app_process_cmd(query_stream_type); if (rc) - goto exit; + goto error; handle->app_data.response.data = rsp_buf->msg; handle->app_data.response.length = rsp_buf->msglen; handle->app_data.timeout = rsp_buf->timeout; -exit: +error: return rc; } @@ -1150,11 +1228,11 @@ int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, case HDCP2_CMD_STOP: rc = hdcp2_app_stop(handle); default: - goto exit; + goto error; } if (rc) - goto exit; + goto error; handle->app_data.request.data = hdcp2_get_recv_buf(handle); @@ -1164,7 +1242,7 @@ int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, app_data->response.length = handle->app_data.response.length; app_data->timeout = handle->app_data.timeout; app_data->repeater_flag = handle->app_data.repeater_flag; -exit: +error: return rc; } @@ -1174,10 +1252,10 @@ void *hdcp2_init(u32 device_type) handle = kzalloc(sizeof(struct hdcp2_handle), GFP_KERNEL); if (!handle) - goto exit; + goto error; handle->device_type = device_type; -exit: +error: return handle; } @@ -1208,25 +1286,25 @@ bool hdcp1_feature_supported(void *data) if (!handle) { pr_err("invalid input\n"); - goto exit; + goto error; } if (handle->feature_supported) { supported = true; - goto exit; + goto error; } rc = qseecom_start_app(&handle->qseecom_handle, HDCP1_APP_NAME, QSEECOM_SBUFF_SIZE); if (rc) { pr_err("qseecom_start_app failed %d\n", rc); - goto exit; + goto error; } pr_debug("HDCP 1.x supported\n"); handle->feature_supported = true; supported = true; -exit: +error: return supported; } diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 11ef17750881af4878894a8f4723826d1fd77690..34a86fefa520aa54dc8c5099694e528c8cd39655 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -8787,6 +8787,7 @@ static int qseecom_probe(struct platform_device *pdev) static int qseecom_remove(struct platform_device *pdev) { struct qseecom_registered_kclient_list *kclient = NULL; + struct qseecom_registered_kclient_list *kclient_tmp = NULL; unsigned long flags = 0; int ret = 0; int i; @@ -8796,10 +8797,8 @@ static int qseecom_remove(struct platform_device *pdev) atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY); spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); - list_for_each_entry(kclient, &qseecom.registered_kclient_list_head, - list) { 
- if (!kclient) - goto exit_irqrestore; + list_for_each_entry_safe(kclient, kclient_tmp, + &qseecom.registered_kclient_list_head, list) { /* Break the loop if client handle is NULL */ if (!kclient->handle) @@ -8823,7 +8822,7 @@ static int qseecom_remove(struct platform_device *pdev) kzfree(kclient->handle); exit_free_kclient: kzfree(kclient); -exit_irqrestore: + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); if (qseecom.qseos_version > QSEEE_VERSION_00) diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 9047c0a529b28221da8458668cd0673c0c5144d0..efd733472a3531804225c5515ade4f4cf69fd707 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b) } } - if (b->batch_page) { - vunmap(b->batch_page); - b->batch_page = NULL; - } - - if (b->page) { - __free_page(b->page); - b->page = NULL; - } + /* Clearing the batch_page unconditionally has no adverse effect */ + free_page((unsigned long)b->batch_page); + b->batch_page = NULL; } /* @@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = { static bool vmballoon_init_batching(struct vmballoon *b) { - b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP); - if (!b->page) - return false; + struct page *page; - b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL); - if (!b->batch_page) { - __free_page(b->page); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) return false; - } + b->batch_page = page_address(page); return true; } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index a34bbb100f278b2abf85d78f569d4e4f49c48216..a84c2ce7161aa52b9b6bda5ecebc810f39d2e734 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2225,7 +2225,7 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, } mqrq->areq.mrq = &brq->mrq; - mqrq->areq.mrq->req = mqrq->req; + mqrq->areq.mrq->req = req; mqrq->areq.err_check = mmc_blk_err_check; } @@ -2388,14 +2388,14 @@ static int mmc_blk_cmdq_switch(struct mmc_card *card, static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd( struct mmc_queue_req *mqrq, struct mmc_queue *mq) { - struct request *req = mqrq->req; + struct request *req = mmc_queue_req_to_req(mqrq); struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req; memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req)); cmdq_req->mrq.data = NULL; cmdq_req->cmd_flags = req->cmd_flags; - cmdq_req->mrq.req = mqrq->req; + cmdq_req->mrq.req = req; req->special = mqrq; cmdq_req->cmdq_req_flags |= DCMD; cmdq_req->mrq.cmdq_req = cmdq_req; @@ -2418,8 +2418,7 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq, set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state); - active_mqrq = &mq->mqrq_cmdq[req->tag]; - active_mqrq->req = req; + active_mqrq = req_to_mmc_queue_req(req); cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq); cmdq_req->cmdq_req_flags |= QBR; @@ -2553,7 +2552,7 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep( struct mmc_queue_req *mqrq, struct mmc_queue *mq) { struct mmc_card *card = mq->card; - struct request *req = mqrq->req; + struct request *req = mmc_queue_req_to_req(mqrq); struct mmc_blk_data *md = mq->blkdata; bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR); bool do_data_tag; @@ -2615,14 +2614,14 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep( } mqrq->cmdq_req.cmd_flags = req->cmd_flags; - mqrq->cmdq_req.mrq.req = mqrq->req; + mqrq->cmdq_req.mrq.req = req; mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req; 
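
The mmc changes in this hunk all follow from one structural move: the per-request driver context (struct mmc_queue_req) now lives in the block layer's per-request data area, sized via cmd_size and set up by init_rq_fn, instead of a tag-indexed mqrq_cmdq[] array, so the cached request pointer and the array go away. A hedged sketch of the resulting lookup pattern (not a complete prep function):

    static void example_cmdq_prep(struct request *req)
    {
            /* PDU allocated by the block layer alongside the request itself */
            struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);

            mqrq->cmdq_req.cmd_flags = req->cmd_flags;
            mqrq->cmdq_req.mrq.req = req;   /* back-pointer for completion */
    }
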
mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data; - mqrq->req->special = mqrq; + req->special = mqrq; pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d\n", mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq, - mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz)); + req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz)); pr_debug("%s: %s: mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n", mmc_hostname(card->host), __func__, cmdq_rq, cmdq_rq->blk_addr, @@ -2658,8 +2657,7 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req) BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.data_active_reqs)); BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs)); - active_mqrq = &mq->mqrq_cmdq[req->tag]; - active_mqrq->req = req; + active_mqrq = req_to_mmc_queue_req(req); mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq); @@ -2722,8 +2720,7 @@ int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req) set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state); - active_mqrq = &mq->mqrq_cmdq[req->tag]; - active_mqrq->req = req; + active_mqrq = req_to_mmc_queue_req(req); cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq); cmdq_req->cmdq_req_flags |= QBR; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 44746757111a1cb020e224db0274eb6623474664..91b50be61ac26fad699e496f23d05ba18c79cb3b 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -3249,7 +3249,22 @@ int mmc_resume_bus(struct mmc_host *host) if (host->bus_ops && !host->bus_dead && host->card) { mmc_power_up(host, host->card->ocr); BUG_ON(!host->bus_ops->resume); - host->bus_ops->resume(host); + err = host->bus_ops->resume(host); + if (err) { + pr_err("%s: %s: resume failed: %d\n", + mmc_hostname(host), __func__, err); + /* + * If we have cd-gpio based detection mechanism and + * deferred resume is supported, we will not detect + * card removal event when system is suspended. So if + * resume fails after a system suspend/resume, + * schedule the work to detect card presence. 
+ */ + if (mmc_card_is_removable(host) && + !(host->caps & MMC_CAP_NEEDS_POLL)) { + mmc_detect_change(host, 0); + } + } if (mmc_card_cmdq(host->card)) { err = mmc_cmdq_halt(host, false); if (err) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 1b91e693ef4623cad96b3aaf26f16b29dd359bd6..67fa942ec544968f02b1fdbd89dae60533632248 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -220,43 +220,23 @@ static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp) int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card) { - int i, ret = 0; + int ret = 0; /* one slot is reserved for dcmd requests */ int q_depth = card->ext_csd.cmdq_depth - 1; card->cmdq_init = false; if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE)) { - ret = -ENOTSUPP; - goto out; + return -ENOTSUPP; } init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq); init_waitqueue_head(&card->host->cmdq_ctx.wait); - mq->mqrq_cmdq = kcalloc(q_depth, - sizeof(struct mmc_queue_req), GFP_KERNEL); - if (!mq->mqrq_cmdq) { - ret = -ENOMEM; - goto out; - } - - /* sg is allocated for data request slots only */ - for (i = 0; i < q_depth; i++) { - mq->mqrq_cmdq[i].sg = - mmc_alloc_sg(card->host->max_segs, GFP_KERNEL); - if (ret) { - pr_warn("%s: unable to allocate cmdq sg of size %d\n", - mmc_card_name(card), - card->host->max_segs); - goto free_mqrq_sg; - } - } - ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO); if (ret) { pr_warn("%s: unable to allocate cmdq tags %d\n", mmc_card_name(card), q_depth); - goto free_mqrq_sg; + return ret; } blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done); @@ -268,30 +248,14 @@ int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card) blk_queue_rq_timeout(mq->queue, 120 * HZ); card->cmdq_init = true; - goto out; - -free_mqrq_sg: - for (i = 0; i < q_depth; i++) - kfree(mq->mqrq_cmdq[i].sg); - kfree(mq->mqrq_cmdq); - mq->mqrq_cmdq = NULL; -out: return ret; } void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card) { - int i; - int q_depth = card->ext_csd.cmdq_depth - 1; - blk_free_tags(mq->queue->queue_tags); mq->queue->queue_tags = NULL; blk_queue_free_tags(mq->queue); - - for (i = 0; i < q_depth; i++) - kfree(mq->mqrq_cmdq[i].sg); - kfree(mq->mqrq_cmdq); - mq->mqrq_cmdq = NULL; } static int mmc_queue_thread(void *d) @@ -430,9 +394,21 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, mq->card = card; if (card->ext_csd.cmdq_support && (area_type == MMC_BLK_DATA_AREA_MAIN)) { - mq->queue = blk_init_queue(mmc_cmdq_dispatch_req, lock); + mq->queue = blk_alloc_queue(GFP_KERNEL); if (!mq->queue) return -ENOMEM; + mq->queue->queue_lock = lock; + mq->queue->request_fn = mmc_cmdq_dispatch_req; + mq->queue->init_rq_fn = mmc_init_request; + mq->queue->exit_rq_fn = mmc_exit_request; + mq->queue->cmd_size = sizeof(struct mmc_queue_req); + mq->queue->queuedata = mq; + ret = blk_init_allocated_queue(mq->queue); + if (ret) { + blk_cleanup_queue(mq->queue); + return ret; + } + mmc_cmdq_setup_queue(mq, card); ret = mmc_cmdq_init(mq, card); if (ret) { @@ -444,7 +420,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, /* hook for pm qos cmdq init */ if (card->host->cmdq_ops->init) card->host->cmdq_ops->init(card->host); - mq->queue->queuedata = mq; mq->thread = kthread_run(mmc_cmdq_thread, mq, "mmc-cmdqd/%d%s", host->index, diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index a8dccc53c2ed15b8ccf2caad8fc455d37f189726..7171761123fe011adcc8f714fb15c49be72f4e8c 100644 --- a/drivers/mmc/core/queue.h 
+++ b/drivers/mmc/core/queue.h @@ -48,7 +48,6 @@ enum mmc_drv_op { }; struct mmc_queue_req { - struct request *req; struct mmc_blk_request brq; struct scatterlist *sg; struct mmc_async_req areq; @@ -75,17 +74,11 @@ struct mmc_queue { bool asleep; struct mmc_blk_data *blkdata; struct request_queue *queue; - struct mmc_queue_req mqrq[2]; - struct mmc_queue_req *mqrq_cur; - struct mmc_queue_req *mqrq_prev; - struct mmc_queue_req *mqrq_cmdq; struct work_struct cmdq_err_work; struct completion cmdq_pending_req_done; struct completion cmdq_shutdown_complete; struct request *cmdq_req_peeked; - enum mmc_blk_status (*err_check_fn)(struct mmc_card *, - struct mmc_async_req *); void (*cmdq_shutdown)(struct mmc_queue *); /* * FIXME: this counter is not a very reliable way of keeping diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 0630c0caa1a44e742a829e719fecb358bbce232f..5924f59773595ca5e584339f0325da4291798dd3 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1303,6 +1303,8 @@ static int _mmc_sd_resume(struct mmc_host *host) mmc_hostname(host), __func__, err); mmc_card_set_removed(host->card); mmc_detect_change(host, msecs_to_jiffies(200)); + } else if (err) { + goto out; } mmc_card_clr_suspended(host->card); diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c index 4f210dd891dc7d550a27cbefcd21afd4dd500fb4..eade0cd87a0b858f99e0fa49e178ea6cf6eefc02 100644 --- a/drivers/mmc/host/cmdq_hci.c +++ b/drivers/mmc/host/cmdq_hci.c @@ -359,7 +359,7 @@ static int cmdq_host_alloc_tdl(struct cmdq_host *cq_host) if (!cq_host->desc_base || !cq_host->trans_desc_base) return -ENOMEM; - pr_info("desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", + pr_debug("desc-base: 0x%pK trans-base: 0x%pK\n desc_dma 0x%llx trans_dma: 0x%llx\n", cq_host->desc_base, cq_host->trans_desc_base, (unsigned long long)cq_host->desc_dma_base, (unsigned long long) cq_host->trans_desc_dma_base); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index bdfa411e13ad1a14a63f013a305a5f713fb08f09..99a2fb0f7c08d7547842e271fdecc3ceb71448e0 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -4199,11 +4199,10 @@ void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host, group->latency = PM_QOS_DEFAULT_VALUE; pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY, group->latency); - pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n", + pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d\n", __func__, i, group->req.cpus_affine.bits[0], - group->latency, - &latency[i].latency[SDHCI_PERFORMANCE_MODE]); + group->latency); } msm_host->pm_qos_prev_cpu = -1; msm_host->pm_qos_group_enable = true; @@ -4769,8 +4768,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto vreg_deinit; } writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem); - dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n", - &tlmm_memres->start, readl_relaxed(tlmm_mem)); } /* diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 77d0c7de6b2e54b5ac793fa2b6e4a0f312fff7ab..7c0898c7b1c01d8863e7391ba6ee229bc1398280 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1597,6 +1597,7 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; + if (host->clock) sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); if (clock == 0) @@ -3158,13 +3159,13 @@ static void sdhci_adma_show_error(struct sdhci_host *host) struct sdhci_adma2_64_desc *dma_desc = desc; if (host->flags & 
SDHCI_USE_64_BIT_DMA) - DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", + DBG("%pK: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", desc, le32_to_cpu(dma_desc->addr_hi), le32_to_cpu(dma_desc->addr_lo), le16_to_cpu(dma_desc->len), le16_to_cpu(dma_desc->cmd)); else - DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", + DBG("%pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", desc, le32_to_cpu(dma_desc->addr_lo), le16_to_cpu(dma_desc->len), le16_to_cpu(dma_desc->cmd)); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index d524a64ed7546436e9efa0423e6ca81bd62a04d6..ac76c10c042f532f31d02eef45a458c969d27a7e 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, if (time_after(jiffies, timeo) && !chip_ready(map, adr)) break; - if (chip_ready(map, adr)) { + if (chip_good(map, adr, datum)) { xip_enable(map, chip, adr); goto op_done; } @@ -2535,7 +2535,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) struct ppb_lock { struct flchip *chip; - loff_t offset; + unsigned long adr; int locked; }; @@ -2553,8 +2553,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, unsigned long timeo; int ret; + adr += chip->start; mutex_lock(&chip->mutex); - ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; @@ -2572,8 +2573,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { chip->state = FL_LOCKING; - map_write(map, CMD(0xA0), chip->start + adr); - map_write(map, CMD(0x00), chip->start + adr); + map_write(map, CMD(0xA0), adr); + map_write(map, CMD(0x00), adr); } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { /* * Unlocking of one specific sector is not supported, so we @@ -2611,7 +2612,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map, map_write(map, CMD(0x00), chip->start); chip->state = FL_READY; - put_chip(map, chip, adr + chip->start); + put_chip(map, chip, adr); mutex_unlock(&chip->mutex); return ret; @@ -2668,9 +2669,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, * sectors shall be unlocked, so lets keep their locking * status at "unlocked" (locked=0) for the final re-locking. 
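 * The PPB unlock command unlocks all sectors of the chip at once, so
 * the walk below first records which sectors outside the requested
 * [ofs, ofs + len) range are currently locked; the loop at the end of
 * this function re-locks exactly those sectors afterwards.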
*/ - if ((adr < ofs) || (adr >= (ofs + len))) { + if ((offset < ofs) || (offset >= (ofs + len))) { sect[sectors].chip = &cfi->chips[chipnum]; - sect[sectors].offset = offset; + sect[sectors].adr = adr; sect[sectors].locked = do_ppb_xxlock( map, &cfi->chips[chipnum], adr, 0, DO_XXLOCK_ONEBLOCK_GETLOCK); @@ -2684,6 +2685,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, i++; if (adr >> cfi->chipshift) { + if (offset >= (ofs + len)) + break; adr = 0; chipnum++; @@ -2714,7 +2717,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, */ for (i = 0; i < sectors; i++) { if (sect[i].locked) - do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, + do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, DO_XXLOCK_ONEBLOCK_LOCK); } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 68d6f41588687d8a11c1f0524b898b56e7c7a809..21d316fd516e5a32b854c3e4388c8165c33d3d58 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -1082,6 +1082,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) if (ubi->bgt_thread) kthread_stop(ubi->bgt_thread); +#ifdef CONFIG_MTD_UBI_FASTMAP + cancel_work_sync(&ubi->fm_work); +#endif ubi_debugfs_exit_dev(ubi); uif_close(ubi); diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 388e46be6ad92805f2a6633da6960d8c56b1b837..d0884bd9d9553ca2cd38e52d0c673f0f2a59b4b3 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -490,6 +490,82 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, return err; } +#ifdef CONFIG_MTD_UBI_FASTMAP +/** + * check_mapping - check and fixup a mapping + * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * @pnum: physical eraseblock number + * + * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap + * operations, if such an operation is interrupted the mapping still looks + * good, but upon first read an ECC is reported to the upper layer. + * Normaly during the full-scan at attach time this is fixed, for Fastmap + * we have to deal with it while reading. + * If the PEB behind a LEB shows this symthom we change the mapping to + * %UBI_LEB_UNMAPPED and schedule the PEB for erasure. + * + * Returns 0 on success, negative error code in case of failure. + */ +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + int *pnum) +{ + int err; + struct ubi_vid_io_buf *vidb; + + if (!ubi->fast_attach) + return 0; + + vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); + if (!vidb) + return -ENOMEM; + + err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0); + if (err > 0 && err != UBI_IO_BITFLIPS) { + int torture = 0; + + switch (err) { + case UBI_IO_FF: + case UBI_IO_FF_BITFLIPS: + case UBI_IO_BAD_HDR: + case UBI_IO_BAD_HDR_EBADMSG: + break; + default: + ubi_assert(0); + } + + if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS) + torture = 1; + + down_read(&ubi->fm_eba_sem); + vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED; + up_read(&ubi->fm_eba_sem); + ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture); + + *pnum = UBI_LEB_UNMAPPED; + } else if (err < 0) { + ubi_err(ubi, "unable to read VID header back from PEB %i: %i", + *pnum, err); + + goto out_free; + } + + err = 0; + +out_free: + ubi_free_vid_buf(vidb); + + return err; +} +#else +static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, + int *pnum) +{ + return 0; +} +#endif + /** * ubi_eba_read_leb - read data. 
* @ubi: UBI device description object @@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, return err; pnum = vol->eba_tbl->entries[lnum].pnum; - if (pnum < 0) { + if (pnum >= 0) { + err = check_mapping(ubi, vol, lnum, &pnum); + if (err < 0) + goto out_unlock; + } + + if (pnum == UBI_LEB_UNMAPPED) { /* * The logical eraseblock is not mapped, fill the whole buffer * with 0xFF bytes. The exception is static volumes for which @@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, return err; pnum = vol->eba_tbl->entries[lnum].pnum; + if (pnum >= 0) { + err = check_mapping(ubi, vol, lnum, &pnum); + if (err < 0) + goto out; + } + if (pnum >= 0) { dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", len, offset, vol_id, lnum, pnum); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 668b46202507ce7f0b59a998d2b34e7d92cc2bbc..23a6986d512b4c75ffd22f5ca51b58a8609e5f75 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1505,6 +1505,7 @@ int ubi_thread(void *u) } dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); + ubi->thread_enabled = 0; return 0; } @@ -1514,9 +1515,6 @@ int ubi_thread(void *u) */ static void shutdown_work(struct ubi_device *ubi) { -#ifdef CONFIG_MTD_UBI_FASTMAP - flush_work(&ubi->fm_work); -#endif while (!list_empty(&ubi->works)) { struct ubi_work *wrk; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 5931aa2fe9974c1d20934be9d9ade6451af0cfcc..61084ba69a99f2ce2b1992d22608539c5675eba4 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond, slave->dev->name); rcu_assign_pointer(bond->primary_slave, slave); strcpy(bond->params.primary, slave->dev->name); + bond->force_primary = true; bond_select_active_slave(bond); goto out; } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 365a8cc6240506ed165bc9141daf15ab29018541..b6a681bce4000089faf6fe766bc24010b6b9050f 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -604,7 +604,7 @@ void can_bus_off(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - netdev_dbg(dev, "bus-off\n"); + netdev_info(dev, "bus-off\n"); netif_carrier_off(dev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index c93e5613d4cca3b5e21e6fd91d3601c1db8ea620..cc658a29cc33e9e60bfdb4db42bdb35bef215d07 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -310,6 +310,8 @@ int aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->hw_features |= aq_hw_caps->hw_features; self->ndev->features = aq_hw_caps->hw_features; + self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 7dd83d0ef0a0be93e63576d1ba1c5c9f30cdb63f..22243c480a05341238850e71b9bd196bc705a064 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, * slots for the highest 
priority. */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : - NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); /* Mapping between the CREDIT_WEIGHT registers and actual client * numbers */ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index aef40f02c77fe12885db1fb42dde28f3e990de65..a03a32a4ffca4928bf4132aa31a33c28f9d48c98 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2703,11 +2703,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 64-bit first, and + * limitation for the device. Try 47-bit first, and * fail to 32-bit. */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -2721,10 +2721,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 64); + "for consistent allocations, aborting\n", 47); goto err_out_release_regions; } using_dac = 1; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 1b03c32afc1f00262971558110744714364a2e93..7e2b70c2bba309a23dcf136abab6f534b1bf6ee4 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3294,7 +3294,9 @@ void be_detect_error(struct be_adapter *adapter) if ((val & POST_STAGE_FAT_LOG_START) != POST_STAGE_FAT_LOG_START && (val & POST_STAGE_ARMFW_UE) - != POST_STAGE_ARMFW_UE) + != POST_STAGE_ARMFW_UE && + (val & POST_STAGE_RECOVERABLE_ERR) + != POST_STAGE_RECOVERABLE_ERR) return; } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 3e62692af0119e0e4f3dfe85d74457500951f19b..fa5b30f547f6620a6e761860be6afa341dc185c8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -87,7 +87,7 @@ do { \ #define HNAE_AE_REGISTER 0x1 -#define RCB_RING_NAME_LEN 16 +#define RCB_RING_NAME_LEN (IFNAMSIZ + 4) #define HNAE_LOWEST_LATENCY_COAL_PARAM 30 #define HNAE_LOW_LATENCY_COAL_PARAM 80 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 8a85217845ae434f7205b884c2f80361d203cbeb..cf6a245db6d50eca97f0dad7a397d725cd488de2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3413,6 +3413,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) hw->phy.sfp_setup_needed = false; } + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + /* Reset PHY */ if (!hw->phy.reset_disable && hw->phy.ops.reset) hw->phy.ops.reset(hw); diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 22a3bfe1ed8f56806f08963a70bf5b69b0457690..73419224367aac56dd8c103b56449de8c793be22 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) struct mlx4_qp_table *qp_table 
= &mlx4_priv(dev)->qp_table; struct mlx4_qp *qp; - spin_lock(&qp_table->lock); + spin_lock_irq(&qp_table->lock); qp = __mlx4_qp_lookup(dev, qpn); - spin_unlock(&qp_table->lock); + spin_unlock_irq(&qp_table->lock); return qp; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3476f594c19562430c205d140b9fcd323a553af9..8285e6d24f301564ea46d63f6a70e3d5a17c2069 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -635,6 +635,45 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb) return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); } +static __be32 mlx5e_get_fcs(struct sk_buff *skb) +{ + int last_frag_sz, bytes_in_prev, nr_frags; + u8 *fcs_p1, *fcs_p2; + skb_frag_t *last_frag; + __be32 fcs_bytes; + + if (!skb_is_nonlinear(skb)) + return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN); + + nr_frags = skb_shinfo(skb)->nr_frags; + last_frag = &skb_shinfo(skb)->frags[nr_frags - 1]; + last_frag_sz = skb_frag_size(last_frag); + + /* If all FCS data is in last frag */ + if (last_frag_sz >= ETH_FCS_LEN) + return *(__be32 *)(skb_frag_address(last_frag) + + last_frag_sz - ETH_FCS_LEN); + + fcs_p2 = (u8 *)skb_frag_address(last_frag); + bytes_in_prev = ETH_FCS_LEN - last_frag_sz; + + /* Find where the other part of the FCS is - Linear or another frag */ + if (nr_frags == 1) { + fcs_p1 = skb_tail_pointer(skb); + } else { + skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2]; + + fcs_p1 = skb_frag_address(prev_frag) + + skb_frag_size(prev_frag); + } + fcs_p1 -= bytes_in_prev; + + memcpy(&fcs_bytes, fcs_p1, bytes_in_prev); + memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz); + + return fcs_bytes; +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -653,6 +692,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (is_first_ethertype_ip(skb)) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + if (unlikely(netdev->features & NETIF_F_RXFCS)) + skb->csum = csum_add(skb->csum, + (__force __wsum)mlx5e_get_fcs(skb)); rq->stats.csum_complete++; return; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index f88ff3f4b6612912d33b75b7570641b8e98e8f67..35d14af235f793890406aa6950e4c927a5052bbe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -277,8 +277,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, if ((*reg & mask) == val) return 0; - if (msleep_interruptible(25)) - return -ERESTARTSYS; + msleep(25); if (time_after(start_time, wait_until)) return -ETIMEDOUT; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 629bfa0cd3f03d6627c8c4aa10ef836bc3e98671..27ba476f761d4c2ad11094ff44516104f21a330e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -77,7 +77,7 @@ #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET /* ILT entry structure */ -#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL +#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12) #define ILT_ENTRY_PHY_ADDR_SHIFT 0 #define ILT_ENTRY_VALID_MASK 0x1ULL #define ILT_ENTRY_VALID_SHIFT 52 diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c 
b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 085338990f49cabd6062e3cb4f977277d0243c7c..c5452b445c37c142de2eb3cc083ec1a2516a366d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn) void qed_l2_setup(struct qed_hwfn *p_hwfn) { - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; mutex_init(&p_hwfn->p_l2_info->lock); @@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn) { u32 i; - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; if (!p_hwfn->p_l2_info) diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 50b142fad6b8206c806a91248a0b7ac9f5b196d7..1900bf7e67d1297dc9b24648e99d0ec50f779305 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev) } if (!found) { - event_node = kzalloc(sizeof(*event_node), GFP_KERNEL); + event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC); if (!event_node) { DP_NOTICE(edev, "qedr: Could not allocate memory for rdma work\n"); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index bbe23053cb33ff7d46d5a7a393b040bc495532e5..b0b0dbfa2f226016617b17a626533d86ea42a83d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -70,6 +70,7 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, if (port->nr_rmnet_devs) return -EINVAL; + rmnet_map_cmd_exit(port); rmnet_map_tx_aggregate_exit(port); kfree(port); @@ -111,6 +112,7 @@ static int rmnet_register_real_device(struct net_device *real_dev) INIT_HLIST_HEAD(&port->muxed_ep[entry]); rmnet_map_tx_aggregate_init(port); + rmnet_map_cmd_init(port); netdev_dbg(real_dev, "registered with rmnet\n"); return 0; @@ -256,6 +258,8 @@ static void rmnet_force_unassociate_device(struct net_device *dev) rcu_read_unlock(); unregister_netdevice_many(&list); + qmi_rmnet_qmi_exit(port->qmi_info, port); + rmnet_unregister_real_device(real_dev, port); } @@ -501,7 +505,7 @@ void *rmnet_get_rmnet_port(struct net_device *dev) struct rmnet_priv *priv; if (dev) { - priv = (struct rmnet_priv *)netdev_priv(dev); + priv = netdev_priv(dev); return (void *)rmnet_get_port(priv->real_dev); } @@ -509,13 +513,20 @@ void *rmnet_get_rmnet_port(struct net_device *dev) } EXPORT_SYMBOL(rmnet_get_rmnet_port); -struct net_device *rmnet_get_rmnet_dev(void *port, uint8_t mux_id) +struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id) { struct rmnet_endpoint *ep; - ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id); - if (ep) - return ep->egress_dev; + if (port) { + struct net_device *dev; + + ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id); + if (ep) { + dev = ep->egress_dev; + + return dev; + } + } return NULL; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 2dff6f6885822027fda01b15dc432d0bd5f22c09..8749905b19ccce840ddff6436245b0de6124c240 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -27,6 +27,20 @@ struct rmnet_endpoint { struct hlist_node hlnode; 
}; +struct rmnet_port_priv_stats { + u64 dl_hdr_last_seq; + u64 dl_hdr_last_bytes; + u64 dl_hdr_last_pkts; + u64 dl_hdr_last_flows; + u64 dl_hdr_count; + u64 dl_hdr_total_bytes; + u64 dl_hdr_total_pkts; + u64 dl_hdr_avg_bytes; + u64 dl_hdr_avg_pkts; + u64 dl_trl_last_seq; + u64 dl_trl_count; +}; + /* One instance of this structure is instantiated for each real_dev associated * with rmnet. */ @@ -52,6 +66,10 @@ struct rmnet_port { struct hrtimer hrtimer; void *qmi_info; + + /* dl marker elements */ + struct list_head dl_list; + struct rmnet_port_priv_stats stats; }; extern struct rtnl_link_ops rmnet_link_ops; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 7350852311be8e4d1bd0ac064d99a86c43322c96..6bb3f6f8773e33f6252a822d9199b58fd24f4cfa 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -92,6 +92,11 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, u8 mux_id; if (RMNET_MAP_GET_CD_BIT(skb)) { + if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + if (!rmnet_map_flow_command(skb, port)) + return; + } + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) return rmnet_map_command(skb, port); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 13d5e1af77427e0de5f6737550127ac4d968164e..c0179b901b041789c9f3360e58bf4671b2148d87 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -34,6 +34,8 @@ enum rmnet_map_commands { RMNET_MAP_COMMAND_NONE, RMNET_MAP_COMMAND_FLOW_DISABLE, RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, /* These should always be the last 2 elements */ RMNET_MAP_COMMAND_UNKNOWN, RMNET_MAP_COMMAND_ENUM_LENGTH @@ -63,6 +65,60 @@ struct rmnet_map_ul_csum_header { u16 csum_enabled:1; } __aligned(1); +struct rmnet_map_control_command_header { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; +} __aligned(1); + +struct rmnet_map_flow_info_le { + __be32 mux_id; + __be32 flow_id; + __be32 bytes; + __be32 pkts; +} __aligned(1); + +struct rmnet_map_flow_info_be { + u32 mux_id; + u32 flow_id; + u32 bytes; + u32 pkts; +} __aligned(1); + +struct rmnet_map_dl_ind_hdr { + union { + struct { + u32 seq; + u32 bytes; + u32 pkts; + u32 flows; + struct rmnet_map_flow_info_le flow[0]; + } le __aligned(1); + struct { + __be32 seq; + __be32 bytes; + __be32 pkts; + __be32 flows; + struct rmnet_map_flow_info_be flow[0]; + } be __aligned(1); + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind_trl { + union { + __be32 seq_be; + u32 seq_le; + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind { + void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); + void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); + struct list_head list; +}; + #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ (Y)->data)->mux_id) #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ @@ -95,5 +151,11 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset); void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port); void rmnet_map_tx_aggregate_init(struct rmnet_port *port); void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); - +int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port); +void rmnet_map_cmd_init(struct rmnet_port *port); +int 
rmnet_map_dl_ind_register(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind); +int rmnet_map_dl_ind_deregister(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind); +void rmnet_map_cmd_exit(struct rmnet_port *port); #endif /* _RMNET_MAP_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 56a93df962e6a66c83653e720b4c4571679fdb73..0a7530548a39f3f37c00cf103d6cff0ae18a843b 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -16,6 +16,17 @@ #include "rmnet_private.h" #include "rmnet_vnd.h" +#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + static u8 rmnet_map_do_flow_control(struct sk_buff *skb, struct rmnet_port *port, int enable) @@ -67,7 +78,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; - int xmit_status; + struct net_device *dev = skb->dev; if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) skb_trim(skb, @@ -78,9 +89,77 @@ static void rmnet_map_send_ack(struct sk_buff *skb, cmd = RMNET_MAP_GET_CMD_START(skb); cmd->cmd_type = type & 0x03; - netif_tx_lock(skb->dev); - xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); - netif_tx_unlock(skb->dev); + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + +static void rmnet_map_dl_hdr_notify(struct rmnet_port *port, + struct rmnet_map_dl_ind_hdr *dlhdr) +{ + struct rmnet_map_dl_ind *tmp; + + list_for_each_entry(tmp, &port->dl_list, list) + tmp->dl_hdr_handler(dlhdr); +} + +static void rmnet_map_dl_trl_notify(struct rmnet_port *port, + struct rmnet_map_dl_ind_trl *dltrl) +{ + struct rmnet_map_dl_ind *tmp; + + list_for_each_entry(tmp, &port->dl_list, list) + tmp->dl_trl_handler(dltrl); +} + +static void rmnet_map_process_flow_start(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_dl_ind_hdr *dlhdr; + + if (skb->len < RMNET_DL_IND_HDR_SIZE) + return; + + skb_pull(skb, RMNET_MAP_CMD_SIZE); + + dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data; + + port->stats.dl_hdr_last_seq = dlhdr->le.seq; + port->stats.dl_hdr_last_bytes = dlhdr->le.bytes; + port->stats.dl_hdr_last_pkts = dlhdr->le.pkts; + port->stats.dl_hdr_last_flows = dlhdr->le.flows; + port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes; + port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts; + port->stats.dl_hdr_count++; + + if (unlikely(!(port->stats.dl_hdr_count))) + port->stats.dl_hdr_count = 1; + + port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes / + port->stats.dl_hdr_count; + + port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts / + port->stats.dl_hdr_count; + + rmnet_map_dl_hdr_notify(port, dlhdr); +} + +static void rmnet_map_process_flow_end(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_dl_ind_trl *dltrl; + + if (skb->len < RMNET_DL_IND_TRL_SIZE) + return; + + skb_pull(skb, RMNET_MAP_CMD_SIZE); + + dltrl = (struct rmnet_map_dl_ind_trl *)skb->data; + + port->stats.dl_trl_last_seq = dltrl->seq_le; + 
port->stats.dl_trl_count++; + + rmnet_map_dl_trl_notify(port, dltrl); } /* Process MAP command frame and send N/ACK message as appropriate. Message cmd @@ -112,3 +191,72 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) if (rc == RMNET_MAP_COMMAND_ACK) rmnet_map_send_ack(skb, rc, port); } + +int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + + cmd = RMNET_MAP_GET_CMD_START(skb); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_START: + rmnet_map_process_flow_start(skb, port); + break; + + case RMNET_MAP_COMMAND_FLOW_END: + rmnet_map_process_flow_end(skb, port); + break; + + default: + return 1; + } + + consume_skb(skb); + return 0; +} + +void rmnet_map_cmd_exit(struct rmnet_port *port) +{ + struct rmnet_map_dl_ind *tmp, *idx; + + list_for_each_entry_safe(tmp, idx, &port->dl_list, list) + list_del_rcu(&tmp->list); +} + +void rmnet_map_cmd_init(struct rmnet_port *port) +{ + INIT_LIST_HEAD(&port->dl_list); +} + +int rmnet_map_dl_ind_register(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind) +{ + if (!port || !dl_ind || !dl_ind->dl_hdr_handler || + !dl_ind->dl_trl_handler) + return -EINVAL; + + list_add_rcu(&dl_ind->list, &port->dl_list); + + return 0; +} + +int rmnet_map_dl_ind_deregister(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind) +{ + struct rmnet_map_dl_ind *tmp; + + if (!port || !dl_ind) + return -EINVAL; + + list_for_each_entry(tmp, &port->dl_list, list) { + if (tmp == dl_ind) { + list_del_rcu(&dl_ind->list); + goto done; + } + } + +done: + return 0; +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 623ddba1494586f5a63e03d3492fe5bd66e16d3e..9cee9597e45efd2cbaecc51bdf9df84cf505f2e8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -23,6 +23,7 @@ #define RMNET_MAP_DEAGGR_SPACING 64 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) +#define RMNET_MAP_PKT_COPY_THRESHOLD 64 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, const void *txporthdr) @@ -307,11 +308,34 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, } /* Deaggregates a single packet - * A whole new buffer is allocated for each portion of an aggregated frame. + * A whole new buffer is allocated for each portion of an aggregated frame + * except when a UDP or command packet is received. * Caller should keep calling deaggregate() on the source skb until 0 is * returned, indicating that there are no more packets to deaggregate. Caller * is responsible for freeing the original skb. 
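 * MAP command frames, and IPv4/IPv6 UDP packets when at least
 * RMNET_MAP_PKT_COPY_THRESHOLD bytes remain in the source skb, take the
 * clone path selected by rmnet_validate_clone() below: the portion is
 * cloned from the source skb and trimmed to the packet length (with
 * truesize and the skb hash reset) instead of being copied into a
 * freshly allocated buffer.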
*/ +static int rmnet_validate_clone(struct sk_buff *skb) +{ + if (RMNET_MAP_GET_CD_BIT(skb)) + return 0; + + if (skb->len < RMNET_MAP_PKT_COPY_THRESHOLD) + return 1; + + switch (skb->data[4] & 0xF0) { + case 0x40: + if (((struct iphdr *)&skb->data[4])->protocol == IPPROTO_UDP) + return 0; + break; + case 0x60: + if (((struct ipv6hdr *)&skb->data[4])->nexthdr == IPPROTO_UDP) + return 0; + /* Fall through */ + } + + return 1; +} + struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, struct rmnet_port *port) { @@ -335,13 +359,27 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, if (ntohs(maph->pkt_len) == 0) return NULL; - skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); - if (!skbn) - return NULL; - skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); - skb_put(skbn, packet_len); - memcpy(skbn->data, skb->data, packet_len); + if (rmnet_validate_clone(skb)) { + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, + GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); + skb_put(skbn, packet_len); + memcpy(skbn->data, skb->data, packet_len); + + } else { + skbn = skb_clone(skb, GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_trim(skbn, packet_len); + skbn->truesize = SKB_TRUESIZE(packet_len); + __skb_set_hash(skbn, 0, 0, 0); + } + skb_pull(skb, packet_len); return skbn; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h index c30100ced465e718e59a7d9a579ed1ba5a5918db..823337c75ea72a17baad901b4c8d515dd7687f6d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -20,6 +20,7 @@ /* Constants */ #define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31) +#define RMNET_INGRESS_FORMAT_DL_MARKER BIT(30) /* Replace skb->dev to a virtual rmnet device and pass up the stack */ #define RMNET_EPMODE_VND (1) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index d644ce998cdde4a3bb9e96f9e0c787ac9969e7c0..37cd96d2a7ff67b07d4fed9149c792be6e1805ae 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -24,6 +24,8 @@ #include "rmnet_vnd.h" #include +#define CREATE_TRACE_POINTS +#include /* RX/TX Fixup */ @@ -62,8 +64,9 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, priv = netdev_priv(dev); if (priv->real_dev) { - rmnet_egress_handler(skb); + trace_rmnet_xmit_skb(skb); qmi_rmnet_burst_fc_check(dev, skb); + rmnet_egress_handler(skb); } else { this_cpu_inc(priv->pcpu_stats->stats.tx_drops); kfree_skb(skb); @@ -170,12 +173,29 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { "Checksum computed in software", }; +static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = { + "DL header last seen sequence", + "DL header last seen bytes", + "DL header last seen packets", + "DL header last seen flows", + "DL header pkts received", + "DL header total bytes received", + "DL header total pkts received", + "DL header average bytes", + "DL header average packets", + "DL trailer last seen sequence", + "DL trailer pkts received", +}; + static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { switch (stringset) { case ETH_SS_STATS: memcpy(buf, &rmnet_gstrings_stats, sizeof(rmnet_gstrings_stats)); + memcpy(buf + sizeof(rmnet_gstrings_stats), + &rmnet_port_gstrings_stats, + sizeof(rmnet_port_gstrings_stats)); break; } } @@ -184,7 +204,8 @@ static int 
rmnet_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(rmnet_gstrings_stats); + return ARRAY_SIZE(rmnet_gstrings_stats) + + ARRAY_SIZE(rmnet_port_gstrings_stats); default: return -EOPNOTSUPP; } @@ -195,11 +216,19 @@ static void rmnet_get_ethtool_stats(struct net_device *dev, { struct rmnet_priv *priv = netdev_priv(dev); struct rmnet_priv_stats *st = &priv->stats; + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; - if (!data) + port = rmnet_get_port(priv->real_dev); + + if (!data || !port) return; + stp = &port->stats; + memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64)); + memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp, + ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64)); } static const struct ethtool_ops rmnet_ethtool_ops = { @@ -227,6 +256,10 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->needs_free_netdev = true; rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; + + /* This perm addr will be used as interface identifier by IPv6 */ + rmnet_dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(rmnet_dev->perm_addr); } /* Exposed API */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3a7241c8713cf15530856c3509e42e82b9024741..6890478a085167379d4f2bf2a8ac38a83982dd1a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -123,8 +123,10 @@ static int netvsc_open(struct net_device *net) } rdev = nvdev->extension; - if (!rdev->link_state) + if (!rdev->link_state) { netif_carrier_on(net); + netif_tx_wake_all_queues(net); + } if (vf_netdev) { /* Setting synthetic device up transparently sets diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index 3fe8cc5c177eea380255fc1c745754f2b5a221dd..9b27ca264c66b4a4a9845b619327b686dc67b073 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev) return rc; /* make rcal=100, since rdb default is 000 */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10); if (rc < 0) return rc; /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10); if (rc < 0) return rc; /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00); return 0; } diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 171010eb4d9c5c36da0be9888fb75cc54e136768..8d96c6f048d07fd0efd24e3a6f296f57afaa2e94 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum) /* The register must be written to both the Shadow Register Select and * the Shadow Read Register Selector */ - phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | + phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK | regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); return phy_read(phydev, MII_BCM54XX_AUX_CTL); } diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 7c73808cbbded22bb01b60ef2b616456267f99cc..81cceaa412fe32439a31561416610198c6c6c3e5 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -14,11 +14,18 @@ #ifndef _LINUX_BCM_PHY_LIB_H #define 
_LINUX_BCM_PHY_LIB_H +#include #include int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); +static inline int bcm_phy_write_exp_sel(struct phy_device *phydev, + u16 reg, u16 val) +{ + return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val); +} + int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 8b33f688ac8a123796da15ba9aefd0f11e2a7f6d..3c5b2a2e2fcc3a6c1b05c2dd7278a0d62177fa38 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv { static void r_rc_cal_reset(struct phy_device *phydev) { /* Reset R_CAL/RC_CAL Engine */ - bcm_phy_write_exp(phydev, 0x00b0, 0x0010); + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010); /* Disable Reset R_AL/RC_CAL Engine */ - bcm_phy_write_exp(phydev, 0x00b0, 0x0000); + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000); } static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index a3f456b91c99decaeed7b4505e7a7c33e9b59d5c..e9e67c22c8bb41f250f01d55e97b99a52bd19c25 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1409,6 +1409,15 @@ static int m88e1318_set_wol(struct phy_device *phydev, if (err < 0) return err; + /* If WOL event happened once, the LED[2] interrupt pin + * will not be cleared unless we reading the interrupt status + * register. If interrupts are in use, the normal interrupt + * handling will clear the WOL event. Clear the WOL event + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) + phy_read(phydev, MII_M1011_IEVENT); + /* Enable the WOL interrupt */ temp = phy_read(phydev, MII_88E1318S_PHY_CSIER); temp |= MII_88E1318S_PHY_CSIER_WOL_EIE; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index bfd4ded0a53fb015226d0b03ceb9e5dda9f904e5..773a3fea8f0eb50d29e231b5fa95b49e15549f96 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -777,13 +777,16 @@ static ssize_t tap_put_user(struct tap_queue *q, int total; if (q->flags & IFF_VNET_HDR) { + int vlan_hlen = skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0; struct virtio_net_hdr vnet_hdr; + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); if (iov_iter_count(iter) < vnet_hdr_len) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &vnet_hdr, - tap_is_little_endian(q), true)) + tap_is_little_endian(q), true, + vlan_hlen)) BUG(); if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 8a222ae5950e46dd9977f3384ad3e7a6cd98fad8..83c59171383731b1067034cbc732e25d0c63f4ce 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team, static void __team_compute_features(struct team *team) { struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t vlan_features = TEAM_VLAN_FEATURES & + NETIF_F_ALL_FOR_ALL; netdev_features_t enc_features = TEAM_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 27897184b2f69a8f0386adb16d066174a7ffe4b4..e34ea5a2a69246a2566647ab0421334ff1a3b616 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1315,7 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, else *skb_xdp = 0; - preempt_disable(); + local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog && !*skb_xdp) { @@ -1338,7 +1338,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, if (err) goto err_redirect; rcu_read_unlock(); - preempt_enable(); + local_bh_enable(); return NULL; case XDP_TX: xdp_xmit = true; @@ -1360,7 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb = build_skb(buf, buflen); if (!skb) { rcu_read_unlock(); - preempt_enable(); + local_bh_enable(); return ERR_PTR(-ENOMEM); } @@ -1373,12 +1373,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, skb->dev = tun->dev; generic_xdp_tx(skb, xdp_prog); rcu_read_unlock(); - preempt_enable(); + local_bh_enable(); return NULL; } rcu_read_unlock(); - preempt_enable(); + local_bh_enable(); return skb; @@ -1386,7 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, put_page(alloc_frag->page); err_xdp: rcu_read_unlock(); - preempt_enable(); + local_bh_enable(); this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -1560,16 +1560,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct bpf_prog *xdp_prog; int ret; + local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { ret = do_xdp_generic(xdp_prog, skb); if (ret != XDP_PASS) { rcu_read_unlock(); + local_bh_enable(); return total_len; } } rcu_read_unlock(); + local_bh_enable(); } rxhash = __skb_get_hash_symmetric(skb); @@ -1649,7 +1652,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, - tun_is_little_endian(tun), true)) { + tun_is_little_endian(tun), true, + vlan_hlen)) { struct skb_shared_info *sinfo = skb_shinfo(skb); pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 7220cd62071726b171ab2ae792f672d7b513343b..0362acd5cdcaaf2debe7ab13941a06d1504ad21c 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = { */ static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { .description 
= "CDC MBIM", - .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP, .bind = cdc_mbim_bind, .unbind = cdc_mbim_unbind, .manage_power = cdc_mbim_manage_power, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 9e1b74590682e1b44242374f34fd628bdd5b1fbb..f5316ab68a0a89e749a7cc2d4d0a3ff5d03b5349 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * accordingly. Otherwise, we should check here. */ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) - delayed_ndp_size = ctx->max_ndp_size; + delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); else delayed_ndp_size = 0; @@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) /* If requested, put NDP at end of frame. */ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; - cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size); + cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size); nth16->wNdpIndex = cpu_to_le16(skb_out->len); skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 948611317c9767cf14e02d35a7b775a835cdc2f5..910c46b4776999e9f43251801c0cc65a70d186b2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -632,6 +632,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *data; u32 act; + /* Transient failure which in theory could occur if + * in-flight packets from before XDP was enabled reach + * the receive path after XDP is loaded. + */ + if (unlikely(hdr->hdr.gso_type)) + goto err_xdp; + /* This happens when rx buffer size is underestimated */ if (unlikely(num_buf > 1 || headroom < virtnet_get_headroom(vi))) { @@ -647,14 +654,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp_page = page; } - /* Transient failure which in theory could occur if - * in-flight packets from before XDP was enabled reach - * the receive path after XDP is loaded. In practice I - * was not able to create this condition. - */ - if (unlikely(hdr->hdr.gso_type)) - goto err_xdp; - /* Allow consuming headroom but reserve enough space to push * the descriptor on if we get an XDP_TX return code. 
*/ @@ -688,7 +687,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); if (unlikely(xdp_page != page)) - goto err_xdp; + put_page(page); rcu_read_unlock(); goto xdp_xmit; default: @@ -777,7 +776,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_unlock(); err_skb: put_page(page); - while (--num_buf) { + while (num_buf-- > 1) { buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", @@ -1238,7 +1237,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) hdr = skb_vnet_hdr(skb); if (virtio_net_hdr_from_skb(skb, &hdr->hdr, - virtio_is_little_endian(vi->vdev), false)) + virtio_is_little_endian(vi->vdev), false, + 0)) BUG(); if (vi->mergeable_rx_bufs) diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 9bb402595b63e17498226f14fd50ce0ad857c9e1..5537982115e0f43cf7a08c7b0db49f2f0ba9babc 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -136,5 +136,6 @@ config CLD_LL_CORE support. source "drivers/net/wireless/cnss_utils/Kconfig" +source "drivers/net/wireless/cnss_genl/Kconfig" endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 3548c468a8c7e85408236d9ecb3b92858f9b6806..328b12251678fcf8eb377bfc0a5c7847b5fbf304 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/ obj-$(CONFIG_CNSS_UTILS) += cnss_utils/ +obj-$(CONFIG_CNSS_GENL) += cnss_genl/ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index eb0895a55b74bdb6a860c47f585844618974a772..0e254f073ca1af507aea2c754523b9c5ad3477cb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -54,6 +54,9 @@ MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true) .max_power = 40, \ } +#define WIL_BRP_ANT_LIMIT_MIN (1) +#define WIL_BRP_ANT_LIMIT_MAX (27) + static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(1, 0), CHAN60G(2, 0), @@ -61,6 +64,49 @@ static struct ieee80211_channel wil_60ghz_channels[] = { /* channel 4 not supported yet */ }; +enum wil_nl_60g_cmd_type { + NL_60G_CMD_FW_WMI, + NL_60G_CMD_DEBUG, + NL_60G_CMD_STATISTICS, + NL_60G_CMD_REGISTER, +}; + +enum wil_nl_60g_evt_type { + NL_60G_EVT_DRIVER_ERROR, + NL_60G_EVT_FW_ERROR, + NL_60G_EVT_FW_WMI, + NL_60G_EVT_DRIVER_SHUTOWN, + NL_60G_EVT_DRIVER_DEBUG_EVENT, +}; + +enum wil_nl_60g_debug_cmd { + NL_60G_DBG_FORCE_WMI_SEND, +}; + +struct wil_nl_60g_send_receive_wmi { + u32 cmd_id; /* enum wmi_command_id or enum wmi_event_id */ + u8 reserved[2]; + u8 dev_id; /* mid */ + u16 buf_len; + u8 buf[0]; +} __packed; + +struct wil_nl_60g_event { + u32 evt_type; /* wil_nl_60g_evt_type */ + u32 buf_len; + u8 reserved[9]; + u8 buf[0]; +} __packed; + +struct wil_nl_60g_debug { /* NL_60G_CMD_DEBUG */ + u32 cmd_id; /* wil_nl_60g_debug_cmd */ +} __packed; + +struct wil_nl_60g_debug_force_wmi { + struct wil_nl_60g_debug hdr; + u32 enable; +} __packed; + /* Vendor id to be used in vendor specific command and events * to user space. 
* NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, @@ -74,17 +120,24 @@ static struct ieee80211_channel wil_60ghz_channels[] = { #define WIL_MAX_RF_SECTORS (128) #define WIL_CID_ALL (0xff) -enum qca_wlan_vendor_attr_rf_sector { +enum qca_wlan_vendor_attr_wil { QCA_ATTR_MAC_ADDR = 6, + QCA_ATTR_FEATURE_FLAGS = 7, + QCA_ATTR_TEST = 8, QCA_ATTR_PAD = 13, QCA_ATTR_TSF = 29, QCA_ATTR_DMG_RF_SECTOR_INDEX = 30, QCA_ATTR_DMG_RF_SECTOR_TYPE = 31, QCA_ATTR_DMG_RF_MODULE_MASK = 32, QCA_ATTR_DMG_RF_SECTOR_CFG = 33, - QCA_ATTR_DMG_RF_SECTOR_MAX, + QCA_ATTR_BRP_ANT_LIMIT_MODE = 38, + QCA_ATTR_BRP_ANT_NUM_LIMIT = 39, + QCA_ATTR_WIL_MAX, }; +#define WIL_ATTR_60G_CMD_TYPE QCA_ATTR_FEATURE_FLAGS +#define WIL_ATTR_60G_BUF QCA_ATTR_TEST + enum qca_wlan_vendor_attr_dmg_rf_sector_type { QCA_ATTR_DMG_RF_SECTOR_TYPE_RX, QCA_ATTR_DMG_RF_SECTOR_TYPE_TX, @@ -107,8 +160,22 @@ enum qca_wlan_vendor_attr_dmg_rf_sector_cfg { QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1 }; +enum qca_wlan_vendor_attr_brp_ant_limit_mode { + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_DISABLE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_EFFECTIVE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_FORCE, + QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODES_NUM +}; + +static const struct +nla_policy wil_brp_ant_limit_policy[QCA_ATTR_WIL_MAX + 1] = { + [QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN }, + [QCA_ATTR_BRP_ANT_NUM_LIMIT] = { .type = NLA_U8 }, + [QCA_ATTR_BRP_ANT_LIMIT_MODE] = { .type = NLA_U8 }, +}; + static const struct -nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = { +nla_policy wil_rf_sector_policy[QCA_ATTR_WIL_MAX + 1] = { [QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN }, [QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 }, [QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 }, @@ -127,7 +194,14 @@ nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = { [QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 }, }; +static const struct +nla_policy wil_nl_60g_policy[QCA_ATTR_WIL_MAX + 1] = { + [WIL_ATTR_60G_CMD_TYPE] = { .type = NLA_U32 }, + [WIL_ATTR_60G_BUF] = { .type = NLA_BINARY }, +}; + enum qca_nl80211_vendor_subcmds { + QCA_NL80211_VENDOR_SUBCMD_UNSPEC = 0, QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128, QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129, QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130, @@ -141,6 +215,7 @@ enum qca_nl80211_vendor_subcmds { QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140, QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141, QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142, + QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT = 153, }; static int wil_rf_sector_get_cfg(struct wiphy *wiphy, @@ -155,7 +230,11 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, static int wil_rf_sector_set_selected(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len); +static int wil_brp_set_ant_limit(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); /* vendor specific commands */ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { { @@ -230,6 +309,20 @@ static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { WIPHY_VENDOR_CMD_NEED_RUNNING, .doit = wil_rf_sector_set_selected }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_BRP_SET_ANT_LIMIT, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + 
WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_brp_set_ant_limit + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_UNSPEC, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV, + .doit = wil_nl_60g_handle_cmd + }, }; /* vendor specific events */ @@ -246,6 +339,10 @@ static const struct nl80211_vendor_cmd_info wil_nl80211_vendor_events[] = { .vendor_id = QCA_NL80211_VENDOR_ID, .subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT }, + [QCA_NL80211_VENDOR_EVENT_UNSPEC_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_UNSPEC + }, }; static struct ieee80211_supported_band wil_band_60ghz = { @@ -338,6 +435,86 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type) return -EOPNOTSUPP; } +int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch) +{ + switch (spec_ch) { + case 1: + *wmi_ch = WMI_CHANNEL_1; + break; + case 2: + *wmi_ch = WMI_CHANNEL_2; + break; + case 3: + *wmi_ch = WMI_CHANNEL_3; + break; + case 4: + *wmi_ch = WMI_CHANNEL_4; + break; + case 5: + *wmi_ch = WMI_CHANNEL_5; + break; + case 6: + *wmi_ch = WMI_CHANNEL_6; + break; + case 9: + *wmi_ch = WMI_CHANNEL_9; + break; + case 10: + *wmi_ch = WMI_CHANNEL_10; + break; + case 11: + *wmi_ch = WMI_CHANNEL_11; + break; + case 12: + *wmi_ch = WMI_CHANNEL_12; + break; + default: + return -EINVAL; + } + + return 0; +} + +int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch) +{ + switch (wmi_ch) { + case WMI_CHANNEL_1: + *spec_ch = 1; + break; + case WMI_CHANNEL_2: + *spec_ch = 2; + break; + case WMI_CHANNEL_3: + *spec_ch = 3; + break; + case WMI_CHANNEL_4: + *spec_ch = 4; + break; + case WMI_CHANNEL_5: + *spec_ch = 5; + break; + case WMI_CHANNEL_6: + *spec_ch = 6; + break; + case WMI_CHANNEL_9: + *spec_ch = 9; + break; + case WMI_CHANNEL_10: + *spec_ch = 10; + break; + case WMI_CHANNEL_11: + *spec_ch = 11; + break; + case WMI_CHANNEL_12: + *spec_ch = 12; + break; + default: + return -EINVAL; + } + + return 0; +} + int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct station_info *sinfo) { @@ -353,6 +530,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1082,6 +1261,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, } conn.channel = ch - 1; + if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) + if (wil->force_edmg_channel) { + rc = wil_spec2wmi_ch(wil->force_edmg_channel, + &conn.edmg_channel); + if (rc) + wil_err(wil, + "wmi channel for channel %d not found", + wil->force_edmg_channel); + } + ether_addr_copy(conn.bssid, bss->bssid); ether_addr_copy(conn.dst_mac, bss->bssid); @@ -1158,17 +1347,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, u64 *cookie) { const u8 *buf = params->buf; - size_t len = params->len, total; + size_t len = params->len; struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; - bool tx_status = false; - struct ieee80211_mgmt *mgmt_frame = (void *)buf; - struct wmi_sw_tx_req_cmd *cmd; - struct { - struct wmi_cmd_hdr wmi; - struct wmi_sw_tx_complete_event evt; - } __packed evt; + bool tx_status; /* Note, currently we do not support the "wait" parameter, user-space * must call remain_on_channel before mgmt_tx or listen on a channel @@ -1177,34 +1360,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy 
*wiphy, struct wireless_dev *wdev, * different from currently "listened" channel and fail if it is. */ - wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); - wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, - len, true); - - if (len < sizeof(struct ieee80211_hdr_3addr)) - return -EINVAL; - - total = sizeof(*cmd) + len; - if (total < len) - return -EINVAL; - - cmd = kmalloc(total, GFP_KERNEL); - if (!cmd) { - rc = -ENOMEM; - goto out; - } - - memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); - cmd->len = cpu_to_le16(len); - memcpy(cmd->payload, buf, len); - - rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, - WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); - if (rc == 0) - tx_status = !evt.evt.status; + rc = wmi_mgmt_tx(vif, buf, len); + tx_status = (rc == 0); - kfree(cmd); - out: cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len, tx_status, GFP_KERNEL); return rc; @@ -2414,7 +2572,7 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, struct wil6210_priv *wil = wdev_to_wil(wdev); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; - struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + struct nlattr *tb[QCA_ATTR_WIL_MAX + 1]; u16 sector_index; u8 sector_type; u32 rf_modules_vec; @@ -2422,7 +2580,9 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_get_rf_sector_params_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct sk_buff *msg; struct nlattr *nl_cfgs, *nl_cfg; u32 i; @@ -2431,7 +2591,7 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + rc = nla_parse(tb, QCA_ATTR_WIL_MAX, data, data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); @@ -2468,7 +2628,6 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, cmd.sector_idx = cpu_to_le16(sector_index); cmd.sector_type = sector_type; cmd.rf_modules_vec = rf_modules_vec & 0xFF; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), @@ -2534,7 +2693,7 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, struct wil6210_priv *wil = wdev_to_wil(wdev); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc, tmp; - struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + struct nlattr *tb[QCA_ATTR_WIL_MAX + 1]; struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1]; u16 sector_index, rf_module_index; u8 sector_type; @@ -2543,14 +2702,16 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_set_rf_sector_params_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct nlattr *nl_cfg; struct wmi_rf_sector_info *si; if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + rc = nla_parse(tb, QCA_ATTR_WIL_MAX, data, data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); @@ -2626,7 +2787,6 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, } cmd.rf_modules_vec = rf_modules_vec & 0xFF; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, vif->mid, &cmd, 
sizeof(cmd), WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), @@ -2643,20 +2803,22 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, struct wil6210_priv *wil = wdev_to_wil(wdev); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; - struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + struct nlattr *tb[QCA_ATTR_WIL_MAX + 1]; u8 sector_type, mac_addr[ETH_ALEN]; int cid = 0; struct wmi_get_selected_rf_sector_index_cmd cmd; struct { struct wmi_cmd_hdr wmi; struct wmi_get_selected_rf_sector_index_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct sk_buff *msg; if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + rc = nla_parse(tb, QCA_ATTR_WIL_MAX, data, data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); @@ -2690,7 +2852,6 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, memset(&cmd, 0, sizeof(cmd)); cmd.cid = (u8)cid; cmd.sector_type = sector_type; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, @@ -2731,14 +2892,15 @@ static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_set_selected_rf_sector_index_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; int rc; memset(&cmd, 0, sizeof(cmd)); cmd.sector_idx = cpu_to_le16(sector_index); cmd.sector_type = sector_type; cmd.cid = (u8)cid; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, mid, &cmd, sizeof(cmd), WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, @@ -2756,7 +2918,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, struct wil6210_priv *wil = wdev_to_wil(wdev); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; - struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1]; + struct nlattr *tb[QCA_ATTR_WIL_MAX + 1]; u16 sector_index; u8 sector_type, mac_addr[ETH_ALEN], i; int cid = 0; @@ -2764,7 +2926,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) return -EOPNOTSUPP; - rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len, + rc = nla_parse(tb, QCA_ATTR_WIL_MAX, data, data_len, wil_rf_sector_policy, NULL); if (rc) { wil_err(wil, "Invalid rf sector ATTR\n"); @@ -2846,3 +3008,261 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, return rc; } + +static int +wil_brp_wmi_set_ant_limit(struct wil6210_priv *wil, u8 mid, u8 cid, + u8 limit_mode, u8 antenna_num_limit) +{ + int rc; + struct wmi_brp_set_ant_limit_cmd cmd = { + .cid = cid, + .limit_mode = limit_mode, + .ant_limit = antenna_num_limit, + }; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_brp_set_ant_limit_event evt; + } __packed reply; + + reply.evt.status = WMI_FW_STATUS_FAILURE; + + rc = wmi_call(wil, WMI_BRP_SET_ANT_LIMIT_CMDID, mid, &cmd, sizeof(cmd), + WMI_BRP_SET_ANT_LIMIT_EVENTID, &reply, + sizeof(reply), 250); + if (rc) + return rc; + + if (reply.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "brp set antenna limit failed with status %d\n", + reply.evt.status); + rc = -EINVAL; + } + + return rc; +} + +static int wil_brp_set_ant_limit(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int 
data_len) +{ + struct wil6210_priv *wil = wdev_to_wil(wdev); + struct wil6210_vif *vif = wdev_to_vif(wil, wdev); + struct nlattr *tb[QCA_ATTR_WIL_MAX + 1]; + u8 mac_addr[ETH_ALEN]; + u8 antenna_num_limit = 0; + u8 limit_mode; + int cid = 0; + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) + return -ENOTSUPP; + + rc = nla_parse(tb, QCA_ATTR_WIL_MAX, data, data_len, + wil_brp_ant_limit_policy, NULL); + if (rc) { + wil_err(wil, "Invalid ant limit ATTR\n"); + return rc; + } + + if (!tb[QCA_ATTR_BRP_ANT_LIMIT_MODE] || !tb[QCA_ATTR_MAC_ADDR]) { + wil_err(wil, "Invalid antenna limit spec\n"); + return -EINVAL; + } + + limit_mode = nla_get_u8(tb[QCA_ATTR_BRP_ANT_LIMIT_MODE]); + if (limit_mode >= QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODES_NUM) { + wil_err(wil, "Invalid limit mode %d\n", limit_mode); + return -EINVAL; + } + + if (limit_mode != QCA_WLAN_VENDOR_ATTR_BRP_ANT_LIMIT_MODE_DISABLE) { + if (!tb[QCA_ATTR_BRP_ANT_NUM_LIMIT]) { + wil_err(wil, "Invalid limit number\n"); + return -EINVAL; + } + + antenna_num_limit = nla_get_u8(tb[QCA_ATTR_BRP_ANT_NUM_LIMIT]); + if (antenna_num_limit > WIL_BRP_ANT_LIMIT_MAX || + antenna_num_limit < WIL_BRP_ANT_LIMIT_MIN) { + wil_err(wil, "Invalid number of antenna limit: %d\n", + antenna_num_limit); + return -EINVAL; + } + } + + ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR])); + cid = wil_find_cid(wil, vif->mid, mac_addr); + if (cid < 0) { + wil_err(wil, "invalid MAC address %pM\n", mac_addr); + return -ENOENT; + } + + return wil_brp_wmi_set_ant_limit(wil, vif->mid, cid, limit_mode, + antenna_num_limit); +} + +static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wdev_to_wil(wdev); + struct nlattr *tb[QCA_ATTR_WIL_MAX + 1]; + struct wil_nl_60g_send_receive_wmi *cmd; + struct wil_nl_60g_debug_force_wmi debug_force_wmi; + int rc, len; + u32 wil_nl_60g_cmd_type, publish; + + rc = nla_parse(tb, QCA_ATTR_WIL_MAX, data, data_len, + wil_nl_60g_policy, NULL); + if (rc) { + wil_err(wil, "Invalid nl_60g_cmd ATTR\n"); + return rc; + } + + if (!tb[WIL_ATTR_60G_CMD_TYPE]) { + wil_err(wil, "Invalid nl_60g_cmd type\n"); + return -EINVAL; + } + + wil_nl_60g_cmd_type = nla_get_u32(tb[WIL_ATTR_60G_CMD_TYPE]); + + switch (wil_nl_60g_cmd_type) { + case NL_60G_CMD_REGISTER: + if (!tb[WIL_ATTR_60G_BUF]) { + wil_err(wil, "Invalid nl_60g_cmd spec\n"); + return -EINVAL; + } + + len = nla_len(tb[WIL_ATTR_60G_BUF]); + if (len != sizeof(publish)) { + wil_err(wil, "cmd buffer wrong len %d\n", len); + return -EINVAL; + } + memcpy(&publish, nla_data(tb[WIL_ATTR_60G_BUF]), len); + wil->publish_nl_evt = publish; + + wil_dbg_wmi(wil, "Publish wmi event %s\n", + publish ? 
"enabled" : "disabled"); + break; + case NL_60G_CMD_DEBUG: + if (!tb[WIL_ATTR_60G_BUF]) { + wil_err(wil, "Invalid nl_60g_cmd spec\n"); + return -EINVAL; + } + + len = nla_len(tb[WIL_ATTR_60G_BUF]); + if (len < sizeof(struct wil_nl_60g_debug)) { + wil_err(wil, "cmd buffer too short %d\n", len); + return -EINVAL; + } + + memcpy(&debug_force_wmi, nla_data(tb[WIL_ATTR_60G_BUF]), + sizeof(struct wil_nl_60g_debug)); + + switch (debug_force_wmi.hdr.cmd_id) { + case NL_60G_DBG_FORCE_WMI_SEND: + if (len != sizeof(debug_force_wmi)) { + wil_err(wil, "cmd buffer wrong len %d\n", len); + return -EINVAL; + } + + memcpy(&debug_force_wmi, nla_data(tb[WIL_ATTR_60G_BUF]), + sizeof(debug_force_wmi)); + wil->force_wmi_send = debug_force_wmi.enable; + + wil_dbg_wmi(wil, "force sending wmi commands %d\n", + wil->force_wmi_send); + break; + default: + rc = -EINVAL; + wil_err(wil, "invalid debug_cmd id %d", + debug_force_wmi.hdr.cmd_id); + } + break; + case NL_60G_CMD_FW_WMI: + if (!tb[WIL_ATTR_60G_BUF]) { + wil_err(wil, "Invalid nl_60g_cmd spec\n"); + return -EINVAL; + } + + len = nla_len(tb[WIL_ATTR_60G_BUF]); + if (len < offsetof(struct wil_nl_60g_send_receive_wmi, buf)) { + wil_err(wil, "wmi cmd buffer too small\n"); + return -EINVAL; + } + + cmd = kmalloc(len, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memcpy(cmd, nla_data(tb[WIL_ATTR_60G_BUF]), (unsigned int)len); + + wil_dbg_wmi(wil, "sending user-space command (0x%04x) [%d]\n", + cmd->cmd_id, cmd->buf_len); + + if (wil->force_wmi_send) + rc = wmi_force_send(wil, cmd->cmd_id, cmd->dev_id, + cmd->buf, cmd->buf_len); + else + rc = wmi_send(wil, cmd->cmd_id, cmd->dev_id, + cmd->buf, cmd->buf_len); + + kfree(cmd); + break; + default: + rc = -EINVAL; + wil_err(wil, "invalid nl_60g_cmd type %d", wil_nl_60g_cmd_type); + } + + return rc; +} + +void wil_nl_60g_receive_wmi_evt(struct wil6210_priv *wil, u8 *cmd, int len) +{ + struct sk_buff *vendor_event = NULL; + struct wil_nl_60g_event *evt; + struct wil_nl_60g_send_receive_wmi *wmi_buf; + struct wmi_cmd_hdr *wmi_hdr = (struct wmi_cmd_hdr *)cmd; + int data_len; + + if (!wil->publish_nl_evt) + return; + + wil_dbg_wmi(wil, "report wmi event to user-space (0x%04x) [%d]\n", + le16_to_cpu(wmi_hdr->command_id), len); + + data_len = len - sizeof(struct wmi_cmd_hdr); + + evt = kzalloc(sizeof(*evt) + sizeof(*wmi_buf) + data_len, GFP_KERNEL); + if (!evt) + return; + + evt->evt_type = NL_60G_EVT_FW_WMI; + evt->buf_len = sizeof(*wmi_buf) + data_len; + + wmi_buf = (struct wil_nl_60g_send_receive_wmi *)evt->buf; + + wmi_buf->cmd_id = le16_to_cpu(wmi_hdr->command_id); + wmi_buf->dev_id = wmi_hdr->mid; + wmi_buf->buf_len = data_len; + memcpy(wmi_buf->buf, cmd + sizeof(struct wmi_cmd_hdr), data_len); + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + NULL, + data_len + 4 + NLMSG_HDRLEN + + sizeof(*evt) + sizeof(*wmi_buf), + QCA_NL80211_VENDOR_EVENT_UNSPEC_INDEX, + GFP_KERNEL); + if (!vendor_event) + goto out; + + if (nla_put(vendor_event, WIL_ATTR_60G_BUF, + sizeof(*evt) + sizeof(*wmi_buf) + data_len, evt)) { + wil_err(wil, "failed to fill WIL_ATTR_60G_BUF\n"); + goto out; + } + + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + +out: + kfree(evt); +} diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 4719dd5a1ef6a816758247be1433812d2ae8b4bf..4c1a9dad05649cd624217a208bdf3536a31cfd09 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1335,6 +1335,8 @@ static int wil_bf_debugfs_show(struct 
seq_file *s, void *data) struct wmi_notify_req_done_event evt; } __packed reply; + memset(&reply, 0, sizeof(reply)); + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { u32 status; @@ -1678,8 +1680,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old; - seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout, - r->head_seq_num); + seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num); for (i = 0; i < r->buf_size; i++) { if (i == index) seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|'); @@ -2165,6 +2166,8 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(wakeup_trigger, 0644, doff_u8), WIL_FIELD(ring_idle_trsh, 0644, doff_u32), WIL_FIELD(num_rx_status_rings, 0644, doff_u8), + WIL_FIELD(amsdu_en, 0644, doff_u8), + WIL_FIELD(force_edmg_channel, 0644, doff_u8), {}, }; diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h index 21923c27ec06ebe36daa07cb2296c87885bc9c9f..e9efad7405dd35b596ea71111b104f8325ff813f 100644 --- a/drivers/net/wireless/ath/wil6210/ftm.h +++ b/drivers/net/wireless/ath/wil6210/ftm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -417,6 +417,7 @@ enum qca_nl80211_vendor_events_index { QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX, QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX, QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX, + QCA_NL80211_VENDOR_EVENT_UNSPEC_INDEX, }; /* measurement parameters. 
Specified for each peer as part diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 980b84e6390026ac931bcebc2250c90adddda070..c0342e5a874d654238b8abcaa8725c1a71160181 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -397,6 +397,8 @@ void wil_disconnect_worker(struct work_struct *work) /* already disconnected */ return; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0, WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), WIL6210_DISCONNECT_TO_MS); @@ -653,6 +655,8 @@ int wil_priv_init(struct wil6210_priv *wil) /* num of rx srings can be updated via debugfs before allocation */ wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS; + wil->amsdu_en = 1; + return 0; out_wmi_wq: @@ -1524,6 +1528,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wmi_event_flush(wil); + wil->force_wmi_send = false; + flush_workqueue(wil->wq_service); flush_workqueue(wil->wmi_wq); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 0e0b8064fe015a7f4ed8f8002a6b480b43376066..eda79dd2739b5838a3e6d6485b8e381fc983f46e 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -529,16 +529,16 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid) return; } + mutex_lock(&wil->mutex); + wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); + mutex_unlock(&wil->mutex); + ndev = vif_to_ndev(vif); /* during unregister_netdevice cfg80211_leave may perform operations * such as stop AP, disconnect, so we only clear the VIF afterwards */ unregister_netdevice(ndev); - mutex_lock(&wil->mutex); - wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); - mutex_unlock(&wil->mutex); - if (any_active && vif->mid != 0) wmi_port_delete(wil, vif->mid); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 9321b5e91a9cc9a7f1b239a349cc0c2bba51066c..ec158a41264143812921e16c1d2a52ccdfd4d6c1 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -205,7 +205,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* put the frame in the reordering buffer */ r->reorder_buf[index] = skb; - r->reorder_time[index] = jiffies; r->stored_mpdu_num++; wil_reorder_release(ndev, r); @@ -251,11 +250,8 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, r->reorder_buf = kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL); - r->reorder_time = - kcalloc(size, sizeof(unsigned long), GFP_KERNEL); - if (!r->reorder_buf || !r->reorder_time) { + if (!r->reorder_buf) { kfree(r->reorder_buf); - kfree(r->reorder_time); kfree(r); return NULL; } @@ -285,7 +281,6 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, kfree_skb(r->reorder_buf[i]); kfree(r->reorder_buf); - kfree(r->reorder_time); kfree(r); } @@ -319,7 +314,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) * bits 6..15: buffer size */ u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15); - bool agg_amsdu = !!(param_set & BIT(0)); + bool agg_amsdu = wil->use_enhanced_dma_hw && + use_rx_hw_reordering && + test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) && + wil->amsdu_en && (param_set & BIT(0)); int ba_policy = param_set & BIT(1); u16 status = WLAN_STATUS_SUCCESS; u16 ssn = seq_ctrl >> 4; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c 
b/drivers/net/wireless/ath/wil6210/txrx.c index 04c9a7261c36afc2a2a733c0585e6046394b6608..8991d136916f36b68f85f5004501ebe9464d7037 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -608,8 +608,8 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) v->swtail = next_tail) { rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); if (unlikely(rc)) { - wil_err(wil, "Error %d in wil_rx_refill[%d]\n", - rc, v->swtail); + wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", + rc, v->swtail); break; } } @@ -956,7 +956,9 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, struct { struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; - } __packed reply; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; struct wil_ring *vring = &wil->ring_tx[id]; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; @@ -1039,7 +1041,9 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) struct { struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; - } __packed reply; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; struct wil_ring *vring = &wil->ring_tx[id]; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 2fe3c9aaba04c349ec3d5d9e8404a7bac62d7055..87f918f51d67455ebc7c8858756293fa70822d0d 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -411,6 +411,7 @@ struct fw_map { u32 host; /* PCI/Host address - BAR0 + 0x880000 */ const char *name; /* for debugfs */ bool fw; /* true if FW mapping, false if UCODE mapping */ + bool crash_dump; /* true if should be dumped during crash dump */ }; /* array size should be in sync with actual definition in the wmi.c */ @@ -620,38 +621,28 @@ struct pci_dev; * struct tid_ampdu_rx - TID aggregation information (Rx). * * @reorder_buf: buffer to reorder incoming aggregated MPDUs - * @reorder_time: jiffies when skb was added - * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) - * @reorder_timer: releases expired frames from the reorder buffer. * @last_rx: jiffies of last rx activity * @head_seq_num: head sequence number in reordering buffer. * @stored_mpdu_num: number of MPDUs in reordering buffer * @ssn: Starting Sequence Number expected to be aggregated. * @buf_size: buffer size for incoming A-MPDUs - * @timeout: reset timer value (in TUs). * @ssn_last_drop: SSN of the last dropped frame * @total: total number of processed incoming frames * @drop_dup: duplicate frames dropped for this reorder buffer * @drop_old: old frames dropped for this reorder buffer - * @dialog_token: dialog token for aggregation session * @first_time: true when this buffer used 1-st time */ struct wil_tid_ampdu_rx { struct sk_buff **reorder_buf; - unsigned long *reorder_time; - struct timer_list session_timer; - struct timer_list reorder_timer; unsigned long last_rx; u16 head_seq_num; u16 stored_mpdu_num; u16 ssn; u16 buf_size; - u16 timeout; u16 ssn_last_drop; unsigned long long total; /* frames processed */ unsigned long long drop_dup; unsigned long long drop_old; - u8 dialog_token; bool first_time; /* is it 1-st time this buffer used? 
*/ }; @@ -957,6 +948,7 @@ struct wil6210_priv { u8 wakeup_trigger; struct wil_suspend_stats suspend_stats; struct wil_debugfs_data dbg_data; + u8 force_edmg_channel; void *platform_handle; struct wil_platform_ops platform_ops; @@ -1004,6 +996,12 @@ struct wil6210_priv { bool secured_boot; u8 boot_config; + + bool publish_nl_evt; /* deliver WMI events to user space */ + bool force_wmi_send; /* allow WMI command while FW in sysassert */ + + /* relevant only for eDMA */ + bool amsdu_en; }; #define wil_to_wiphy(i) (i->wiphy) @@ -1169,9 +1167,11 @@ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr); int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, struct wil6210_mbox_hdr *hdr); int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len); +int wmi_force_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, + u16 len); void wmi_recv_cmd(struct wil6210_priv *wil); int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, - u16 reply_id, void *reply, u8 reply_size, int to_msec); + u16 reply_id, void *reply, u16 reply_size, int to_msec); void wmi_event_worker(struct work_struct *work); void wmi_event_flush(struct wil6210_priv *wil); int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid); @@ -1349,6 +1349,7 @@ void wil_ftm_evt_per_dest_res(struct wil6210_vif *vif, void wil_aoa_evt_meas(struct wil6210_vif *vif, struct wmi_aoa_meas_event *evt, int len); +void wil_nl_60g_receive_wmi_evt(struct wil6210_priv *wil, u8 *cmd, int len); /* link loss */ int wmi_link_maintain_cfg_write(struct wil6210_priv *wil, const u8 *addr, @@ -1359,6 +1360,10 @@ int wmi_set_snr_thresh(struct wil6210_priv *wil, short omni, short direct); int wmi_start_sched_scan(struct wil6210_priv *wil, struct cfg80211_sched_scan_request *request); int wmi_stop_sched_scan(struct wil6210_priv *wil); +int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); + +int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch); +int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch); int reverse_memcmp(const void *cs, const void *ct, size_t count); diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c index 1ed330674d9b8d5aa17e9647cf9a80c8787c2151..dc33a0b4c3fac55c67371aa7ef762a2c90320636 100644 --- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c +++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil, for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; - if (!map->fw) + if (!map->crash_dump) continue; if (map->host < host_min) @@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size) for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { map = &fw_mapping[i]; - if (!map->fw) + if (!map->crash_dump) continue; data = (void * __force)wil->csr + HOSTADDR(map->host); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6d87369589db7bb3cc73eff66c3b4ecc5bc4d606..d87414c560fbf577c4fb88167553816576a76272 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -39,10 +39,6 @@ module_param(led_id, byte, 0444); MODULE_PARM_DESC(led_id, " 60G device led enablement. Set the led ID (0-2) to enable"); -static bool amsdu_en; -module_param(amsdu_en, bool, 0444); -MODULE_PARM_DESC(amsdu_en, " enable A-MSDU, default: false"); - #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200 #define WIL_WMI_CALL_GENERAL_TO_MS 100 @@ -94,28 +90,28 @@ MODULE_PARM_DESC(amsdu_en, " enable A-MSDU, default: false"); */ const struct fw_map sparrow_fw_mapping[] = { /* FW code RAM 256k */ - {0x000000, 0x040000, 0x8c0000, "fw_code", true}, + {0x000000, 0x040000, 0x8c0000, "fw_code", true, true}, /* FW data RAM 32k */ - {0x800000, 0x808000, 0x900000, "fw_data", true}, + {0x800000, 0x808000, 0x900000, "fw_data", true, true}, /* periph data 128k */ - {0x840000, 0x860000, 0x908000, "fw_peri", true}, + {0x840000, 0x860000, 0x908000, "fw_peri", true, true}, /* various RGF 40k */ - {0x880000, 0x88a000, 0x880000, "rgf", true}, + {0x880000, 0x88a000, 0x880000, "rgf", true, true}, /* AGC table 4k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true}, /* Pcie_ext_rgf 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true}, /* mac_ext_rgf 512b */ - {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true}, + {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true}, /* upper area 548k */ - {0x8c0000, 0x949000, 0x8c0000, "upper", true}, + {0x8c0000, 0x949000, 0x8c0000, "upper", true, true}, /* UCODE areas - accessible by debugfs blobs but not by * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! */ /* ucode code RAM 128k */ - {0x000000, 0x020000, 0x920000, "uc_code", false}, + {0x000000, 0x020000, 0x920000, "uc_code", false, false}, /* ucode data RAM 16k */ - {0x800000, 0x804000, 0x940000, "uc_data", false}, + {0x800000, 0x804000, 0x940000, "uc_data", false, false}, }; /** @@ -123,7 +119,7 @@ const struct fw_map sparrow_fw_mapping[] = { * it is a bit larger to support extra features */ const struct fw_map sparrow_d0_mac_rgf_ext = { - 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true + 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true }; /** @@ -139,34 +135,34 @@ const struct fw_map sparrow_d0_mac_rgf_ext = { */ const struct fw_map talyn_fw_mapping[] = { /* FW code RAM 1M */ - {0x000000, 0x100000, 0x900000, "fw_code", true}, + {0x000000, 0x100000, 0x900000, "fw_code", true, true}, /* FW data RAM 128k */ - {0x800000, 0x820000, 0xa00000, "fw_data", true}, + {0x800000, 0x820000, 0xa00000, "fw_data", true, true}, /* periph. 
data RAM 96k */ - {0x840000, 0x858000, 0xa20000, "fw_peri", true}, + {0x840000, 0x858000, 0xa20000, "fw_peri", true, true}, /* various RGF 40k */ - {0x880000, 0x88a000, 0x880000, "rgf", true}, + {0x880000, 0x88a000, 0x880000, "rgf", true, true}, /* AGC table 4k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true}, /* Pcie_ext_rgf 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true}, /* mac_ext_rgf 1344b */ - {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true}, + {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true}, /* ext USER RGF 4k */ - {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, + {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true}, /* OTP 4k */ - {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, + {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false}, /* DMA EXT RGF 64k */ - {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, + {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true}, /* upper area 1536k */ - {0x900000, 0xa80000, 0x900000, "upper", true}, + {0x900000, 0xa80000, 0x900000, "upper", true, true}, /* UCODE areas - accessible by debugfs blobs but not by * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! */ /* ucode code RAM 256k */ - {0x000000, 0x040000, 0xa38000, "uc_code", false}, + {0x000000, 0x040000, 0xa38000, "uc_code", false, false}, /* ucode data RAM 32k */ - {0x800000, 0x808000, 0xa78000, "uc_data", false}, + {0x800000, 0x808000, 0xa78000, "uc_data", false, false}, }; /** @@ -182,46 +178,46 @@ const struct fw_map talyn_fw_mapping[] = { */ const struct fw_map talyn_mb_fw_mapping[] = { /* FW code RAM 768k */ - {0x000000, 0x0c0000, 0x900000, "fw_code", true}, + {0x000000, 0x0c0000, 0x900000, "fw_code", true, true}, /* FW data RAM 128k */ - {0x800000, 0x820000, 0xa00000, "fw_data", true}, + {0x800000, 0x820000, 0xa00000, "fw_data", true, true}, /* periph. 
data RAM 96k */ - {0x840000, 0x858000, 0xa20000, "fw_peri", true}, + {0x840000, 0x858000, 0xa20000, "fw_peri", true, true}, /* various RGF 40k */ - {0x880000, 0x88a000, 0x880000, "rgf", true}, + {0x880000, 0x88a000, 0x880000, "rgf", true, true}, /* AGC table 4k */ - {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, + {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true}, /* Pcie_ext_rgf 4k */ - {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, + {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true}, /* mac_ext_rgf 2256b */ - {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true}, + {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true}, /* ext USER RGF 4k */ - {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, + {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true}, /* SEC PKA 16k */ - {0x890000, 0x894000, 0x890000, "sec_pka", true}, + {0x890000, 0x894000, 0x890000, "sec_pka", true, true}, /* SEC KDF RGF 3096b */ - {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true}, + {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true}, /* SEC MAIN 2124b */ - {0x89a000, 0x89a84c, 0x89a000, "sec_main", true}, + {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true}, /* OTP 4k */ - {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, + {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false}, /* DMA EXT RGF 64k */ - {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, + {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true}, /* DUM USER RGF 528b */ - {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true}, + {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true}, /* DMA OFU 296b */ - {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true}, + {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true}, /* ucode debug 4k */ - {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true}, + {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true}, /* upper area 1536k */ - {0x900000, 0xa80000, 0x900000, "upper", true}, + {0x900000, 0xa80000, 0x900000, "upper", true, true}, /* UCODE areas - accessible by debugfs blobs but not by * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! 
*/ /* ucode code RAM 256k */ - {0x000000, 0x040000, 0xa38000, "uc_code", false}, + {0x000000, 0x040000, 0xa38000, "uc_code", false, false}, /* ucode data RAM 32k */ - {0x800000, 0x808000, 0xa78000, "uc_data", false}, + {0x800000, 0x808000, 0xa78000, "uc_data", false, false}, }; struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; @@ -609,7 +605,7 @@ static const char *eventid2name(u16 eventid) } static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, - void *buf, u16 len) + void *buf, u16 len, bool force_send) { struct { struct wil6210_mbox_hdr hdr; @@ -641,7 +637,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, might_sleep(); - if (!test_bit(wil_status_fwready, wil->status)) { + if (!test_bit(wil_status_fwready, wil->status) && !force_send) { wil_err(wil, "WMI: cannot send command while FW not ready\n"); return -EAGAIN; } @@ -680,7 +676,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { - if (!test_bit(wil_status_fwready, wil->status)) { + if (!test_bit(wil_status_fwready, wil->status) && !force_send) { wil_err(wil, "WMI: cannot send command while FW not ready\n"); rc = -EAGAIN; goto out; @@ -735,7 +731,19 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len) int rc; mutex_lock(&wil->wmi_mutex); - rc = __wmi_send(wil, cmdid, mid, buf, len); + rc = __wmi_send(wil, cmdid, mid, buf, len, false); + mutex_unlock(&wil->wmi_mutex); + + return rc; +} + +int wmi_force_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, + u16 len) +{ + int rc; + + mutex_lock(&wil->wmi_mutex); + rc = __wmi_send(wil, cmdid, mid, buf, len, true); mutex_unlock(&wil->wmi_mutex); return rc; @@ -912,7 +920,8 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) struct net_device *ndev = vif_to_ndev(vif); struct wireless_dev *wdev = vif_to_wdev(vif); struct wmi_connect_event *evt = d; - int ch; /* channel number */ + int ch; /* channel number (primary) */ + u8 spec_ch = 0; /* spec channel number */ struct station_info sinfo; u8 *assoc_req_ie, *assoc_resp_ie; size_t assoc_req_ielen, assoc_resp_ielen; @@ -940,8 +949,16 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) } ch = evt->channel + 1; - wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n", - evt->bssid, ch, evt->cid, evt->aid); + if (evt->edmg_channel && + test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) + wil_wmi2spec_ch(evt->edmg_channel, &spec_ch); + if (spec_ch) + wil_info(wil, "Connect %pM EDMG channel [%d] primary channel [%d] cid %d aid %d\n", + evt->bssid, spec_ch, ch, evt->cid, evt->aid); + else + wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n", + evt->bssid, ch, evt->cid, evt->aid); + wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); @@ -1475,6 +1492,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) u16 id = le16_to_cpu(wmi->command_id); u8 mid = wmi->mid; u32 tstamp = le32_to_cpu(wmi->fw_timestamp); + wil_nl_60g_receive_wmi_evt(wil, cmd, len); if (test_bit(wil_status_resuming, wil->status)) { if (id == WMI_TRAFFIC_RESUME_EVENTID) clear_bit(wil_status_resuming, @@ -1535,7 +1553,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) } int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, - u16 reply_id, void *reply, u8 reply_size, int to_msec) + u16 reply_id, 
void *reply, u16 reply_size, int to_msec) { int rc; unsigned long remain; @@ -1550,7 +1568,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, reinit_completion(&wil->wmi_call); spin_unlock(&wil->wmi_ev_lock); - rc = __wmi_send(wil, cmdid, mid, buf, len); + rc = __wmi_send(wil, cmdid, mid, buf, len, false); if (rc) goto out; @@ -1628,7 +1646,9 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable) struct { struct wmi_cmd_hdr wmi; struct wmi_led_cfg_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le32(WMI_FW_STATUS_FAILURE)}, + }; if (led_id == WIL_LED_INVALID_ID) goto out; @@ -1673,13 +1693,27 @@ int wmi_pcp_start(struct wil6210_vif *vif, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, .is_go = is_go, - .disable_ap_sme = disable_ap_sme, + .ap_sme_offload_mode = disable_ap_sme ? + WMI_AP_SME_OFFLOAD_PARTIAL : + WMI_AP_SME_OFFLOAD_FULL, .abft_len = wil->abft_len, }; struct { struct wmi_cmd_hdr wmi; struct wmi_pcp_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + + if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) + if (wil->force_edmg_channel) { + rc = wil_spec2wmi_ch(wil->force_edmg_channel, + &cmd.edmg_channel); + if (rc) + wil_err(wil, + "wmi channel for channel %d not found", + wil->force_edmg_channel); + } if (!vif->privacy) cmd.disable_sec = 1; @@ -1693,7 +1727,7 @@ int wmi_pcp_start(struct wil6210_vif *vif, } if (disable_ap_sme && - !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME, + !test_bit(WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL, wil->fw_capabilities)) { wil_err(wil, "disable_ap_sme not supported by FW\n"); return -EOPNOTSUPP; @@ -1756,6 +1790,8 @@ int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid) } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0, WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1791,6 +1827,8 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) struct wmi_set_pcp_channel_cmd cmd; } __packed reply; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0, WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1816,7 +1854,9 @@ int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi) struct { struct wmi_cmd_hdr wmi; struct wmi_p2p_cfg_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n"); @@ -1837,7 +1877,9 @@ int wmi_start_listen(struct wil6210_vif *vif) struct { struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n"); @@ -1859,7 +1901,9 @@ int wmi_start_search(struct wil6210_vif *vif) struct { struct wmi_cmd_hdr wmi; struct wmi_search_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n"); @@ -1985,7 +2029,9 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) struct { struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_info(wil, "(%s)\n", on ? 
"on" : "off"); @@ -2027,6 +2073,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring) } __packed evt; int rc; + memset(&evt, 0, sizeof(evt)); + if (wdev->iftype == NL80211_IFTYPE_MONITOR) { struct ieee80211_channel *ch = wil->monitor_chandef.chan; @@ -2056,14 +2104,14 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring) if (rc) return rc; + if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) + rc = -EINVAL; + vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", le32_to_cpu(evt.evt.status), vring->hwtail); - if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) - rc = -EINVAL; - return rc; } @@ -2081,6 +2129,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) struct wmi_temp_sense_done_event evt; } __packed reply; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -2113,6 +2163,7 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); + memset(&reply, 0, sizeof(reply)); vif->locally_generated_disc = true; if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); @@ -2153,8 +2204,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 mid, u8 ringid, u8 size, u16 timeout) { u8 amsdu = wil->use_enhanced_dma_hw && use_rx_hw_reordering && - amsdu_en ? 1 : 0; - + test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) && + wil->amsdu_en; struct wmi_ring_ba_en_cmd cmd = { .ring_id = ringid, .agg_max_wsize = size, @@ -2214,7 +2265,9 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_rcp_addba_resp_sent_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)}, + }; wil_dbg_wmi(wil, "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n", @@ -2295,13 +2348,13 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_ps_dev_profile_cfg_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR)}, + }; u32 status; wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile); - reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR); - rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply), @@ -2330,15 +2383,15 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) struct { struct wmi_cmd_hdr wmi; struct wmi_set_mgmt_retry_limit_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short); if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), @@ -2369,7 +2422,7 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.mgmt_retry_limit = 0; + memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0, WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), 100); @@ -2517,14 +2570,15 @@ int 
wmi_suspend(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_suspend_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE}, + }; + u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP; wil->suspend_resp_rcvd = false; wil->suspend_resp_comp = false; - reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE; - rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply), @@ -2600,10 +2654,11 @@ int wmi_resume(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_resume_event evt; - } __packed reply; - - reply.evt.status = WMI_TRAFFIC_RESUME_FAILED; - reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN; + } __packed reply = { + .evt = {.status = WMI_TRAFFIC_RESUME_FAILED, + .resume_triggers = + cpu_to_le32(WMI_RESUME_TRIGGER_UNKNOWN)}, + }; rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, vif->mid, NULL, 0, WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply), @@ -2688,7 +2743,9 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, struct { struct wmi_cmd_hdr wmi; struct wmi_port_allocated_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_misc(wil, "port allocate, mid %d iftype %d, mac %pM\n", mid, iftype, mac); @@ -2713,8 +2770,6 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, return -EINVAL; } - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_PORT_ALLOCATE_CMDID, mid, &cmd, sizeof(cmd), WMI_PORT_ALLOCATED_EVENTID, &reply, @@ -2741,12 +2796,12 @@ int wmi_port_delete(struct wil6210_priv *wil, u8 mid) struct { struct wmi_cmd_hdr wmi; struct wmi_port_deleted_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_misc(wil, "port delete, mid %d\n", mid); - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_PORT_DELETE_CMDID, mid, &cmd, sizeof(cmd), WMI_PORT_DELETED_EVENTID, &reply, @@ -3034,7 +3089,9 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_start_sched_scan_event evt; - } __packed reply; + } __packed reply = { + .evt = {.result = WMI_PNO_REJECT}, + }; if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) return -ENOTSUPP; @@ -3050,8 +3107,6 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, wmi_sched_scan_set_plans(wil, &cmd, request->scan_plans, request->n_scan_plans); - reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply), @@ -3075,13 +3130,13 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_stop_sched_scan_event evt; - } __packed reply; + } __packed reply = { + .evt = {.result = WMI_PNO_REJECT}, + }; if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, vif->mid, NULL, 0, WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); @@ -3097,6 +3152,53 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) return 0; } +int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) +{ + size_t total; + struct wil6210_priv *wil = vif_to_wil(vif); + struct ieee80211_mgmt *mgmt_frame = (void *)buf; + struct wmi_sw_tx_req_cmd *cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_sw_tx_complete_event evt; 
+ } __packed evt = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + int rc; + + wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); + wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + + if (len < sizeof(struct ieee80211_hdr_3addr)) + return -EINVAL; + + total = sizeof(*cmd) + len; + if (total < len) { + wil_err(wil, "mgmt_tx invalid len %zu\n", len); + return -EINVAL; + } + + cmd = kmalloc(total, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); + cmd->len = cpu_to_le16(len); + memcpy(cmd->payload, buf, len); + + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, + WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); + if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status); + rc = -EINVAL; + } + + kfree(cmd); + + return rc; +} + int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id) { int rc; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 93f9e08b77106b198e1f270a83aacb40c13ad424..7a7ccccf900f2e6a79596c28f7d99b781ec5b892 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity * @@ -29,8 +30,6 @@ #ifndef __WILOCITY_WMI_H__ #define __WILOCITY_WMI_H__ -/* General */ -#define WMI_MAX_ASSOC_STA (8) #define WMI_DEFAULT_ASSOC_STA (1) #define WMI_MAC_LEN (6) #define WMI_PROX_RANGE_NUM (3) @@ -41,6 +40,19 @@ #define WMI_RF_ETYPE_LENGTH (3) #define WMI_RF_RX2TX_LENGTH (3) #define WMI_RF_ETYPE_VAL_PER_RANGE (5) +/* DTYPE configuration array size + * must always be kept equal to (WMI_RF_DTYPE_LENGTH+1) + */ +#define WMI_RF_DTYPE_CONF_LENGTH (4) +/* ETYPE configuration array size + * must always be kept equal to + * (WMI_RF_ETYPE_LENGTH+WMI_RF_ETYPE_VAL_PER_RANGE) + */ +#define WMI_RF_ETYPE_CONF_LENGTH (8) +/* RX2TX configuration array size + * must always be kept equal to (WMI_RF_RX2TX_LENGTH+1) + */ +#define WMI_RF_RX2TX_CONF_LENGTH (4) /* Mailbox interface * used for commands and events @@ -61,7 +73,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PS_CONFIG = 1, WMI_FW_CAPABILITY_RF_SECTORS = 2, WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, - WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL = 4, WMI_FW_CAPABILITY_WMI_ONLY = 5, WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, WMI_FW_CAPABILITY_D3_SUSPEND = 8, @@ -73,7 +85,10 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, WMI_FW_CAPABILITY_PNO = 15, WMI_FW_CAPABILITY_CONNECT_SNR_THR = 16, + WMI_FW_CAPABILITY_CHANNEL_BONDING = 17, WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, + WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, + WMI_FW_CAPABILITY_AMSDU = 23, WMI_FW_CAPABILITY_MAX, }; @@ -166,12 +181,14 @@ enum wmi_command_id { WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C, WMI_RF_PWR_ON_DELAY_CMDID = 0x85D, WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E, + WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID = 0x85F, /* Performance monitoring commands */ WMI_BF_CTRL_CMDID = 0x862, WMI_NOTIFY_REQ_CMDID = 0x863, WMI_GET_STATUS_CMDID = 0x864, WMI_GET_RF_STATUS_CMDID = 0x866, WMI_GET_BASEBAND_TYPE_CMDID = 0x867, + WMI_VRING_SWITCH_TIMING_CONFIG_CMDID = 0x868, WMI_UNIT_TEST_CMDID = 0x900, WMI_FLASH_READ_CMDID = 0x902, WMI_FLASH_WRITE_CMDID = 0x903, @@ -204,6 +221,7 @@ enum wmi_command_id { 
WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, /* Read Power Save profile type */ WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942, + WMI_TSF_SYNC_CMDID = 0x973, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, @@ -220,6 +238,7 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + WMI_BF_CONTROL_CMDID = 0x9AA, WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0, WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1, WMI_TX_DESC_RING_ADD_CMDID = 0x9C2, @@ -231,6 +250,10 @@ enum wmi_command_id { WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04, WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05, + WMI_GET_ASSOC_LIST_CMDID = 0xA06, + WMI_GET_CCA_INDICATIONS_CMDID = 0xA07, + WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08, + WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B, WMI_SET_MAC_ADDRESS_CMDID = 0xF003, WMI_ABORT_SCAN_CMDID = 0xF007, WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, @@ -292,6 +315,19 @@ enum wmi_connect_ctrl_flag_bits { #define WMI_MAX_SSID_LEN (32) +enum wmi_channel { + WMI_CHANNEL_1 = 0x00, + WMI_CHANNEL_2 = 0x01, + WMI_CHANNEL_3 = 0x02, + WMI_CHANNEL_4 = 0x03, + WMI_CHANNEL_5 = 0x04, + WMI_CHANNEL_6 = 0x05, + WMI_CHANNEL_9 = 0x06, + WMI_CHANNEL_10 = 0x07, + WMI_CHANNEL_11 = 0x08, + WMI_CHANNEL_12 = 0x09, +}; + /* WMI_CONNECT_CMDID */ struct wmi_connect_cmd { u8 network_type; @@ -303,8 +339,12 @@ struct wmi_connect_cmd { u8 group_crypto_len; u8 ssid_len; u8 ssid[WMI_MAX_SSID_LEN]; + /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is + * the primary channel number + */ u8 channel; - u8 reserved0; + /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */ + u8 edmg_channel; u8 bssid[WMI_MAC_LEN]; __le32 ctrl_flags; u8 dst_mac[WMI_MAC_LEN]; @@ -492,6 +532,18 @@ enum wmi_rf_mgmt_type { WMI_RF_MGMT_GET_STATUS = 0x02, }; +/* WMI_BF_CONTROL_CMDID */ +enum wmi_bf_triggers { + WMI_BF_TRIGGER_RS_MCS1_TH_FAILURE = 0x01, + WMI_BF_TRIGGER_RS_MCS1_NO_BACK_FAILURE = 0x02, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP = 0x04, + WMI_BF_TRIGGER_MAX_BACK_FAILURE = 0x08, + WMI_BF_TRIGGER_FW = 0x10, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_KEEP_ALIVE = 0x20, + WMI_BF_TRIGGER_AOA = 0x40, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_UPM = 0x80, +}; + /* WMI_RF_MGMT_CMDID */ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; @@ -527,7 +579,9 @@ struct wmi_bcon_ctrl_cmd { u8 disable_sec; u8 hidden_ssid; u8 is_go; - u8 reserved[2]; + /* A-BFT length override if non-0 */ + u8 abft_len; + u8 reserved; } __packed; /* WMI_PORT_ALLOCATE_CMDID */ @@ -592,17 +646,33 @@ struct wmi_power_mgmt_cfg_cmd { u8 reserved[3]; } __packed; +/* WMI_PCP_START_CMDID */ +enum wmi_ap_sme_offload_mode { + /* Full AP SME in FW */ + WMI_AP_SME_OFFLOAD_FULL = 0x00, + /* Probe AP SME in FW */ + WMI_AP_SME_OFFLOAD_PARTIAL = 0x01, + /* AP SME in host */ + WMI_AP_SME_OFFLOAD_NONE = 0x02, +}; + /* WMI_PCP_START_CMDID */ struct wmi_pcp_start_cmd { __le16 bcon_interval; u8 pcp_max_assoc_sta; u8 hidden_ssid; u8 is_go; - u8 reserved0[5]; + /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */ + u8 edmg_channel; + u8 reserved[4]; /* A-BFT length override if non-0 */ u8 abft_len; - u8 disable_ap_sme; + /* enum wmi_ap_sme_offload_mode_e */ + u8 ap_sme_offload_mode; u8 network_type; + /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is + * the primary channel number + */ u8 channel; u8 disable_sec_offload; u8 disable_sec; @@ -615,6 +685,17 @@ struct wmi_sw_tx_req_cmd { u8 payload[0]; } __packed; 
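A recurring change in the wmi.c hunks above replaces a late memset() or a late status assignment on the on-stack reply buffer with a designated initializer that presets the event status to a failure code before wmi_call() is issued. The sketch below shows that pattern in isolation; the struct, command, and status names mirror the driver, but the function itself is illustrative only, on the assumption that wmi_call() leaves the reply untouched when the expected event does not arrive before the timeout.

```c
/* Illustrative sketch (not part of the patch) of the reply-initialization
 * pattern used throughout this change set. If wmi_call() returns without the
 * firmware delivering WMI_LED_CFG_DONE_EVENTID, the caller sees the preset
 * failure status instead of stale stack contents.
 */
static int example_led_cfg(struct wil6210_priv *wil, u8 mid,
			   struct wmi_led_cfg_cmd *cmd)
{
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_led_cfg_done_event evt;
	} __packed reply = {
		.evt = {.status = cpu_to_le32(WMI_FW_STATUS_FAILURE)},
	};
	int rc;

	rc = wmi_call(wil, WMI_LED_CFG_CMDID, mid, cmd, sizeof(*cmd),
		      WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		return rc;

	/* Status check now works even on a timed-out or truncated event */
	if (le32_to_cpu(reply.evt.status) != WMI_FW_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}
```

The same idea motivates the memset(&reply, 0, sizeof(reply)) additions in the hunks where no designated initializer was introduced: the reply must not be read back uninitialized on an error path.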
+/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */ +struct wmi_vring_switch_timing_config_cmd { + /* Set vring timing configuration: + * + * defined interval for vring switch + */ + __le32 interval_usec; + /* vring inactivity threshold */ + __le32 idle_th_usec; +} __packed; + struct wmi_sw_ring_cfg { __le64 ring_mem_base; __le16 ring_size; @@ -650,6 +731,7 @@ enum wmi_vring_cfg_schd_params_priority { WMI_SCH_PRIO_HIGH = 0x01, }; +#define CIDXTID_EXTENDED_CID_TID (0xFF) #define CIDXTID_CID_POS (0) #define CIDXTID_CID_LEN (4) #define CIDXTID_CID_MSK (0xF) @@ -670,6 +752,9 @@ struct wmi_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; /* 0-23 vrings */ u8 ringid; + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 encap_trans_type; /* 802.3 DS cfg */ @@ -679,6 +764,11 @@ struct wmi_vring_cfg { u8 to_resolution; u8 agg_max_wsize; struct wmi_vring_cfg_schd schd_params; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; enum wmi_vring_cfg_cmd_action { @@ -948,12 +1038,20 @@ struct wmi_cfg_rx_chain_cmd { /* WMI_RCP_ADDBA_RESP_CMDID */ struct wmi_rcp_addba_resp_cmd { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; __le16 status_code; /* ieee80211_ba_parameterset field to send */ __le16 ba_param_set; __le16 ba_timeout; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_RCP_ADDBA_RESP_EDMA_CMDID */ @@ -973,13 +1071,24 @@ struct wmi_rcp_addba_resp_edma_cmd { /* WMI_RCP_DELBA_CMDID */ struct wmi_rcp_delba_cmd { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 reserved; __le16 reason; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved2[2]; } __packed; /* WMI_RCP_ADDBA_REQ_CMDID */ struct wmi_rcp_addba_req_cmd { + /* Used for cid less than 8. 
For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; /* ieee80211_ba_parameterset field as it received */ @@ -987,6 +1096,11 @@ struct wmi_rcp_addba_req_cmd { __le16 ba_timeout; /* ieee80211_ba_seqstrl field as it received */ __le16 ba_seq_ctrl; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_SET_MAC_ADDRESS_CMDID */ @@ -997,15 +1111,20 @@ struct wmi_set_mac_address_cmd { /* WMI_ECHO_CMDID * Check FW is alive - * WMI_DEEP_ECHO_CMDID - * Check FW and ucode are alive * Returned event: WMI_ECHO_RSP_EVENTID - * same event for both commands */ struct wmi_echo_cmd { __le32 value; } __packed; +/* WMI_DEEP_ECHO_CMDID + * Check FW and ucode are alive + * Returned event: WMI_ECHO_RSP_EVENTID + */ +struct wmi_deep_echo_cmd { + __le32 value; +} __packed; + /* WMI_RF_PWR_ON_DELAY_CMDID * set FW time parameters used through RF resetting * RF reset consists of bringing its power down for a period of time, then @@ -1023,7 +1142,7 @@ struct wmi_rf_pwr_on_delay_cmd { __le16 up_delay_usec; } __packed; -/* \WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID +/* WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID * This API controls the Tx and Rx gain over temperature. * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers. * It also controls the Tx gain index, by controlling the Rx to Tx gain index @@ -1037,25 +1156,46 @@ struct wmi_set_high_power_table_params_cmd { u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH]; u8 reserved0; /* Tx D-type values to be used for each temperature range */ - __le32 tx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + __le32 tx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH]; + /* Temperature range for Tx E-type parameters */ + u8 tx_etype_temp[WMI_RF_ETYPE_LENGTH]; + u8 reserved1; + /* Tx E-type values to be used for each temperature range. + * The last 4 values of any range are the first 4 values of the next + * range and so on + */ + __le32 tx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH]; /* Temperature range for Rx D-type parameters */ u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH]; - u8 reserved1; + u8 reserved2; /* Rx D-type values to be used for each temperature range */ - __le32 rx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + __le32 rx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH]; /* Temperature range for Rx E-type parameters */ u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH]; - u8 reserved2; + u8 reserved3; /* Rx E-type values to be used for each temperature range. * The last 4 values of any range are the first 4 values of the next * range and so on */ - __le32 rx_etype_conf[WMI_RF_ETYPE_VAL_PER_RANGE + WMI_RF_ETYPE_LENGTH]; + __le32 rx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH]; /* Temperature range for rx_2_tx_offs parameters */ u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH]; - u8 reserved3; + u8 reserved4; /* Rx to Tx gain index offset */ - s8 rx_2_tx_offs[WMI_RF_RX2TX_LENGTH + 1]; + s8 rx_2_tx_offs[WMI_RF_RX2TX_CONF_LENGTH]; +} __packed; + +/* WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID + * This API sets rd parameter per mcs. + * Relevant only in Fixed Scheduling mode. 
+ * Returned event: WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID + */ +struct wmi_fixed_scheduling_ul_config_cmd { + /* Use mcs -1 to set for every mcs */ + s8 mcs; + /* Number of frames with rd bit set in a single virtual slot */ + u8 rd_count_per_slot; + u8 reserved[2]; } __packed; /* CMD: WMI_RF_XPM_READ_CMDID */ @@ -1362,6 +1502,93 @@ struct wmi_set_long_range_config_complete_event { u8 reserved[3]; } __packed; +/* payload max size is 236 bytes: max event buffer size (256) - WMI headers + * (16) - prev struct field size (4) + */ +#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236) +#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236) +#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236) + +enum wmi_internal_fw_ioctl_code { + WMI_INTERNAL_FW_CODE_NONE = 0x0, + WMI_INTERNAL_FW_CODE_QCOM = 0x1, +}; + +/* WMI_INTERNAL_FW_IOCTL_CMDID */ +struct wmi_internal_fw_ioctl_cmd { + /* enum wmi_internal_fw_ioctl_code */ + __le16 code; + __le16 length; + /* payload max size is WMI_MAX_IOCTL_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_INTERNAL_FW_IOCTL_EVENTID */ +struct wmi_internal_fw_ioctl_event { + /* wmi_fw_status */ + u8 status; + u8 reserved; + __le16 length; + /* payload max size is WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_INTERNAL_FW_EVENT_EVENTID */ +struct wmi_internal_fw_event_event { + __le16 id; + __le16 length; + /* payload max size is WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_BF_CONTROL_CMDID */ +struct wmi_bf_control_cmd { + /* wmi_bf_triggers */ + __le32 triggers; + u8 cid; + /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */ + u8 txss_mode; + /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */ + u8 brp_mode; + /* Max cts threshold (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_thr; + /* Max cts threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_dense_thr; + /* Max b-ack threshold (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_thr; + /* Max b-ack threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_dense_thr; + u8 reserved0; + /* Wrong sectors threshold */ + __le32 wrong_sector_bis_thr; + /* BOOL to enable/disable long term trigger */ + u8 long_term_enable; + /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and + * long_term_trig_timeout_per_mcs arrays, 0 = Ignore + */ + u8 long_term_update_thr; + /* Long term throughput threshold [Mbps] */ + u8 long_term_mbps_th_tbl[WMI_NUM_MCS]; + u8 reserved1; + /* Long term timeout threshold table [msec] */ + __le16 long_term_trig_timeout_per_mcs[WMI_NUM_MCS]; + u8 reserved2[2]; +} __packed; + /* WMI Events * List of Events (target to host) */ @@ -1420,6 +1647,7 @@ enum wmi_event_id { WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C, WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D, WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E, + WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID = 0x185F, /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, WMI_WBE_LINK_DOWN_EVENTID = 0x1861, @@ -1429,6 +1657,7 @@ enum wmi_event_id { WMI_RING_EN_EVENTID = 0x1865, WMI_GET_RF_STATUS_EVENTID = 0x1866, WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867, + WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868, WMI_UNIT_TEST_EVENTID = 0x1900, WMI_FLASH_READ_DONE_EVENTID = 
0x1902, WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, @@ -1458,6 +1687,7 @@ enum wmi_event_id { WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, /* return the Power Save profile */ WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942, + WMI_TSF_SYNC_STATUS_EVENTID = 0x1973, WMI_TOF_SESSION_END_EVENTID = 0x1991, WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, WMI_TOF_SET_LCR_EVENTID = 0x1993, @@ -1475,6 +1705,7 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + WMI_BF_CONTROL_EVENTID = 0x19AA, WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0, WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1, WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2, @@ -1485,12 +1716,18 @@ enum wmi_event_id { WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04, WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05, + WMI_GET_ASSOC_LIST_RES_EVENTID = 0x1A06, + WMI_GET_CCA_INDICATIONS_EVENTID = 0x1A07, + WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08, + WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A, + WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, WMI_MAC_ADDR_RESP_EVENTID = 0x9003, WMI_FW_VER_EVENTID = 0x9004, WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, + WMI_INTERNAL_FW_SET_CHANNEL = 0x9006, WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF, }; @@ -1562,12 +1799,16 @@ enum rf_type { RF_UNKNOWN = 0x00, RF_MARLON = 0x01, RF_SPARROW = 0x02, + RF_TALYNA1 = 0x03, + RF_TALYNA2 = 0x04, }; /* WMI_GET_RF_STATUS_EVENTID */ enum board_file_rf_type { BF_RF_MARLON = 0x00, BF_RF_SPARROW = 0x01, + BF_RF_TALYNA1 = 0x02, + BF_RF_TALYNA2 = 0x03, }; /* WMI_GET_RF_STATUS_EVENTID */ @@ -1607,6 +1848,7 @@ enum baseband_type { BASEBAND_SPARROW_M_C0 = 0x06, BASEBAND_SPARROW_M_D0 = 0x07, BASEBAND_TALYN_M_A0 = 0x08, + BASEBAND_TALYN_M_B0 = 0x09, }; /* WMI_GET_BASEBAND_TYPE_EVENTID */ @@ -1651,7 +1893,11 @@ struct wmi_ready_event { u8 numof_additional_mids; /* rfc read calibration result. 5..15 */ u8 rfc_read_calib_result; - u8 reserved[3]; + /* Max associated STAs supported by FW in AP mode (default 0 means 8 + * STA) + */ + u8 max_assoc_sta; + u8 reserved[2]; } __packed; /* WMI_NOTIFY_REQ_DONE_EVENTID */ @@ -1676,8 +1922,12 @@ struct wmi_notify_req_done_event { /* WMI_CONNECT_EVENTID */ struct wmi_connect_event { + /* enum wmi_channel WMI_CHANNEL_1..WMI_CHANNEL_6; for EDMG this is + * the primary channel number + */ u8 channel; - u8 reserved0; + /* enum wmi_channel WMI_CHANNEL_9..WMI_CHANNEL_12 */ + u8 edmg_channel; u8 bssid[WMI_MAC_LEN]; __le16 listen_interval; __le16 beacon_interval; @@ -1766,13 +2016,13 @@ enum wmi_pno_result { }; struct wmi_start_sched_scan_event { - /* pno_result */ + /* wmi_pno_result */ u8 result; u8 reserved[3]; } __packed; struct wmi_stop_sched_scan_event { - /* pno_result */ + /* wmi_pno_result */ u8 result; u8 reserved[3]; } __packed; @@ -1839,9 +2089,17 @@ struct wmi_ba_status_event { /* WMI_DELBA_EVENTID */ struct wmi_delba_event { + /* Used for cid less than 8. 
For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 from_initiator; __le16 reason; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_VRING_CFG_DONE_EVENTID */ @@ -1854,9 +2112,17 @@ struct wmi_vring_cfg_done_event { /* WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 reserved; __le16 status; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved2[2]; } __packed; /* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */ @@ -1904,6 +2170,9 @@ struct wmi_rx_desc_ring_cfg_done_event { /* WMI_RCP_ADDBA_REQ_EVENTID */ struct wmi_rcp_addba_req_event { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; /* ieee80211_ba_parameterset as it received */ @@ -1911,6 +2180,11 @@ struct wmi_rcp_addba_req_event { __le16 ba_timeout; /* ieee80211_ba_seqstrl field as it received */ __le16 ba_seq_ctrl; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_CFG_RX_CHAIN_DONE_EVENTID */ @@ -2085,6 +2359,13 @@ struct wmi_set_high_power_table_params_event { u8 reserved[3]; } __packed; +/* WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID */ +struct wmi_fixed_scheduling_ul_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures @@ -2433,6 +2714,8 @@ struct wmi_link_maintain_cfg { __le32 bad_beacons_num_threshold; /* SNR limit for bad_beacons_detector */ __le32 bad_beacons_snr_threshold_db; + /* timeout for disassoc response frame in uSec */ + __le32 disconnect_timeout; } __packed; /* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */ @@ -2662,6 +2945,7 @@ enum wmi_tof_session_end_status { WMI_TOF_SESSION_END_FAIL = 0x01, WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02, WMI_TOF_SESSION_END_ABORTED = 0x03, + WMI_TOF_SESSION_END_BUSY = 0x04, }; /* WMI_TOF_SESSION_END_EVENTID */ @@ -3068,7 +3352,40 @@ struct wmi_set_silent_rssi_table_done_event { __le32 table; } __packed; -/* \WMI_COMMAND_NOT_SUPPORTED_EVENTID */ +/* WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID */ +struct wmi_vring_switch_timing_config_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_ASSOC_LIST_RES_EVENTID */ +struct wmi_assoc_sta_info { + u8 mac[WMI_MAC_LEN]; + u8 omni_index_address; + u8 reserved; +} __packed; + +#define WMI_GET_ASSOC_LIST_SIZE (8) + +/* WMI_GET_ASSOC_LIST_RES_EVENTID + * Returns up to MAX_ASSOC_STA_LIST_SIZE associated STAs + */ +struct wmi_get_assoc_list_res_event { + struct wmi_assoc_sta_info assoc_sta_list[WMI_GET_ASSOC_LIST_SIZE]; + /* STA count */ + u8 count; + u8 reserved[3]; +} __packed; + +/* WMI_BF_CONTROL_EVENTID */ +struct wmi_bf_control_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */ struct wmi_command_not_supported_event { /* device id */ u8 mid; @@ -3079,4 +3396,62 @@ struct wmi_command_not_supported_event { __le16 reserved1; } __packed; +/* WMI_TSF_SYNC_CMDID */ +struct wmi_tsf_sync_cmd { + /* The time interval to send announce frame in one BI */ + 
u8 interval_ms; + /* The mcs to send announce frame */ + u8 mcs; + u8 reserved[6]; +} __packed; + +/* WMI_TSF_SYNC_STATUS_EVENTID */ +enum wmi_tsf_sync_status { + WMI_TSF_SYNC_SUCCESS = 0x00, + WMI_TSF_SYNC_FAILED = 0x01, + WMI_TSF_SYNC_REJECTED = 0x02, +}; + +/* WMI_TSF_SYNC_STATUS_EVENTID */ +struct wmi_tsf_sync_status_event { + /* enum wmi_tsf_sync_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_CCA_INDICATIONS_EVENTID */ +struct wmi_get_cca_indications_event { + /* wmi_fw_status */ + u8 status; + /* CCA-Energy Detect in percentage over last BI (0..100) */ + u8 cca_ed_percent; + /* Averaged CCA-Energy Detect in percent over number of BIs (0..100) */ + u8 cca_ed_avg_percent; + /* NAV percent over last BI (0..100) */ + u8 nav_percent; + /* Averaged NAV percent over number of BIs (0..100) */ + u8 nav_avg_percent; + u8 reserved[3]; +} __packed; + +/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID */ +struct wmi_set_cca_indications_bi_avg_num_cmd { + /* set the number of bis to average cca_ed (0..255) */ + u8 bi_number; + u8 reserved[3]; +} __packed; + +/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID */ +struct wmi_set_cca_indications_bi_avg_num_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_INTERNAL_FW_SET_CHANNEL */ +struct wmi_internal_fw_set_channel_event { + u8 channel_num; + u8 reserved[3]; +} __packed; + #endif /* __WILOCITY_WMI_H__ */ diff --git a/drivers/net/wireless/cnss_genl/Kconfig b/drivers/net/wireless/cnss_genl/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..f1b8a586ec90e692549bdc498ad5488a7b8d1103 --- /dev/null +++ b/drivers/net/wireless/cnss_genl/Kconfig @@ -0,0 +1,7 @@ +config CNSS_GENL + tristate "CNSS Generic Netlink Socket Driver" + ---help--- + This module creates generic netlink family "CLD80211". This can be + used by cld driver and userspace utilities to communicate over + netlink sockets. This module creates different multicast groups to + facilitate the same. diff --git a/drivers/net/wireless/cnss_genl/Makefile b/drivers/net/wireless/cnss_genl/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9431c9e596bb82962a1b6fe46d4399ccfbd840b1 --- /dev/null +++ b/drivers/net/wireless/cnss_genl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_CNSS_GENL) := cnss_nl.o diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c new file mode 100644 index 0000000000000000000000000000000000000000..d49691548596d7b7e32c92d84ad2ef7855a90b72 --- /dev/null +++ b/drivers/net/wireless/cnss_genl/cnss_nl.c @@ -0,0 +1,209 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include + +#define CLD80211_GENL_NAME "cld80211" + +#define CLD80211_MULTICAST_GROUP_SVC_MSGS "svc_msgs" +#define CLD80211_MULTICAST_GROUP_HOST_LOGS "host_logs" +#define CLD80211_MULTICAST_GROUP_FW_LOGS "fw_logs" +#define CLD80211_MULTICAST_GROUP_PER_PKT_STATS "per_pkt_stats" +#define CLD80211_MULTICAST_GROUP_DIAG_EVENTS "diag_events" +#define CLD80211_MULTICAST_GROUP_FATAL_EVENTS "fatal_events" +#define CLD80211_MULTICAST_GROUP_OEM_MSGS "oem_msgs" + +static const struct genl_multicast_group nl_mcgrps[] = { + [CLD80211_MCGRP_SVC_MSGS] = { .name = + CLD80211_MULTICAST_GROUP_SVC_MSGS}, + [CLD80211_MCGRP_HOST_LOGS] = { .name = + CLD80211_MULTICAST_GROUP_HOST_LOGS}, + [CLD80211_MCGRP_FW_LOGS] = { .name = + CLD80211_MULTICAST_GROUP_FW_LOGS}, + [CLD80211_MCGRP_PER_PKT_STATS] = { .name = + CLD80211_MULTICAST_GROUP_PER_PKT_STATS}, + [CLD80211_MCGRP_DIAG_EVENTS] = { .name = + CLD80211_MULTICAST_GROUP_DIAG_EVENTS}, + [CLD80211_MCGRP_FATAL_EVENTS] = { .name = + CLD80211_MULTICAST_GROUP_FATAL_EVENTS}, + [CLD80211_MCGRP_OEM_MSGS] = { .name = + CLD80211_MULTICAST_GROUP_OEM_MSGS}, +}; + +struct cld_ops { + cld80211_cb cb; + void *cb_ctx; +}; + +struct cld80211_nl_data { + struct cld_ops cld_ops[CLD80211_MAX_COMMANDS]; +}; + +static struct cld80211_nl_data nl_data; + +static inline struct cld80211_nl_data *get_local_ctx(void) +{ + return &nl_data; +} + +static struct genl_ops nl_ops[CLD80211_MAX_COMMANDS]; + +/* policy for the attributes */ +static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = { + [CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED }, + [CLD80211_ATTR_DATA] = { .type = NLA_BINARY, + .len = CLD80211_MAX_NL_DATA }, + [CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY, + .len = CLD80211_MAX_NL_DATA }, +}; + +static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, + struct genl_info *info) +{ + u8 cmd_id = ops->cmd; + struct cld80211_nl_data *nl = get_local_ctx(); + + if (cmd_id < 1 || cmd_id > CLD80211_MAX_COMMANDS) { + pr_err("CLD80211: Command Not supported: %u\n", cmd_id); + return -EOPNOTSUPP; + } + info->user_ptr[0] = nl->cld_ops[cmd_id - 1].cb; + info->user_ptr[1] = nl->cld_ops[cmd_id - 1].cb_ctx; + + return 0; +} + +/* The netlink family */ +static struct genl_family cld80211_fam __ro_after_init = { + .name = CLD80211_GENL_NAME, + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ + .maxattr = CLD80211_ATTR_MAX, + .netnsok = true, + .pre_doit = cld80211_pre_doit, + .post_doit = NULL, + .module = THIS_MODULE, + .ops = nl_ops, + .n_ops = ARRAY_SIZE(nl_ops), + .mcgrps = nl_mcgrps, + .n_mcgrps = ARRAY_SIZE(nl_mcgrps), +}; + +int register_cld_cmd_cb(u8 cmd_id, cld80211_cb func, void *cb_ctx) +{ + struct cld80211_nl_data *nl = get_local_ctx(); + + pr_debug("CLD80211: Registering command: %d\n", cmd_id); + if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) { + pr_debug("CLD80211: invalid command: %d\n", cmd_id); + return -EINVAL; + } + + nl->cld_ops[cmd_id - 1].cb = func; + nl->cld_ops[cmd_id - 1].cb_ctx = cb_ctx; + + return 0; +} +EXPORT_SYMBOL(register_cld_cmd_cb); + +int deregister_cld_cmd_cb(u8 cmd_id) +{ + struct cld80211_nl_data *nl = get_local_ctx(); + + pr_debug("CLD80211: De-registering command: %d\n", cmd_id); + if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) { + pr_debug("CLD80211: invalid command: %d\n", cmd_id); + return -EINVAL; + } + + nl->cld_ops[cmd_id - 1].cb = NULL; + nl->cld_ops[cmd_id - 1].cb_ctx = NULL; + + return 0; +} +EXPORT_SYMBOL(deregister_cld_cmd_cb); + +struct 
genl_family *cld80211_get_genl_family(void) +{ + return &cld80211_fam; +} +EXPORT_SYMBOL(cld80211_get_genl_family); + +static int cld80211_doit(struct sk_buff *skb, struct genl_info *info) +{ + cld80211_cb cld_cb; + void *cld_ctx; + + cld_cb = info->user_ptr[0]; + + if (!cld_cb) { + pr_err("CLD80211: Not supported\n"); + return -EOPNOTSUPP; + } + cld_ctx = info->user_ptr[1]; + + if (info->attrs[CLD80211_ATTR_VENDOR_DATA]) { + cld_cb(nla_data(info->attrs[CLD80211_ATTR_VENDOR_DATA]), + nla_len(info->attrs[CLD80211_ATTR_VENDOR_DATA]), + cld_ctx, info->snd_portid); + } else { + pr_err("CLD80211: No CLD80211_ATTR_VENDOR_DATA\n"); + return -EINVAL; + } + return 0; +} + +static int __cld80211_init(void) +{ + int err, i; + + memset(&nl_ops[0], 0, sizeof(nl_ops)); + + pr_info("CLD80211: Initializing\n"); + for (i = 0; i < CLD80211_MAX_COMMANDS; i++) { + nl_ops[i].cmd = i + 1; + nl_ops[i].doit = cld80211_doit; + nl_ops[i].flags = GENL_ADMIN_PERM; + nl_ops[i].policy = cld80211_policy; + } + + err = genl_register_family(&cld80211_fam); + if (err) { + pr_err("CLD80211: Failed to register cld80211 family: %d\n", + err); + } + + return err; +} + +static void __cld80211_exit(void) +{ + genl_unregister_family(&cld80211_fam); +} + +static int __init cld80211_init(void) +{ + return __cld80211_init(); +} + +static void __exit cld80211_exit(void) +{ + __cld80211_exit(); +} + +module_init(cld80211_init); +module_exit(cld80211_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CNSS generic netlink module"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 1610722b8099dc54f8ecbb8552e0db47a8c1a87f..747eef82cefd9673c093e921f5b269f1a5f83273 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -174,7 +176,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, const struct fw_img *image) { - int sec_idx, idx; + int sec_idx, idx, ret; u32 offset = 0; /* @@ -201,17 +203,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, */ if (sec_idx >= image->num_sec - 1) { IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(fwrt); - return -EINVAL; + ret = -EINVAL; + goto err; } /* copy the CSS block to the dram */ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx); + if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) { + IWL_ERR(fwrt, "CSS block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, - fwrt->fw_paging_db[0].fw_paging_size); + image->sec[sec_idx].len); dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys, fwrt->fw_paging_db[0].fw_paging_size, @@ -232,6 +240,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + if (block->fw_paging_size > image->sec[sec_idx].len - offset) { + IWL_ERR(fwrt, + "Paging: paging size is larger than remaining data in block %d\n", + idx); + ret = -EINVAL; + goto err; + } + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, block->fw_paging_size); @@ -242,19 +258,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", - fwrt->fw_paging_db[idx].fw_paging_size, - idx); + block->fw_paging_size, idx); + + offset += block->fw_paging_size; - offset += fwrt->fw_paging_db[idx].fw_paging_size; + if (offset > image->sec[sec_idx].len) { + IWL_ERR(fwrt, + "Paging: offset goes over section size\n"); + ret = -EINVAL; + goto err; + } } /* copy the last paging block */ if (fwrt->num_of_pages_in_last_blk > 0) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + if (image->sec[sec_idx].len - offset > block->fw_paging_size) { + IWL_ERR(fwrt, + "Paging: last block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(block->fw_paging_block), image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); + image->sec[sec_idx].len - offset); dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, @@ -266,6 +295,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, } return 0; + +err: + iwl_free_fw_paging(fwrt); + return ret; } static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index 26e114f6c2e41b05dd2b759816e6c0896374fca8..ce35eca4fc92ba8fceb0590ebeb02a5fc6d60285 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig @@ -67,3 +67,11 @@ config NFC_NQ This enables the NFC driver for NQx based devices. This is for i2c connected version. NCI protocol logic resides in the usermode and it has no other NFC dependencies. + +config NTAG_NQ + tristate "QTI NTAG Driver for NTAG communication" + depends on I2C + help + This enables NTAG driver for NTx based devices. + NTAG is NFC tags that combine passive NFC interface with contact i2c interface. + This is for i2c connected version. 
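As a rough illustration of how the nq-ntag character device introduced by this patch (see drivers/nfc/nq-ntag.c below) might be exercised from userspace: the driver creates an "nq-ntag" device node, NTAG_SET_OFFSET selects the tag memory offset used by subsequent transfers, read()/write() then move data over I2C at that offset, and NTAG_FD_STATE with arg = 1 arms field detection and blocks until an RF field is seen. The sketch below is a minimal, assumption-laden example only; the /dev path and the numeric ioctl encodings are placeholders (the real values would come from the driver's UAPI header, which is not part of this excerpt).

/*
 * Hypothetical userspace client for the nq-ntag character device.
 * NTAG_DEV, NTAG_SET_OFFSET and NTAG_FD_STATE below are placeholders;
 * the real node name and ioctl encodings are defined outside this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define NTAG_DEV	"/dev/nq-ntag"	/* assumed device node */
#define NTAG_SET_OFFSET	0x02		/* placeholder request code */
#define NTAG_FD_STATE	0x01		/* placeholder request code */

int main(void)
{
	char page[16];
	int fd = open(NTAG_DEV, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Select an offset inside the user memory window (0..55). */
	if (ioctl(fd, NTAG_SET_OFFSET, 4) < 0)
		perror("NTAG_SET_OFFSET");

	/* Read 16 bytes from the tag starting at the selected offset. */
	if (read(fd, page, sizeof(page)) < 0)
		perror("read");

	/* Arm field detect (arg = 1) and block until an RF field is seen. */
	if (ioctl(fd, NTAG_FD_STATE, 1) < 0)
		perror("NTAG_FD_STATE");

	close(fd);
	return 0;
}

Note that ntag_read()/ntag_write() in the driver appear to reject offsets outside 0..NTAG_USER_MEM_SPACE_MAX_OFFSET-1 with -EAGAIN, so a caller is expected to keep the offset set via NTAG_SET_OFFSET inside that window.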
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile index ad8b12d222cd986309f0bcead821ac9463ad2a11..d8d45190599ebbcae79406c8414fa9a44ec2ebe7 100644 --- a/drivers/nfc/Makefile +++ b/drivers/nfc/Makefile @@ -18,3 +18,4 @@ obj-$(CONFIG_NFC_NXP_NCI) += nxp-nci/ obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5/ obj-$(CONFIG_NFC_ST95HF) += st95hf/ obj-$(CONFIG_NFC_NQ) += nq-nci.o +obj-$(CONFIG_NTAG_NQ) += nq-ntag.o diff --git a/drivers/nfc/nq-ntag.c b/drivers/nfc/nq-ntag.c new file mode 100644 index 0000000000000000000000000000000000000000..4d1c0861f5e99a74d5692e9c4139821d283c574c --- /dev/null +++ b/drivers/nfc/nq-ntag.c @@ -0,0 +1,472 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nq-ntag.h" + +struct nqntag_platform_data { + unsigned int ntagfd_gpio; +}; + +const static struct of_device_id msm_match_table[] = { + {.compatible = "qcom,nq-ntag"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, msm_match_table); + +struct nqntag_dev { + wait_queue_head_t fd_wq; + struct mutex fd_mutex; + struct i2c_client *client; + dev_t devno; + struct class *nqntag_class; + struct device *nqntag_device; + struct cdev c_dev; + bool irq_enabled; + bool irq_wake_up; + spinlock_t irq_enabled_lock; + char offset; + unsigned int ntagfd_gpio; + enum of_gpio_flags fdflag; + /* read buffer*/ + size_t kbuflen; + u8 *kbuf; +}; + +/** + * nqntag_irq_state() + * + * Based on state enable/disable FD interrupt + * + * Return: void + */ +static void nqntag_irq_state(struct nqntag_dev *nqntag_dev, unsigned int state) +{ + unsigned long flags; + + spin_lock_irqsave(&nqntag_dev->irq_enabled_lock, flags); + if (state == FD_DISABLE) { + if (nqntag_dev->irq_enabled) { + disable_irq_nosync(nqntag_dev->client->irq); + nqntag_dev->irq_enabled = false; + } + } else { + if (!nqntag_dev->irq_enabled) { + nqntag_dev->irq_enabled = true; + enable_irq(nqntag_dev->client->irq); + } + } + spin_unlock_irqrestore(&nqntag_dev->irq_enabled_lock, flags); +} + +static irqreturn_t nqntag_dev_irq_handler(int irq, void *dev_id) +{ + struct nqntag_dev *nqntag_dev = dev_id; + + if (device_may_wakeup(&nqntag_dev->client->dev)) + pm_wakeup_event(&nqntag_dev->client->dev, WAKEUP_SRC_TIMEOUT); + + nqntag_irq_state(nqntag_dev, FD_DISABLE); + wake_up(&nqntag_dev->fd_wq); + return IRQ_HANDLED; +} + +static ssize_t ntag_read(struct file *file, char __user *buf, size_t count, + loff_t *offset) +{ + char *readdata; + int ret = 0; + size_t tmpcount = 1; + struct nqntag_dev *nqntag_dev; + char *bufaddr = NULL; + + if (!file || !file->private_data) + return -ENODATA; + nqntag_dev = file->private_data; + if (nqntag_dev->offset < NTAG_MIN_OFFSET || + nqntag_dev->offset >= NTAG_USER_MEM_SPACE_MAX_OFFSET) { + return -EAGAIN; + } + bufaddr = &nqntag_dev->offset; + ret = i2c_master_send(nqntag_dev->client, bufaddr, tmpcount); + if (ret < 0) { + dev_err(&nqntag_dev->client->dev, + "%s: failed to write %d\n", __func__, ret); + return -EIO; + } + 
/* count+1 to store NULL byte */ + readdata = kzalloc(count + 1, GFP_KERNEL); + if (readdata == NULL) + return -ENOMEM; + ret = i2c_master_recv(nqntag_dev->client, readdata, count); + if (ret >= 0) + ret = copy_to_user(buf, readdata, count) ? -EFAULT : ret; + kfree(readdata); + return ret; +} + +static ssize_t ntag_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + int ret = 0; + char *writedata; + struct nqntag_dev *nqntag_dev; + + if (!file || !file->private_data) + return -ENODATA; + nqntag_dev = file->private_data; + if (nqntag_dev->offset < NTAG_MIN_OFFSET || + nqntag_dev->offset >= NTAG_USER_MEM_SPACE_MAX_OFFSET) { + return -EAGAIN; + } + /* count+2 to store Offset and NULL byte */ + writedata = kzalloc(count + 2, GFP_KERNEL); + if (writedata == NULL) + return -ENOMEM; + writedata[0] = nqntag_dev->offset; + if (copy_from_user(&writedata[1], buf, count)) { + dev_err(&nqntag_dev->client->dev, "Failed to copy from user\n"); + kfree(writedata); + return -EFAULT; + } + ret = i2c_master_send(nqntag_dev->client, writedata, count + 1); + if (ret != (count + 1)) { + dev_err(&nqntag_dev->client->dev, + "%s: failed to write %d\n", __func__, ret); + kfree(writedata); + return -EIO; + } + kfree(writedata); + return count; +} + +static int ntag_open(struct inode *inode, struct file *filp) +{ + struct nqntag_dev *nqntag_dev = container_of(inode->i_cdev, + struct nqntag_dev, c_dev); + + filp->private_data = nqntag_dev; + dev_dbg(&nqntag_dev->client->dev, + "%s: %d,%d\n", __func__, imajor(inode), iminor(inode)); + return 0; +} + +/** + * ntag_ioctl_fd_state() + * @filp: pointer to the file descriptor + * @arg: mode that we want to move to + * + * Device power control. Depending on the arg value, device moves to + * different states + * (arg = 0): FD_DISABLE + * (arg = 1): FD_ENABLE + * + * Return: -ENOIOCTLCMD if arg is not supported + */ +static int ntag_ioctl_fd_state(struct file *filp, unsigned long arg) +{ + int r = 0, ret = 0; + struct nqntag_dev *nqntag_dev = filp->private_data; + int irq_gpio_val = 0; + + if (arg == 0) { + /* Disabling FD interrupt */ + nqntag_irq_state(nqntag_dev, FD_DISABLE); + return ret; + } else if (arg == 1) { + /* Enable FD interrupt and wait for RF field detection*/ + nqntag_irq_state(nqntag_dev, FD_ENABLE); + mutex_lock(&nqntag_dev->fd_mutex); + irq_gpio_val = gpio_get_value(nqntag_dev->ntagfd_gpio); + dev_dbg(&nqntag_dev->client->dev, + "%s: READ GPIO_VAL: %d/n", __func__, irq_gpio_val); + if (!irq_gpio_val ^ nqntag_dev->fdflag) { + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + goto err; + } + while (1) { + r = 0; + nqntag_irq_state(nqntag_dev, FD_ENABLE); + irq_gpio_val = gpio_get_value( + nqntag_dev->ntagfd_gpio); + if (!irq_gpio_val ^ nqntag_dev->fdflag) + r = wait_event_interruptible( + nqntag_dev->fd_wq, + !nqntag_dev->irq_enabled); + if (r) { + nqntag_irq_state(nqntag_dev, + FD_DISABLE); + ret = -EAGAIN; + goto err; + } else { + break; + } + } + } + } else { + ret = -EINVAL; + } +err: + mutex_unlock(&nqntag_dev->fd_mutex); + return ret; +} + +static long ntag_ioctl(struct file *pfile, unsigned int cmd, + unsigned long arg) +{ + long r = 0; + struct nqntag_dev *nqntag_dev; + + if (!pfile || !pfile->private_data) + return -ENODATA; + nqntag_dev = pfile->private_data; + switch (cmd) { + case NTAG_FD_STATE: + r = ntag_ioctl_fd_state(pfile, arg); + break; + case NTAG_SET_OFFSET: + nqntag_dev->offset = (char)arg; + break; + default: + r = -ENOIOCTLCMD; + } + return r; +} + +static const struct file_operations ntag_dev_fops = 
{ + .llseek = no_llseek, + .read = ntag_read, + .write = ntag_write, + .open = ntag_open, + .unlocked_ioctl = ntag_ioctl, +}; + +static int nqntag_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int r = 0, irqn = 0; + struct nqntag_platform_data *pdata; + struct nqntag_dev *nqntag_dev; + enum of_gpio_flags fdflag; + + dev_dbg(&client->dev, "%s: enter\n", __func__); + pdata = devm_kzalloc(&client->dev, + sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + r = -ENOMEM; + goto err_probe; + } + pdata->ntagfd_gpio = of_get_named_gpio( + client->dev.of_node, "qcom,nq-ntagfd", 0); + if ((!gpio_is_valid(pdata->ntagfd_gpio))) { + r = -EINVAL; + goto err_probe; + } + r = of_get_named_gpio_flags( + client->dev.of_node, "qcom,nq-ntagfd", 0, &fdflag); + if (r < 0) + goto err_probe; + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__); + r = -ENODEV; + goto err_probe; + } + nqntag_dev = devm_kzalloc(&client->dev, sizeof(*nqntag_dev), + GFP_KERNEL); + if (nqntag_dev == NULL) { + r = -ENOMEM; + goto err_probe; + } + nqntag_dev->client = client; + nqntag_dev->fdflag = fdflag; + nqntag_dev->kbuflen = MAX_BUFFER_SIZE; + nqntag_dev->kbuf = devm_kzalloc(&client->dev, MAX_BUFFER_SIZE, + GFP_KERNEL); + if (!nqntag_dev->kbuf) { + r = -ENOMEM; + goto err_probe; + } + r = devm_gpio_request(&client->dev, pdata->ntagfd_gpio, + "ntagfd_gpio"); + if (r) { + r = -ENOMEM; + goto err_probe; + } + r = gpio_direction_input(pdata->ntagfd_gpio); + if (r) { + dev_err(&client->dev, + "%s: unable to set direction for fd gpio [%d]\n", + __func__, pdata->ntagfd_gpio); + r = -EINVAL; + goto err_probe; + } + irqn = gpio_to_irq(pdata->ntagfd_gpio); + if (irqn < 0) { + r = -EINVAL; + goto err_probe; + } + client->irq = irqn; + nqntag_dev->ntagfd_gpio = pdata->ntagfd_gpio; + /* init mutex and wait queues */ + init_waitqueue_head(&nqntag_dev->fd_wq); + mutex_init(&nqntag_dev->fd_mutex); + spin_lock_init(&nqntag_dev->irq_enabled_lock); + r = alloc_chrdev_region(&nqntag_dev->devno, 0, DEV_COUNT, DEVICE_NAME); + if (r < 0) { + dev_err(&client->dev, + "%s: failed to alloc chrdev region\n", __func__); + goto err_probe; + } + nqntag_dev->nqntag_class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(nqntag_dev->nqntag_class)) { + dev_err(&client->dev, + "%s: failed to register device class\n", __func__); + r = -EINVAL; + goto err_class_create; + } + cdev_init(&nqntag_dev->c_dev, &ntag_dev_fops); + r = cdev_add(&nqntag_dev->c_dev, nqntag_dev->devno, DEV_COUNT); + if (r < 0) { + dev_err(&client->dev, "%s: failed to add cdev\n", __func__); + goto err_cdev_add; + } + nqntag_dev->nqntag_device = device_create(nqntag_dev->nqntag_class, + NULL, nqntag_dev->devno, nqntag_dev, + DEVICE_NAME); + if (IS_ERR(nqntag_dev->nqntag_device)) { + dev_err(&client->dev, + "%s: failed to create the device\n", __func__); + r = -EINVAL; + goto err_device_create; + } + /* NTAG_INT IRQ */ + nqntag_dev->irq_enabled = true; + r = devm_request_irq(&client->dev, client->irq, nqntag_dev_irq_handler, + IRQ_TYPE_EDGE_FALLING, client->name, nqntag_dev); + if (r) { + dev_err(&client->dev, "%s: request_irq failed\n", __func__); + goto err_request_irq_failed; + } + nqntag_irq_state(nqntag_dev, FD_DISABLE); + device_init_wakeup(&client->dev, true); + i2c_set_clientdata(client, nqntag_dev); + nqntag_dev->irq_wake_up = false; + return 0; + +err_request_irq_failed: + device_destroy(nqntag_dev->nqntag_class, nqntag_dev->devno); +err_device_create: + cdev_del(&nqntag_dev->c_dev); 
+err_cdev_add: + class_destroy(nqntag_dev->nqntag_class); +err_class_create: + unregister_chrdev_region(nqntag_dev->devno, DEV_COUNT); +err_probe: + dev_err(&client->dev, "%s: probing NQ NTAG failed ret: %d\n", + __func__, r); + return r; +} + +static int nqntag_remove(struct i2c_client *client) +{ + struct nqntag_dev *nqntag_dev; + + nqntag_dev = i2c_get_clientdata(client); + device_destroy(nqntag_dev->nqntag_class, nqntag_dev->devno); + cdev_del(&nqntag_dev->c_dev); + class_destroy(nqntag_dev->nqntag_class); + unregister_chrdev_region(nqntag_dev->devno, DEV_COUNT); + return 0; +} + +#ifdef CONFIG_PM +/* + * power management + */ +static int nqntag_suspend(struct device *device) +{ + struct i2c_client *client = to_i2c_client(device); + struct nqntag_dev *nqntag_dev = i2c_get_clientdata(client); + + nqntag_irq_state(nqntag_dev, FD_ENABLE); + if (device_may_wakeup(&client->dev)) { + if (!enable_irq_wake(client->irq)) + nqntag_dev->irq_wake_up = true; + } + return 0; +} + +static int nqntag_resume(struct device *device) +{ + struct i2c_client *client = to_i2c_client(device); + struct nqntag_dev *nqntag_dev = i2c_get_clientdata(client); + + nqntag_irq_state(nqntag_dev, FD_DISABLE); + if (device_may_wakeup(&client->dev) && nqntag_dev->irq_wake_up) { + if (!disable_irq_wake(client->irq)) + nqntag_dev->irq_wake_up = false; + } + return 0; +} +#endif /* CONFIG_PM */ + +static const struct i2c_device_id nqntag_id[] = { + {"nqntag-i2c", 0}, + {}, +}; +static const struct dev_pm_ops ntag_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(nqntag_suspend, + nqntag_resume) +}; + +static struct i2c_driver nqntag = { + .id_table = nqntag_id, + .probe = nqntag_probe, + .remove = nqntag_remove, + .driver = { + .name = "nq-ntag", + .of_match_table = msm_match_table, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .pm = &ntag_pm_ops, + }, +}; + +static int __init nqntag_dev_init(void) +{ + return i2c_add_driver(&nqntag); +} +module_init(nqntag_dev_init); + +static void __exit nqntag_dev_exit(void) +{ + i2c_del_driver(&nqntag); +} +module_exit(nqntag_dev_exit); + +MODULE_DESCRIPTION("NTAG nqntag"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/nfc/nq-ntag.h b/drivers/nfc/nq-ntag.h new file mode 100644 index 0000000000000000000000000000000000000000..e1e9814dffc7960a1830c4e3483137e60f6325b9 --- /dev/null +++ b/drivers/nfc/nq-ntag.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __NQ_NTAG_H +#define __NQ_NTAG_H + +#include + +#define DEV_COUNT 1 +#define DEVICE_NAME "nq-ntag" +#define CLASS_NAME "nqntag" +#define FD_DISABLE 1 +#define FD_ENABLE 0 +#define MAX_BUFFER_SIZE (320) +#define WAKEUP_SRC_TIMEOUT (2000) +#define NTAG_MIN_OFFSET 0 +#define NTAG_USER_MEM_SPACE_MAX_OFFSET 56 + +#endif diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index e153e8b64bb8bdd527cd4681c7cb4605d06d3f8e..d5553c47014fade81a4f461903b3cb6c4372ccf5 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -62,6 +62,9 @@ struct pn533_usb_phy { struct urb *out_urb; struct urb *in_urb; + struct urb *ack_urb; + u8 *ack_buffer; + struct pn533 *priv; }; @@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) struct pn533_usb_phy *phy = dev->phy; static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ - int rc; - phy->out_urb->transfer_buffer = (u8 *)ack; - phy->out_urb->transfer_buffer_length = sizeof(ack); - rc = usb_submit_urb(phy->out_urb, flags); + if (!phy->ack_buffer) { + phy->ack_buffer = kmemdup(ack, sizeof(ack), flags); + if (!phy->ack_buffer) + return -ENOMEM; + } - return rc; + phy->ack_urb->transfer_buffer = phy->ack_buffer; + phy->ack_urb->transfer_buffer_length = sizeof(ack); + return usb_submit_urb(phy->ack_urb, flags); } static int pn533_usb_send_frame(struct pn533 *dev, @@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) /* Power on th reader (CCID cmd) */ u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON, 0, 0, 0, 0, 0, 0, 3, 0, 0}; + char *buffer; + int transferred; int rc; void *cntx; struct pn533_acr122_poweron_rdr_arg arg; dev_dbg(&phy->udev->dev, "%s\n", __func__); + buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL); + if (!buffer) + return -ENOMEM; + init_completion(&arg.done); cntx = phy->in_urb->context; /* backup context */ phy->in_urb->complete = pn533_acr122_poweron_rdr_resp; phy->in_urb->context = &arg; - phy->out_urb->transfer_buffer = cmd; - phy->out_urb->transfer_buffer_length = sizeof(cmd); - print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, cmd, sizeof(cmd), false); - rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); - if (rc) { + rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), + &transferred, 0); + kfree(buffer); + if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, "Reader power on cmd error %d\n", rc); return rc; @@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface, phy->in_urb = usb_alloc_urb(0, GFP_KERNEL); phy->out_urb = usb_alloc_urb(0, GFP_KERNEL); + phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!phy->in_urb || !phy->out_urb) + if (!phy->in_urb || !phy->out_urb || !phy->ack_urb) goto error; usb_fill_bulk_urb(phy->in_urb, phy->udev, @@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface, usb_fill_bulk_urb(phy->out_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), NULL, 0, pn533_send_complete, phy); - + usb_fill_bulk_urb(phy->ack_urb, phy->udev, + usb_sndbulkpipe(phy->udev, out_endpoint), + NULL, 0, pn533_send_complete, phy); switch (id->driver_info) { case PN533_DEVICE_STD: @@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface, error: usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); + usb_free_urb(phy->ack_urb); usb_put_dev(phy->udev); kfree(in_buf); @@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface) 
usb_kill_urb(phy->in_urb); usb_kill_urb(phy->out_urb); + usb_kill_urb(phy->ack_urb); kfree(phy->in_urb->transfer_buffer); usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); + usb_free_urb(phy->ack_urb); + kfree(phy->ack_buffer); nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index baf283986a7ec38ea4f2562a43b36e31e1743f95..2fffd42767c7b5ddb398b32c3d5a1ec7e8e1dcc8 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -565,14 +565,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk) { struct device *dev = disk_to_dev(disk)->parent; struct nd_region *nd_region = to_nd_region(dev->parent); - const char *pol = nd_region->ro ? "only" : "write"; + int disk_ro = get_disk_ro(disk); - if (nd_region->ro == get_disk_ro(disk)) + /* + * Upgrade to read-only if the region is read-only preserve as + * read-only if the disk is already read-only. + */ + if (disk_ro || nd_region->ro == disk_ro) return 0; - dev_info(dev, "%s read-%s, marking %s read-%s\n", - dev_name(&nd_region->dev), pol, disk->disk_name, pol); - set_disk_ro(disk, nd_region->ro); + dev_info(dev, "%s read-only, marking %s read-only\n", + dev_name(&nd_region->dev), disk->disk_name); + set_disk_ro(disk, 1); return 0; diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 46d6cb1e03bd0b0ea2763b2fd74bb13bf07c44ab..8f845de8a8a2ed276b8ce10deac094638de0c8c5 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -18,7 +18,7 @@ config NVME_FABRICS config NVME_RDMA tristate "NVM Express over Fabrics RDMA host driver" - depends on INFINIBAND && BLOCK + depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK select NVME_CORE select NVME_FABRICS select SG_POOL diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index dd956311a85a51cb8a988ad756b5e185583dc6e9..38c128f230e7cdf1221e90c4f878f695347a077e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -665,6 +665,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, ret = PTR_ERR(meta); goto out_unmap; } + req->cmd_flags |= REQ_INTEGRITY; } } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 740aae51e1c6382f9038a6bc2696ec9da0c24b37..33d060c524e605ebc92d446993e6e0f84548b851 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -587,6 +587,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->transport); opts->transport = p; break; case NVMF_OPT_NQN: @@ -595,6 +596,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->subsysnqn); opts->subsysnqn = p; nqnlen = strlen(opts->subsysnqn); if (nqnlen >= NVMF_NQN_SIZE) { @@ -617,6 +619,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->traddr); opts->traddr = p; break; case NVMF_OPT_TRSVCID: @@ -625,6 +628,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->trsvcid); opts->trsvcid = p; break; case NVMF_OPT_QUEUE_SIZE: @@ -706,6 +710,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -EINVAL; goto out; } + nvmf_host_put(opts->host); opts->host = nvmf_host_add(p); kfree(p); if (!opts->host) { @@ -731,6 +736,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, ret = -ENOMEM; goto out; } + kfree(opts->host_traddr); opts->host_traddr = p; break; case NVMF_OPT_HOST_ID: diff --git 
a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 03e4ab65fe777ad3c6a347959fb9cdb2dc5a060a..48d20c2c125650388986048f8051118f494d7867 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -27,7 +27,7 @@ config NVME_TARGET_LOOP config NVME_TARGET_RDMA tristate "NVMe over Fabrics RDMA target support" - depends on INFINIBAND + depends on INFINIBAND && INFINIBAND_ADDR_TRANS depends on NVME_TARGET help This enables the NVMe RDMA target support, which allows exporting NVMe diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 52a297d5560a5a65b4ad1754db397d8c4a32b59c..0a963b17910528768eaa3562209bb906a3fa1a5c 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -46,10 +46,20 @@ config OF_EARLY_FLATTREE config OF_PROMTREE bool +config OF_KOBJ + bool "Display devicetree in sysfs" + def_bool SYSFS + help + Some embedded platforms have no need to display the devicetree + nodes and properties in sysfs. Disabling this option will save + a small amount of memory, as well as decrease boot time. By + default this option will be enabled if SYSFS is enabled. + # Hardly any platforms need this. It is safe to select, but only do so if you # need it. config OF_DYNAMIC bool "Support for dynamic device trees" if OF_UNITTEST + select OF_KOBJ help On some platforms, the device tree can be manipulated at runtime. While this option is selected automatically on such platforms, you diff --git a/drivers/of/Makefile b/drivers/of/Makefile index 97ad18c5f360baf23edda48a07c3bdd860628e48..700d878b7c6c3bd91d6dbe6c4e815564bb1985a7 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-y = base.o device.o platform.o property.o +obj-$(CONFIG_OF_KOBJ) += kobj.o obj-$(CONFIG_OF_DYNAMIC) += dynamic.o obj-$(CONFIG_OF_FLATTREE) += fdt.o obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o diff --git a/drivers/of/base.c b/drivers/of/base.c index 9e8dca88f071bb4a0e8e2f9743839c7912a9ff0d..48ec9878b79ce8eb0cb93182b20688a9869b2bc7 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -95,108 +95,6 @@ int __weak of_node_to_nid(struct device_node *np) } #endif -#ifndef CONFIG_OF_DYNAMIC -static void of_node_release(struct kobject *kobj) -{ - /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */ -} -#endif /* CONFIG_OF_DYNAMIC */ - -struct kobj_type of_node_ktype = { - .release = of_node_release, -}; - -static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, char *buf, - loff_t offset, size_t count) -{ - struct property *pp = container_of(bin_attr, struct property, attr); - return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length); -} - -/* always return newly allocated name, caller must free after use */ -static const char *safe_name(struct kobject *kobj, const char *orig_name) -{ - const char *name = orig_name; - struct kernfs_node *kn; - int i = 0; - - /* don't be a hero. 
After 16 tries give up */ - while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) { - sysfs_put(kn); - if (name != orig_name) - kfree(name); - name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i); - } - - if (name == orig_name) { - name = kstrdup(orig_name, GFP_KERNEL); - } else { - pr_warn("Duplicate name in %s, renamed to \"%s\"\n", - kobject_name(kobj), name); - } - return name; -} - -int __of_add_property_sysfs(struct device_node *np, struct property *pp) -{ - int rc; - - /* Important: Don't leak passwords */ - bool secure = strncmp(pp->name, "security-", 9) == 0; - - if (!IS_ENABLED(CONFIG_SYSFS)) - return 0; - - if (!of_kset || !of_node_is_attached(np)) - return 0; - - sysfs_bin_attr_init(&pp->attr); - pp->attr.attr.name = safe_name(&np->kobj, pp->name); - pp->attr.attr.mode = secure ? 0400 : 0444; - pp->attr.size = secure ? 0 : pp->length; - pp->attr.read = of_node_property_read; - - rc = sysfs_create_bin_file(&np->kobj, &pp->attr); - WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np); - return rc; -} - -int __of_attach_node_sysfs(struct device_node *np) -{ - const char *name; - struct kobject *parent; - struct property *pp; - int rc; - - if (!IS_ENABLED(CONFIG_SYSFS)) - return 0; - - if (!of_kset) - return 0; - - np->kobj.kset = of_kset; - if (!np->parent) { - /* Nodes without parents are new top level trees */ - name = safe_name(&of_kset->kobj, "base"); - parent = NULL; - } else { - name = safe_name(&np->parent->kobj, kbasename(np->full_name)); - parent = &np->parent->kobj; - } - if (!name) - return -ENOMEM; - rc = kobject_add(&np->kobj, parent, "%s", name); - kfree(name); - if (rc) - return rc; - - for_each_property_of_node(np, pp) - __of_add_property_sysfs(np, pp); - - return 0; -} - static struct device_node **phandle_cache; static u32 phandle_cache_mask; @@ -1608,22 +1506,6 @@ int __of_remove_property(struct device_node *np, struct property *prop) return 0; } -void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop) -{ - sysfs_remove_bin_file(&np->kobj, &prop->attr); - kfree(prop->attr.attr.name); -} - -void __of_remove_property_sysfs(struct device_node *np, struct property *prop) -{ - if (!IS_ENABLED(CONFIG_SYSFS)) - return; - - /* at early boot, bail here and defer setup to of_init() */ - if (of_kset && of_node_is_attached(np)) - __of_sysfs_remove_bin_file(np, prop); -} - /** * of_remove_property - Remove a property from a node. * @@ -1683,21 +1565,6 @@ int __of_update_property(struct device_node *np, struct property *newprop, return 0; } -void __of_update_property_sysfs(struct device_node *np, struct property *newprop, - struct property *oldprop) -{ - if (!IS_ENABLED(CONFIG_SYSFS)) - return; - - /* At early boot, bail out and defer setup to of_init() */ - if (!of_kset) - return; - - if (oldprop) - __of_sysfs_remove_bin_file(np, oldprop); - __of_add_property_sysfs(np, newprop); -} - /* * of_update_property - Update a property in a node, if the property does * not exist, add it. 
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 301b6db2b48db7d5826add0fff76721fdf125b3f..39e8cf7317649d38b3cc6c3610a6032d46ce180c 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -16,6 +16,11 @@ #include "of_private.h" +static struct device_node *kobj_to_device_node(struct kobject *kobj) +{ + return container_of(kobj, struct device_node, kobj); +} + /** * of_node_get() - Increment refcount of a node * @node: Node to inc refcount, NULL is supported to simplify writing of @@ -43,28 +48,6 @@ void of_node_put(struct device_node *node) } EXPORT_SYMBOL(of_node_put); -void __of_detach_node_sysfs(struct device_node *np) -{ - struct property *pp; - - if (!IS_ENABLED(CONFIG_SYSFS)) - return; - - BUG_ON(!of_node_is_initialized(np)); - if (!of_kset) - return; - - /* only remove properties if on sysfs */ - if (of_node_is_attached(np)) { - for_each_property_of_node(np, pp) - __of_sysfs_remove_bin_file(np, pp); - kobject_del(&np->kobj); - } - - /* finally remove the kobj_init ref */ - of_node_put(np); -} - static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain); int of_reconfig_notifier_register(struct notifier_block *nb) diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c new file mode 100644 index 0000000000000000000000000000000000000000..23d8f2e1ffea34ec62ba5aa79eeacbf31eb0fb43 --- /dev/null +++ b/drivers/of/kobj.c @@ -0,0 +1,164 @@ +#include +#include + +#include "of_private.h" + +/* true when node is initialized */ +static int of_node_is_initialized(struct device_node *node) +{ + return node && node->kobj.state_initialized; +} + +/* true when node is attached (i.e. present on sysfs) */ +int of_node_is_attached(struct device_node *node) +{ + return node && node->kobj.state_in_sysfs; +} + + +#ifndef CONFIG_OF_DYNAMIC +static void of_node_release(struct kobject *kobj) +{ + /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */ +} +#endif /* CONFIG_OF_DYNAMIC */ + +struct kobj_type of_node_ktype = { + .release = of_node_release, +}; + +static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct property *pp = container_of(bin_attr, struct property, attr); + return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length); +} + +/* always return newly allocated name, caller must free after use */ +static const char *safe_name(struct kobject *kobj, const char *orig_name) +{ + const char *name = orig_name; + struct kernfs_node *kn; + int i = 0; + + /* don't be a hero. After 16 tries give up */ + while (i < 16 && name && (kn = sysfs_get_dirent(kobj->sd, name))) { + sysfs_put(kn); + if (name != orig_name) + kfree(name); + name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i); + } + + if (name == orig_name) { + name = kstrdup(orig_name, GFP_KERNEL); + } else { + pr_warn("Duplicate name in %s, renamed to \"%s\"\n", + kobject_name(kobj), name); + } + return name; +} + +int __of_add_property_sysfs(struct device_node *np, struct property *pp) +{ + int rc; + + /* Important: Don't leak passwords */ + bool secure = strncmp(pp->name, "security-", 9) == 0; + + if (!IS_ENABLED(CONFIG_SYSFS)) + return 0; + + if (!of_kset || !of_node_is_attached(np)) + return 0; + + sysfs_bin_attr_init(&pp->attr); + pp->attr.attr.name = safe_name(&np->kobj, pp->name); + pp->attr.attr.mode = secure ? 0400 : 0444; + pp->attr.size = secure ? 
0 : pp->length; + pp->attr.read = of_node_property_read; + + rc = sysfs_create_bin_file(&np->kobj, &pp->attr); + WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np); + return rc; +} + +void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop) +{ + if (!IS_ENABLED(CONFIG_SYSFS)) + return; + + sysfs_remove_bin_file(&np->kobj, &prop->attr); + kfree(prop->attr.attr.name); +} + +void __of_remove_property_sysfs(struct device_node *np, struct property *prop) +{ + /* at early boot, bail here and defer setup to of_init() */ + if (of_kset && of_node_is_attached(np)) + __of_sysfs_remove_bin_file(np, prop); +} + +void __of_update_property_sysfs(struct device_node *np, struct property *newprop, + struct property *oldprop) +{ + /* At early boot, bail out and defer setup to of_init() */ + if (!of_kset) + return; + + if (oldprop) + __of_sysfs_remove_bin_file(np, oldprop); + __of_add_property_sysfs(np, newprop); +} + +int __of_attach_node_sysfs(struct device_node *np) +{ + const char *name; + struct kobject *parent; + struct property *pp; + int rc; + + if (!of_kset) + return 0; + + np->kobj.kset = of_kset; + if (!np->parent) { + /* Nodes without parents are new top level trees */ + name = safe_name(&of_kset->kobj, "base"); + parent = NULL; + } else { + name = safe_name(&np->parent->kobj, kbasename(np->full_name)); + parent = &np->parent->kobj; + } + if (!name) + return -ENOMEM; + rc = kobject_add(&np->kobj, parent, "%s", name); + kfree(name); + if (rc) + return rc; + + for_each_property_of_node(np, pp) + __of_add_property_sysfs(np, pp); + + return 0; +} + +void __of_detach_node_sysfs(struct device_node *np) +{ + struct property *pp; + + BUG_ON(!of_node_is_initialized(np)); + if (!of_kset) + return; + + /* only remove properties if on sysfs */ + if (of_node_is_attached(np)) { + for_each_property_of_node(np, pp) + __of_sysfs_remove_bin_file(np, pp); + kobject_del(&np->kobj); + } + + /* finally remove the kobj_init ref */ + of_node_put(np); +} + diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index f2111674e45c60d3cbc083a455c2064d2467ae91..3b0b3a5c7c6e5992a1072ce5f04e1b89b31f6b1c 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -35,12 +35,6 @@ extern struct mutex of_mutex; extern struct list_head aliases_lookup; extern struct kset *of_kset; - -static inline struct device_node *kobj_to_device_node(struct kobject *kobj) -{ - return container_of(kobj, struct device_node, kobj); -} - #if defined(CONFIG_OF_DYNAMIC) extern int of_property_notify(int action, struct device_node *np, struct property *prop, struct property *old_prop); @@ -55,6 +49,29 @@ static inline int of_property_notify(int action, struct device_node *np, } #endif /* CONFIG_OF_DYNAMIC */ +#if defined(CONFIG_OF_KOBJ) +int of_node_is_attached(struct device_node *node); +int __of_add_property_sysfs(struct device_node *np, struct property *pp); +void __of_remove_property_sysfs(struct device_node *np, struct property *prop); +void __of_update_property_sysfs(struct device_node *np, struct property *newprop, + struct property *oldprop); +int __of_attach_node_sysfs(struct device_node *np); +void __of_detach_node_sysfs(struct device_node *np); +#else +static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp) +{ + return 0; +} +static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {} +static inline void __of_update_property_sysfs(struct device_node *np, + struct property *newprop, struct property *oldprop) {} +static 
inline int __of_attach_node_sysfs(struct device_node *np) +{ + return 0; +} +static inline void __of_detach_node_sysfs(struct device_node *np) {} +#endif + #if defined(CONFIG_OF_UNITTEST) && defined(CONFIG_OF_OVERLAY) extern void __init unittest_unflatten_overlay_base(void); #else diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 91767d394b7a51395f22f9ebbf6eda250bab481e..3729fd1544b48f64134e1f4a3d55d988946d55f2 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -24,6 +24,7 @@ #include #include #include +#include #define MAX_RESERVED_REGIONS 32 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; @@ -54,8 +55,10 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, } *res_base = base; - if (nomap) + if (nomap) { + kmemleak_ignore_phys(base); return memblock_remove(base, size); + } return 0; } #else diff --git a/drivers/of/of_slimbus.c b/drivers/of/of_slimbus.c index 2b3d2401f61e00f30e06154d0d4dabeadaaa4eb9..41f64faf4599da1ec217365131cb1a4784ff87aa 100644 --- a/drivers/of/of_slimbus.c +++ b/drivers/of/of_slimbus.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2017-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,7 +29,7 @@ int of_register_slim_devices(struct slim_controller *ctrl) if (!ctrl->dev.of_node) return -EINVAL; - for_each_child_of_node(ctrl->dev.of_node, node) { + for_each_available_child_of_node(ctrl->dev.of_node, node) { struct property *prop; struct slim_device *slim; char *name; diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 2a5685662eb0a2797a60ec0281ab5801ed9d5831..5a4963060b8583a8357a15ed3379108e1d1b535e 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -535,6 +535,9 @@ int of_platform_device_destroy(struct device *dev, void *data) if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS)) device_for_each_child(dev, NULL, of_platform_device_destroy); + of_node_clear_flag(dev->of_node, OF_POPULATED); + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); + if (dev->bus == &platform_bus_type) platform_device_unregister(to_platform_device(dev)); #ifdef CONFIG_ARM_AMBA @@ -542,8 +545,6 @@ int of_platform_device_destroy(struct device *dev, void *data) amba_device_unregister(to_amba_device(dev)); #endif - of_node_clear_flag(dev->of_node, OF_POPULATED); - of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; } EXPORT_SYMBOL_GPL(of_platform_device_destroy); diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index 8f41c801b0e6c242d534fe2ad1c620cd875aae68..570612256894fda43eeb84b3cd841917c8f7f7dd 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -126,6 +126,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay, goto err_fail; } + if (offset < 0 || offset + sizeof(__be32) > prop->length) { + err = -EINVAL; + goto err_fail; + } + *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle); } diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 168ef0bbabde1d6b1874d97e63ee474f7ec00e62..985a85f281a82ea18d35937352891252dca3faa2 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -164,20 +164,20 @@ static void __init of_unittest_dynamic(void) /* Add a new property - should pass*/ prop->name = "new-property"; prop->value = "new-property-data"; - prop->length = strlen(prop->value); 
+ prop->length = strlen(prop->value) + 1; unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n"); /* Try to add an existing property - should fail */ prop++; prop->name = "new-property"; prop->value = "new-property-data-should-fail"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_add_property(np, prop) != 0, "Adding an existing property should have failed\n"); /* Try to modify an existing property - should pass */ prop->value = "modify-property-data-should-pass"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_update_property(np, prop) == 0, "Updating an existing property should have passed\n"); @@ -185,7 +185,7 @@ static void __init of_unittest_dynamic(void) prop++; prop->name = "modify-property"; prop->value = "modify-missing-property-data-should-pass"; - prop->length = strlen(prop->value); + prop->length = strlen(prop->value) + 1; unittest(of_update_property(np, prop) == 0, "Updating a missing property should have passed\n"); diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c index dc3033cf3c19fec435eab75cfa3f7b158c883929..efc317e7669d8d65a867ab075869a05fb40bf67b 100644 --- a/drivers/pci/dwc/pcie-kirin.c +++ b/drivers/pci/dwc/pcie-kirin.c @@ -490,7 +490,7 @@ static int kirin_pcie_probe(struct platform_device *pdev) return ret; kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, - "reset-gpio", 0); + "reset-gpios", 0); if (kirin_pcie->gpio_id_reset < 0) return -ENODEV; diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index c91662927de0e48e1f831f19e5b329cc0f8b68ed..caea7c618207aae6b2e7c1822ddfa55a724103fa 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -566,6 +566,26 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev, static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); +/* + * There is no good way to get notified from vmbus_onoffer_rescind(), + * so let's use polling here, since this is not a hot path. + */ +static int wait_for_response(struct hv_device *hdev, + struct completion *comp) +{ + while (true) { + if (hdev->channel->rescind) { + dev_warn_once(&hdev->device, "The device is gone.\n"); + return -ENODEV; + } + + if (wait_for_completion_timeout(comp, HZ / 10)) + break; + } + + return 0; +} + /** * devfn_to_wslot() - Convert from Linux PCI slot to Windows * @devfn: The Linux representation of PCI slot @@ -1582,24 +1602,14 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, if (ret) goto error; - wait_for_completion(&comp_pkt.host_event); + if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) + goto error; hpdev->desc = *desc; refcount_set(&hpdev->refs, 1); get_pcichild(hpdev, hv_pcidev_ref_childlist); spin_lock_irqsave(&hbus->device_list_lock, flags); - /* - * When a device is being added to the bus, we set the PCI domain - * number to be the device serial number, which is non-zero and - * unique on the same VM. The serial numbers start with 1, and - * increase by 1 for each device. So device names including this - * can have shorter names than based on the bus instance UUID. - * Only the first device serial number is used for domain, so the - * domain number will not change after the first device is added. 
- */ - if (list_empty(&hbus->children)) - hbus->sysdata.domain = desc->ser; list_add_tail(&hpdev->list_entry, &hbus->children); spin_unlock_irqrestore(&hbus->device_list_lock, flags); return hpdev; @@ -2075,15 +2085,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) sizeof(struct pci_version_request), (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + if (ret) { dev_err(&hdev->device, - "PCI Pass-through VSP failed sending version reqquest: %#x", + "PCI Pass-through VSP failed to request version: %d", ret); goto exit; } - wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status >= 0) { pci_protocol_version = pci_protocol_versions[i]; dev_info(&hdev->device, @@ -2292,11 +2303,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev) ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); + if (ret) goto exit; - wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -2336,11 +2348,10 @@ static int hv_pci_query_relations(struct hv_device *hdev) ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), 0, VM_PKT_DATA_INBAND, 0); - if (ret) - return ret; + if (!ret) + ret = wait_for_response(hdev, &comp); - wait_for_completion(&comp); - return 0; + return ret; } /** @@ -2410,11 +2421,11 @@ static int hv_send_resources_allocated(struct hv_device *hdev) size_res, (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + if (!ret) + ret = wait_for_response(hdev, &comp_pkt.host_event); if (ret) break; - wait_for_completion(&comp_pkt.host_event); - if (comp_pkt.completion_status < 0) { ret = -EPROTO; dev_err(&hdev->device, diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 06109d40c4ac92168470382ab4ba5e740382b4d4..e7d6cfaf386581a7d89541c89298d235bde752d6 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -134,7 +134,7 @@ struct controller *pcie_init(struct pcie_device *dev); int pcie_init_notification(struct controller *ctrl); int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); -void pcie_enable_notification(struct controller *ctrl); +void pcie_reenable_notification(struct controller *ctrl); int pciehp_power_on_slot(struct slot *slot); void pciehp_power_off_slot(struct slot *slot); void pciehp_get_power_status(struct slot *slot, u8 *status); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 35d84845d5af93769c5062ccf1395c467e29d8fe..1288289cc85d38fa09190586f7743ac1271e5a83 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -297,7 +297,7 @@ static int pciehp_resume(struct pcie_device *dev) ctrl = get_service_data(dev); /* reinitialize the chipset's event detection logic */ - pcie_enable_notification(ctrl); + pcie_reenable_notification(ctrl); slot = ctrl->slot; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index e5d5ce9e30106a8329e423bff084592c201606a7..05832b597e536e3db9744bc4cff9e9661bc5ee80 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -676,7 +676,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) return 
handled; } -void pcie_enable_notification(struct controller *ctrl) +static void pcie_enable_notification(struct controller *ctrl) { u16 cmd, mask; @@ -714,6 +714,17 @@ void pcie_enable_notification(struct controller *ctrl) pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); } +void pcie_reenable_notification(struct controller *ctrl) +{ + /* + * Clear both Presence and Data Link Layer Changed to make sure + * those events still fire after we have re-enabled them. + */ + pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + pcie_enable_notification(ctrl); +} + static void pcie_disable_notification(struct controller *ctrl) { u16 mask; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 929d68f744af2bf4f7283f93009aa4de7e7fe36e..ec2911c4ee425e7cde8ee990aa78848cfa96f8e1 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4377,11 +4377,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) * 0xa290-0xa29f PCI Express Root port #{0-16} * 0xa2e7-0xa2ee PCI Express Root port #{17-24} * + * Mobile chipsets are also affected, 7th & 8th Generation + * Specification update confirms ACS errata 22, status no fix: (7th Generation + * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel + * Processor Family I/O for U Quad Core Platforms Specification Update, + * August 2017, Revision 002, Document#: 334660-002)[6] + * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O + * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U + * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7] + * + * 0x9d10-0x9d1b PCI Express Root port #{1-12} + * + * The 300 series chipset suffers from the same bug so include those root + * ports here as well. + * + * 0xa32c-0xa343 PCI Express Root port #{0-24} + * * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html + * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html + * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html */ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) { @@ -4391,6 +4409,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) switch (dev->device) { case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ + case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ + case 0xa32c ... 
0xa343: /* 300 series */ return true; } diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index 6c575244c0fb9b61e6ab185a670494dd4c40f2f0..af9b7005a2bad8c994fdf9650de8d79f3019a3ed 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -178,6 +178,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy) struct device *dev = &qphy->phy->dev; u8 *val; + /* efuse register is optional */ + if (!qphy->cell) + return; + /* * Read efuse register having TUNE2 parameter's high nibble. * If efuse register shows value as 0x0, or if we fail to find diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index b601039d6c69a28d771eff622f0001d70bf84204..c4aa411f5935b7b0275c004a3924ffda3613630a 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) } static int dt_to_map_one_config(struct pinctrl *p, - struct pinctrl_dev *pctldev, + struct pinctrl_dev *hog_pctldev, const char *statename, struct device_node *np_config) { + struct pinctrl_dev *pctldev = NULL; struct device_node *np_pctldev; const struct pinctrl_ops *ops; int ret; @@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p, return -EPROBE_DEFER; } /* If we're creating a hog we can use the passed pctldev */ - if (pctldev && (np_pctldev == p->dev->of_node)) + if (hog_pctldev && (np_pctldev == p->dev->of_node)) { + pctldev = hog_pctldev; break; + } pctldev = get_pinctrl_dev_from_of_node(np_pctldev); if (pctldev) break; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 7dc276b59dfa2d0be70851dfa4b242f1d749c767..2569bc7d5920022f094403cb901b50593d321b62 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -161,6 +161,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, val = readl(pctrl->regs + g->ctl_reg); val &= ~mask; val |= i << g->mux_bit; + /* Check if egpio present and enable that feature */ + if (val & BIT(g->egpio_present)) + val |= BIT(g->egpio_enable); + writel(val, pctrl->regs + g->ctl_reg); raw_spin_unlock_irqrestore(&pctrl->lock, flags); @@ -881,16 +885,21 @@ static int msm_gpio_domain_translate(struct irq_domain *d, static int msm_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { - int ret; + int ret = 0; irq_hw_number_t hwirq; - struct irq_fwspec *fwspec = arg; + struct irq_fwspec *fwspec = arg, parent_fwspec; ret = msm_gpio_domain_translate(domain, fwspec, &hwirq, NULL); if (ret) return ret; msm_gpio_domain_set_info(domain, virq, hwirq); - return ret; + + parent_fwspec = *fwspec; + parent_fwspec.fwnode = domain->parent->fwnode; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, + &parent_fwspec); } static const struct irq_domain_ops msm_gpio_domain_ops = { @@ -1335,11 +1344,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) chip->parent = pctrl->dev; chip->owner = THIS_MODULE; chip->of_node = pctrl->dev->of_node; - chip->irqchip = &msm_gpio_irq_chip; - chip->irq_handler = handle_fasteoi_irq; - chip->irq_default_type = IRQ_TYPE_NONE; - chip->to_irq = msm_gpiochip_to_irq; - chip->lock_key = NULL; ret = gpiochip_add_data(&pctrl->chip, pctrl); if (ret) { diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 6ae59dc7a62f7c708fdc04ac9e3be08d19d7052a..7eb857d810368483755ed5ce46e65927187f2a3b 100644 --- 
a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -84,6 +84,8 @@ struct msm_pingroup { unsigned pull_bit:5; unsigned drv_bit:5; + unsigned egpio_enable:5; + unsigned egpio_present:5; unsigned oe_bit:5; unsigned in_bit:5; unsigned out_bit:5; diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c index 33c16f295d318e3566679c7872862bb0a49467ea..f3565102a780049a9cf78b36acf6e64a61f39696 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c @@ -59,6 +59,8 @@ .mux_bit = 2, \ .pull_bit = 0, \ .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ .oe_bit = 9, \ .in_bit = 0, \ .out_bit = 1, \ diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index 92aeea174a5673c61d95fde4aed610539cc113ea..afeb4876ffb2cbf09073dc95a44aa7109a343a62 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c @@ -110,12 +110,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = { EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38), EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c), EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40), - EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"), EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44), EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48), EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c), EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50), EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54), + EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"), EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"), EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"), EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"), @@ -635,7 +635,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20), EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24), EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28), - EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"), EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c), EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30), EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34), @@ -646,6 +645,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48), EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c), EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50), + EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"), EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"), EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"), EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"), diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 45350332238404b79df758832ee62ca31e40d3b0..219112d04f12ad6a589f2653665abe40a1d01e4b 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -63,28 +63,6 @@ config USB_BAM USB BAM driver was added to supports SPS Peripheral-to-Peripheral transfers between the USB and other peripheral. -config IPA - tristate "IPA support" - depends on SPS && NET - help - This driver supports the Internet Packet Accelerator (IPA) core. - IPA is a programmable protocol processor HW block. - It is designed to support generic HW processing of UL/DL IP packets - for various use cases independent of radio technology. - The driver support client connection and configuration - for the IPA core. - Kernel and user-space processes can call the IPA driver - to configure IPA core. 
- -config RMNET_IPA - tristate "IPA RMNET WWAN Network Device" - depends on IPA && MSM_QMI_INTERFACE - help - This WWAN Network Driver implements network stack class device. - It supports Embedded data transfer from A7 to Q6. Configures IPA HW - for RmNet Data Driver and also exchange of QMI messages between - A7 and Q6 IPA-driver. - config GSI bool "GSI support" help @@ -131,7 +109,7 @@ config RMNET_IPA3 config ECM_IPA tristate "STD ECM LAN Driver support" - depends on IPA || IPA3 + depends on IPA3 help Enables LAN between applications processor and a tethered host using the STD ECM protocol. @@ -140,7 +118,7 @@ config ECM_IPA config RNDIS_IPA tristate "RNDIS_IPA Network Interface Driver support" - depends on IPA || IPA3 + depends on IPA3 help Enables LAN between applications processor and a tethered host using the RNDIS protocol. @@ -188,4 +166,10 @@ config SEEMP_CORE a log and rates the actions according to whether a typical user would use the tools. +config IPA_EMULATION + bool "IPA on X86 Linux (IPA emulation support)" + depends on X86 && IPA3 + help + This option is used only when building the X86 version of + the IPA/GSI driver. Never set this when building for ARM. endmenu diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile index 82d6c6299bb809c67365bc62ecf6e6b0ae2bd27c..a14a203a9e295a5f7c9eb18c56b250c776686e16 100644 --- a/drivers/platform/msm/gsi/Makefile +++ b/drivers/platform/msm/gsi/Makefile @@ -1,2 +1,4 @@ gsidbg-$(CONFIG_DEBUG_FS) += gsi_dbg.o obj-$(CONFIG_GSI) += gsi.o gsidbg.o + +obj-$(CONFIG_IPA_EMULATION) += gsi_emulation.o diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index 2b4780b90632185bd6cdddbb48a36e8b99bf15fd..6ca1d32bb05427ce8912500b2e71c952f637b487 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -20,9 +20,10 @@ #include #include "gsi.h" #include "gsi_reg.h" +#include "gsi_emulation.h" #define GSI_CMD_TIMEOUT (5*HZ) -#define GSI_STOP_CMD_TIMEOUT_MS 20 +#define GSI_STOP_CMD_TIMEOUT_MS 50 #define GSI_MAX_CH_LOW_WEIGHT 15 #define GSI_RESET_WA_MIN_SLEEP 1000 @@ -42,6 +43,13 @@ static const struct of_device_id msm_gsi_match[] = { { }, }; + +#if defined(CONFIG_IPA_EMULATION) +static bool running_emulation = true; +#else +static bool running_emulation; +#endif + struct gsi_ctx *gsi_ctx; static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val) @@ -369,6 +377,8 @@ uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr) static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1, uint64_t addr2) { + uint32_t addr_diff; + WARN(addr1 < ctx->base || addr1 >= ctx->end, "address not in range. base 0x%llx end 0x%llx addr 0x%llx\n", ctx->base, ctx->end, addr1); @@ -376,10 +386,11 @@ static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1, "address not in range. 
base 0x%llx end 0x%llx addr 0x%llx\n", ctx->base, ctx->end, addr2); + addr_diff = (uint32_t)(addr2 - addr1); if (addr1 < addr2) - return (addr2 - addr1) / ctx->elem_sz; + return addr_diff / ctx->elem_sz; else - return (addr2 - addr1 + ctx->len) / ctx->elem_sz; + return (addr_diff + ctx->len) / ctx->elem_sz; } static void gsi_process_chan(struct gsi_xfer_compl_evt *evt, @@ -621,7 +632,7 @@ static void gsi_handle_irq(void) if (!type) break; - GSIDBG_LOW("type %x\n", type); + GSIDBG_LOW("type 0x%x\n", type); if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK) gsi_handle_ch_ctrl(ee); @@ -718,10 +729,10 @@ static uint32_t gsi_get_max_channels(enum gsi_ver ver) break; case GSI_VER_2_5: reg = gsi_readl(gsi_ctx->base + - GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); reg = (reg & - GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> - GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; break; } @@ -775,10 +786,10 @@ static uint32_t gsi_get_max_event_rings(enum gsi_ver ver) break; case GSI_VER_2_5: reg = gsi_readl(gsi_ctx->base + - GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); reg = (reg & - GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> - GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; break; } @@ -856,17 +867,57 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) GSIERR("bad irq specified %u\n", props->irq); return -GSI_STATUS_INVALID_PARAMS; } - - res = devm_request_irq(gsi_ctx->dev, props->irq, + /* + * On a real UE, there are two separate interrupt + * vectors that get directed toward the GSI/IPA + * drivers. They are handled by gsi_isr() and + * (ipa_isr() or ipa3_isr()) respectively. In the + * emulation environment, this is not the case; + * instead, interrupt vectors are routed to the + * emulation hardware's interrupt controller, which, + * in turn, forwards a single interrupt to the GSI/IPA + * driver. When the new interrupt vector is received, + * the driver needs to probe the interrupt + * controller's registers to see if one, the other, or + * both interrupts have occurred. Given the above, we + * now need to handle both situations, namely: the + * emulator's and the real UE. + */ + if (running_emulation) { + /* + * New scheme involving the emulator's + * interrupt controller. + */ + res = devm_request_threaded_irq( + gsi_ctx->dev, + props->irq, + /* top half handler to follow */ + emulator_hard_irq_isr, + /* threaded bottom half handler to follow */ + emulator_soft_irq_isr, + IRQF_SHARED, + "emulator_intcntrlr", + gsi_ctx); + } else { + /* + * Traditional scheme used on the real UE. + */ + res = devm_request_irq(gsi_ctx->dev, props->irq, gsi_isr, props->req_clk_cb ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_HIGH, "gsi", gsi_ctx); + } if (res) { - GSIERR("failed to register isr for %u\n", props->irq); + GSIERR( + "failed to register isr for %u\n", + props->irq); return -GSI_STATUS_ERROR; } + GSIDBG( + "succeeded to register isr for %u\n", + props->irq); res = enable_irq_wake(props->irq); if (res) @@ -886,6 +937,41 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) return -GSI_STATUS_RES_ALLOC_FAILURE; } + GSIDBG("GSI base(%pa) mapped to (%pK) with len (0x%lx)\n", + &(props->phys_addr), + gsi_ctx->base, + props->size); + + if (running_emulation) { + GSIDBG("GSI SW ver register value 0x%x\n", + gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_SW_VERSION_OFFS(0))); + gsi_ctx->intcntrlr_mem_size = + props->emulator_intcntrlr_size; + gsi_ctx->intcntrlr_base = + devm_ioremap_nocache( + gsi_ctx->dev, + props->emulator_intcntrlr_addr, + props->emulator_intcntrlr_size); + if (!gsi_ctx->intcntrlr_base) { + GSIERR( + "failed to remap emulator's interrupt controller HW\n"); + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + return -GSI_STATUS_RES_ALLOC_FAILURE; + } + + GSIDBG( + "Emulator's interrupt controller base(%pa) mapped to (%pK) with len (0x%lx)\n", + &(props->emulator_intcntrlr_addr), + gsi_ctx->intcntrlr_base, + props->emulator_intcntrlr_size); + + gsi_ctx->intcntrlr_gsi_isr = gsi_isr; + gsi_ctx->intcntrlr_client_isr = + props->emulator_intcntrlr_client_isr; + } + gsi_ctx->per = *props; gsi_ctx->per_registered = true; mutex_init(&gsi_ctx->mlock); @@ -893,19 +979,36 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) atomic_set(&gsi_ctx->num_evt_ring, 0); gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver); if (gsi_ctx->max_ch == 0) { + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + if (running_emulation) + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); GSIERR("failed to get max channels\n"); return -GSI_STATUS_ERROR; } gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver); if (gsi_ctx->max_ev == 0) { + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + if (running_emulation) + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); GSIERR("failed to get max event rings\n"); return -GSI_STATUS_ERROR; } + if (gsi_ctx->max_ev > GSI_EVT_RING_MAX) { + GSIERR("max event rings are beyond absolute maximum\n"); + return -GSI_STATUS_ERROR; + } + if (props->mhi_er_id_limits_valid && props->mhi_er_id_limits[0] > (gsi_ctx->max_ev - 1)) { devm_iounmap(gsi_ctx->dev, gsi_ctx->base); - gsi_ctx->base = NULL; + if (running_emulation) + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); GSIERR("MHI event ring start id %u is beyond max %u\n", props->mhi_er_id_limits[0], gsi_ctx->max_ev); @@ -946,6 +1049,22 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) gsi_writel(0, gsi_ctx->base + GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee)); + if (running_emulation) { + /* + * Set up the emulator's interrupt controller... 
+ */ + res = setup_emulator_cntrlr( + gsi_ctx->intcntrlr_base, gsi_ctx->intcntrlr_mem_size); + if (res != 0) { + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + devm_iounmap(gsi_ctx->dev, gsi_ctx->intcntrlr_base); + gsi_ctx->base = gsi_ctx->intcntrlr_base = NULL; + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("setup_emulator_cntrlr() failed\n"); + return res; + } + } + *dev_hdl = (uintptr_t)gsi_ctx; return GSI_STATUS_SUCCESS; @@ -1623,14 +1742,86 @@ int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, } EXPORT_SYMBOL(gsi_set_evt_ring_cfg); +static void gsi_program_chan_ctx_qos(struct gsi_chan_props *props, + unsigned int ee) +{ + uint32_t val; + + val = + (((props->low_weight << + GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) | + ((props->max_prefetch << + GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) | + ((props->use_db_eng << + GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) & + GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK)); + if (gsi_ctx->per.ver >= GSI_VER_2_0) + val |= ((props->prefetch_mode << + GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT) + & GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK); + + gsi_writel(val, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee)); +} + +static void gsi_program_chan_ctx_qos_v2_5(struct gsi_chan_props *props, + unsigned int ee) +{ + uint32_t val; + + val = + (((props->low_weight << + GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) | + ((props->max_prefetch << + GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) | + ((props->use_db_eng << + GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK) | + ((props->prefetch_mode << + GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK) | + ((props->empty_lvl_threshold << + GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK)); + + gsi_writel(val, gsi_ctx->base + + GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee)); +} + static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee, uint8_t erindex) { uint32_t val; + uint32_t prot; + uint32_t prot_msb; + + switch (props->prot) { + case GSI_CHAN_PROT_MHI: + case GSI_CHAN_PROT_XHCI: + case GSI_CHAN_PROT_GPI: + case GSI_CHAN_PROT_XDCI: + prot = props->prot; + prot_msb = 0; + break; + default: + GSIERR("Unsupported protocol %d\n", props->prot); + WARN_ON(1); + return; + } + val = ((prot << + GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK); + if (gsi_ctx->per.ver >= GSI_VER_2_5) { + val |= ((prot_msb << + GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT) & + GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK); + } - val = (((props->prot << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT) - & GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK) | - ((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) & + val |= (((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) & GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) | ((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) & GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) | @@ -1656,19 +1847,10 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee, gsi_writel(val, gsi_ctx->base + GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee)); - val = (((props->low_weight << GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) & - 
GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) | - ((props->max_prefetch << - GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) & - GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) | - ((props->use_db_eng << GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) & - GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK)); - if (gsi_ctx->per.ver >= GSI_VER_2_0) - val |= ((props->prefetch_mode << - GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_SHFT) - & GSI_EE_n_GSI_CH_k_QOS_USE_ESCAPE_BUF_ONLY_BMSK); - gsi_writel(val, gsi_ctx->base + - GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee)); + if (gsi_ctx->per.ver >= GSI_VER_2_5) + gsi_program_chan_ctx_qos_v2_5(props, ee); + else + gsi_program_chan_ctx_qos(props, ee); } static void gsi_init_chan_ring(struct gsi_chan_props *props, @@ -1781,7 +1963,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, } if (props->evt_ring_hdl != ~0) { - if (props->evt_ring_hdl >= GSI_EVT_RING_MAX) { + if (props->evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1868,8 +2050,6 @@ EXPORT_SYMBOL(gsi_alloc_channel); static void __gsi_write_channel_scratch(unsigned long chan_hdl, union __packed gsi_channel_scratch val) { - uint32_t reg; - gsi_writel(val.data.word1, gsi_ctx->base + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, gsi_ctx->per.ee)); @@ -1879,17 +2059,89 @@ static void __gsi_write_channel_scratch(unsigned long chan_hdl, gsi_writel(val.data.word3, gsi_ctx->base + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, gsi_ctx->per.ee)); + + gsi_writel(val.data.word4, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); +} + +static void __gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch * val) +{ + val->data.word1 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + val->data.word2 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + val->data.word3 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + val->data.word4 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, + gsi_ctx->per.ee)); +} + +static union __packed gsi_channel_scratch __gsi_update_mhi_channel_scratch( + unsigned long chan_hdl, struct __packed gsi_mhi_channel_scratch mscr) +{ + union __packed gsi_channel_scratch scr; + /* below sequence is not atomic. 
assumption is sequencer specific fields * will remain unchanged across this sequence */ - reg = gsi_readl(gsi_ctx->base + + + /* READ */ + scr.data.word1 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + scr.data.word2 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + scr.data.word3 = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + scr.data.word4 = gsi_readl(gsi_ctx->base + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, gsi_ctx->per.ee)); - reg &= 0xFFFF; - reg |= (val.data.word4 & 0xFFFF0000); - gsi_writel(reg, gsi_ctx->base + + + /* UPDATE */ + scr.mhi.mhi_host_wp_addr = mscr.mhi_host_wp_addr; + scr.mhi.assert_bit40 = mscr.assert_bit40; + scr.mhi.polling_configuration = mscr.polling_configuration; + scr.mhi.burst_mode_enabled = mscr.burst_mode_enabled; + scr.mhi.polling_mode = mscr.polling_mode; + scr.mhi.oob_mod_threshold = mscr.oob_mod_threshold; + + if (gsi_ctx->per.ver < GSI_VER_2_5) { + scr.mhi.max_outstanding_tre = mscr.max_outstanding_tre; + scr.mhi.outstanding_threshold = mscr.outstanding_threshold; + } + + /* WRITE */ + gsi_writel(scr.data.word1, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(scr.data.word2, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(scr.data.word3, gsi_ctx->base + + GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl, + gsi_ctx->per.ee)); + + gsi_writel(scr.data.word4, gsi_ctx->base + GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl, gsi_ctx->per.ee)); + + return scr; } int gsi_write_channel_scratch(unsigned long chan_hdl, @@ -1925,6 +2177,71 @@ int gsi_write_channel_scratch(unsigned long chan_hdl, } EXPORT_SYMBOL(gsi_write_channel_scratch); +int gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch *val) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STARTED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + __gsi_read_channel_scratch(chan_hdl, val); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_read_channel_scratch); + +int gsi_update_mhi_channel_scratch(unsigned long chan_hdl, + struct __packed gsi_mhi_channel_scratch mscr) +{ + struct gsi_chan_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (chan_hdl >= gsi_ctx->max_ch) { + GSIERR("bad params chan_hdl=%lu\n", chan_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED && + gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) { + GSIERR("bad state %d\n", + gsi_ctx->chan[chan_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx = &gsi_ctx->chan[chan_hdl]; + + mutex_lock(&ctx->mlock); + ctx->scratch = __gsi_update_mhi_channel_scratch(chan_hdl, mscr); + mutex_unlock(&ctx->mlock); + + return GSI_STATUS_SUCCESS; +} 
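(Editor's note: the following is a minimal caller-side sketch, not part of the patch, showing how the scratch helpers introduced above might be used by an MHI-type client. The function name, parameter types, and the host write-pointer value are hypothetical placeholders; only the gsi_read_channel_scratch()/gsi_update_mhi_channel_scratch() entry points and the MHI scratch field names come from the patch. Per the state checks below, the channel is assumed to be in the ALLOCATED or STOPPED state.)

/* Hypothetical usage sketch -- not part of the patch. */
static int example_refresh_mhi_wp(unsigned long chan_hdl, uint64_t host_wp)
{
	union __packed gsi_channel_scratch cur;
	struct __packed gsi_mhi_channel_scratch mscr = { 0 };
	int rc;

	/* Read back all four scratch words for inspection/debug. */
	rc = gsi_read_channel_scratch(chan_hdl, &cur);
	if (rc != GSI_STATUS_SUCCESS)
		return rc;

	/*
	 * Carry over the MHI-owned fields and update only the host
	 * write pointer; the sequencer-owned fields are preserved by
	 * the read-modify-write done inside the helper itself.
	 */
	mscr.mhi_host_wp_addr = host_wp;
	mscr.assert_bit40 = cur.mhi.assert_bit40;
	mscr.polling_configuration = cur.mhi.polling_configuration;
	mscr.burst_mode_enabled = cur.mhi.burst_mode_enabled;
	mscr.polling_mode = cur.mhi.polling_mode;
	mscr.oob_mod_threshold = cur.mhi.oob_mod_threshold;

	return gsi_update_mhi_channel_scratch(chan_hdl, mscr);
}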
+EXPORT_SYMBOL(gsi_update_mhi_channel_scratch); + int gsi_query_channel_db_addr(unsigned long chan_hdl, uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb) { @@ -2068,6 +2385,20 @@ int gsi_stop_channel(unsigned long chan_hdl) res = wait_for_completion_timeout(&ctx->compl, msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS)); if (res == 0) { + /* + * check channel state here in case the channel is stopped but + * the interrupt was not handled yet. + */ + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl, + gsi_ctx->per.ee)); + ctx->state = (val & + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >> + GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT; + if (ctx->state == GSI_CHAN_STATE_STOPPED) { + res = GSI_STATUS_SUCCESS; + goto free_lock; + } GSIDBG("chan_hdl=%lu timed out\n", chan_hdl); res = -GSI_STATUS_TIMED_OUT; goto free_lock; @@ -2610,7 +2941,7 @@ int gsi_start_xfer(unsigned long chan_hdl) return -GSI_STATUS_UNSUPPORTED_OP; } - if (ctx->state != GSI_CHAN_STATE_STARTED) { + if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) { GSIERR("bad state %d\n", ctx->state); return -GSI_STATUS_UNSUPPORTED_OP; } @@ -2744,21 +3075,27 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode) return -GSI_STATUS_UNSUPPORTED_OP; } - spin_lock_irqsave(&gsi_ctx->slock, flags); if (curr == GSI_CHAN_MODE_CALLBACK && mode == GSI_CHAN_MODE_POLL) { + spin_lock_irqsave(&gsi_ctx->slock, flags); __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); + spin_lock_irqsave(&ctx->ring.slock, flags); atomic_set(&ctx->poll_mode, mode); + spin_unlock_irqrestore(&ctx->ring.slock, flags); ctx->stats.callback_to_poll++; } if (curr == GSI_CHAN_MODE_POLL && mode == GSI_CHAN_MODE_CALLBACK) { + spin_lock_irqsave(&ctx->ring.slock, flags); atomic_set(&ctx->poll_mode, mode); + spin_unlock_irqrestore(&ctx->ring.slock, flags); + spin_lock_irqsave(&gsi_ctx->slock, flags); __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0); + spin_unlock_irqrestore(&gsi_ctx->slock, flags); ctx->stats.poll_to_callback++; } - spin_unlock_irqrestore(&gsi_ctx->slock, flags); return GSI_STATUS_SUCCESS; } @@ -2849,7 +3186,7 @@ int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, } EXPORT_SYMBOL(gsi_set_channel_cfg); -static void gsi_configure_ieps(void *base) +static void gsi_configure_ieps(void *base, enum gsi_ver ver) { void __iomem *gsi_base = base; @@ -2859,18 +3196,26 @@ static void gsi_configure_ieps(void *base) gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS); gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS); gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS); - gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS); + gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS); gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS); gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS); gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS); gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS); gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS); gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS); + gsi_writel(14, gsi_base + GSI_GSI_IRAM_PTR_EV_DB_OFFS); + gsi_writel(15, gsi_base + GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS); + gsi_writel(16, gsi_base + GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS); + + if (ver >= GSI_VER_2_5) + gsi_writel(17, + gsi_base + GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS); } static void gsi_configure_bck_prs_matrix(void 
*base) { - void __iomem *gsi_base = base; + void __iomem *gsi_base = (void __iomem *) base; + /* * For now, these are default values. In the future, GSI FW image will * produce optimized back-pressure values based on the FW image. @@ -2908,10 +3253,20 @@ static void gsi_configure_bck_prs_matrix(void *base) } int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size, - phys_addr_t per_base_addr) + phys_addr_t per_base_addr, enum gsi_ver ver) { void __iomem *gsi_base; + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) { + GSIERR("Incorrect version %d\n", ver); + return -GSI_STATUS_ERROR; + } + gsi_base = ioremap_nocache(gsi_base_addr, gsi_size); if (!gsi_base) { GSIERR("ioremap failed\n"); @@ -2921,7 +3276,7 @@ int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size, gsi_writel(per_base_addr, gsi_base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS); gsi_configure_bck_prs_matrix((void *)gsi_base); - gsi_configure_ieps(gsi_base); + gsi_configure_ieps(gsi_base, ver); iounmap(gsi_base); return 0; @@ -2962,7 +3317,6 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) | ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) & GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK)); - gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); } else { value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & GSI_GSI_CFG_GSI_ENABLE_BMSK) | @@ -2972,9 +3326,13 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & GSI_GSI_CFG_UC_IS_MCS_BMSK)); - gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); } + /* GSI frequency is peripheral frequency divided by 3 (2+1) */ + if (ver >= GSI_VER_2_5) + value |= ((2 << GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT) & + GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK); + gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); iounmap(gsi_base); return 0; @@ -2983,13 +3341,46 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) EXPORT_SYMBOL(gsi_enable_fw); void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, - unsigned long *size) + unsigned long *size, enum gsi_ver ver) { - if (base_offset) - *base_offset = GSI_GSI_INST_RAM_n_OFFS(0); + unsigned long maxn; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return; + } + + switch (ver) { + case GSI_VER_1_0: + case GSI_VER_1_2: + case GSI_VER_1_3: + maxn = GSI_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_2_0: + maxn = GSI_V2_0_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_2_2: + maxn = GSI_V2_2_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_2_5: + maxn = GSI_V2_5_GSI_INST_RAM_n_MAXn; + break; + case GSI_VER_ERR: + case GSI_VER_MAX: + default: + GSIERR("GSI version is not supported %d\n", ver); + WARN_ON(1); + return; + } if (size) - *size = GSI_GSI_INST_RAM_n_WORD_SZ * - (GSI_GSI_INST_RAM_n_MAXn + 1); + *size = GSI_GSI_INST_RAM_n_WORD_SZ * (maxn + 1); + + if (base_offset) { + if (ver < GSI_VER_2_5) + *base_offset = GSI_GSI_INST_RAM_n_OFFS(0); + else + *base_offset = GSI_V2_5_GSI_INST_RAM_n_OFFS(0); + } } EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size); @@ -3093,16 +3484,45 @@ static struct platform_driver msm_gsi_driver = { }, }; +static struct platform_device *pdev; + /** * Module Init. 
*/ static int __init gsi_init(void) { - pr_debug("%s\n", __func__); - return platform_driver_register(&msm_gsi_driver); -} + int ret; + + pr_debug("gsi_init\n"); + + ret = platform_driver_register(&msm_gsi_driver); + if (ret < 0) + goto out; + + if (running_emulation) { + pdev = platform_device_register_simple("gsi", -1, NULL, 0); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + platform_driver_unregister(&msm_gsi_driver); + goto out; + } + } +out: + return ret; +} arch_initcall(gsi_init); +/* + * Module exit. + */ +static void __exit gsi_exit(void) +{ + if (running_emulation && pdev) + platform_device_unregister(pdev); + platform_driver_unregister(&msm_gsi_driver); +} +module_exit(gsi_exit); + MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Generic Software Interface (GSI)"); diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h index 9b5dead02fd50fcce5e8c0dc5fd31721822a393e..870d8d615ba39ffbf0b9dd4a8ecf8f70942c9134 100644 --- a/drivers/platform/msm/gsi/gsi.h +++ b/drivers/platform/msm/gsi/gsi.h @@ -18,10 +18,18 @@ #include #include #include +#include #include +/* + * The following for adding code (ie. for EMULATION) not found on x86. + */ +#if defined(CONFIG_IPA_EMULATION) +# include "gsi_emulation_stubs.h" +#endif + #define GSI_CHAN_MAX 31 -#define GSI_EVT_RING_MAX 23 +#define GSI_EVT_RING_MAX 24 #define GSI_NO_EVT_ERINDEX 31 #define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) @@ -204,6 +212,13 @@ struct gsi_ctx { struct completion gen_ee_cmd_compl; void *ipc_logbuf; void *ipc_logbuf_low; + /* + * The following used only on emulation systems. + */ + void __iomem *intcntrlr_base; + u32 intcntrlr_mem_size; + irq_handler_t intcntrlr_gsi_isr; + irq_handler_t intcntrlr_client_isr; }; enum gsi_re_type { diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c index bd288b9ce9493670b7c4dddbb2a75b6a5e18973b..dab8dc29e20b75a3cb763a8eae478dd8efa20901 100644 --- a/drivers/platform/msm/gsi/gsi_dbg.c +++ b/drivers/platform/msm/gsi/gsi_dbg.c @@ -220,8 +220,13 @@ static ssize_t gsi_dump_ch(struct file *file, GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(arg1, gsi_ctx->per.ee)); TERR("CH%2d REFWP 0x%x\n", arg1, val); - val = gsi_readl(gsi_ctx->base + - GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee)); + if (gsi_ctx->per.ver >= GSI_VER_2_5) { + val = gsi_readl(gsi_ctx->base + + GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee)); + } else { + val = gsi_readl(gsi_ctx->base + + GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee)); + } TERR("CH%2d QOS 0x%x\n", arg1, val); val = gsi_readl(gsi_ctx->base + GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee)); diff --git a/drivers/platform/msm/gsi/gsi_emulation.c b/drivers/platform/msm/gsi/gsi_emulation.c new file mode 100644 index 0000000000000000000000000000000000000000..adaaaaa47f2acdae4ee121c9c6544edd88a018de --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_emulation.c @@ -0,0 +1,233 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 + */ +#include "gsi_emulation.h" + +/* + * ***************************************************************************** + * The following used to set up the EMULATION interrupt controller... + * ***************************************************************************** + */ +int setup_emulator_cntrlr( + void __iomem *intcntrlr_base, + u32 intcntrlr_mem_size) +{ + uint32_t val, ver, intrCnt, rangeCnt, range; + + val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT); + + intrCnt = val & 0xFFFF; + ver = (val >> 16) & 0xFFFF; + rangeCnt = intrCnt / 32; + + GSIDBG( + "CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n", + val, intrCnt, ver, rangeCnt); + + /* + * Verify the interrupt controller version + */ + if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) { + GSIERR( + "Error: invalid interrupt controller version 0x%x\n", + ver); + return -GSI_STATUS_INVALID_PARAMS; + } + + /* + * Verify the interrupt count + * + * NOTE: intrCnt must be at least one block and a multiple of 32 + */ + if ((intrCnt % 32) != 0) { + GSIERR( + "Invalid interrupt count read from HW 0x%04x\n", + intrCnt); + return -GSI_STATUS_ERROR; + } + + /* + * Calculate the number of ranges used; each range handles 32 int lines + */ + if (rangeCnt > DEO_IC_MAX_RANGE_CNT) { + GSIERR( + "SW interrupt limit(%u) passed, increase DEO_IC_MAX_RANGE_CNT(%u)\n", + rangeCnt, + DEO_IC_MAX_RANGE_CNT); + return -GSI_STATUS_ERROR; + } + + /* + * Take the last register offset minus the first + * register offset (ie. the range) and compare it to the interrupt + * controller's dtsi defined memory size. The range must + * fit within that size. + */ + val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT; + if (val > intcntrlr_mem_size) { + GSIERR( + "Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n", + val, intcntrlr_mem_size); + return -GSI_STATUS_ERROR; + } + + /* + * The following will disable the emulator's interrupt controller + * so that we can configure it... + */ + GSIDBG("Writing GE_INT_MASTER_ENABLE\n"); + gsi_emu_writel( + 0x0, + intcntrlr_base + GE_INT_MASTER_ENABLE); + + /* + * Init register maps of all ranges + */ + for (range = 0; range < rangeCnt; range++) { + /* + * Disable all int sources by setting all enable clear bits + */ + GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range); + gsi_emu_writel( + 0xFFFFFFFF, + intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range)); + + /* + * Clear all raw statuses + */ + GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range); + gsi_emu_writel( + 0xFFFFFFFF, + intcntrlr_base + GE_INT_CLEAR_n(range)); + + /* + * Init all int types + */ + GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range); + gsi_emu_writel( + 0x0, + intcntrlr_base + GE_INT_TYPE_n(range)); + } + + /* + * The following tells the interrupt controller to interrupt us + * when it sees interrupts from ipa and/or gsi. + * + * Interrupts: + * =================================================================== + * DUT0 [ 63 : 16 ] + * ipa_irq [ 3 : 0 ] <---HERE + * ipa_gsi_bam_irq [ 7 : 4 ] <---HERE + * ipa_bam_apu_sec_error_irq [ 8 ] + * ipa_bam_apu_non_sec_error_irq [ 9 ] + * ipa_bam_xpu2_msa_intr [ 10 ] + * ipa_vmidmt_nsgcfgirpt [ 11 ] + * ipa_vmidmt_nsgirpt [ 12 ] + * ipa_vmidmt_gcfgirpt [ 13 ] + * ipa_vmidmt_girpt [ 14 ] + * bam_xpu3_qad_non_secure_intr_sp [ 15 ] + */ + GSIDBG("Writing GE_INT_ENABLE_n(0)\n"); + gsi_emu_writel( + 0x00FF, /* See <---HERE above */ + intcntrlr_base + GE_INT_ENABLE_n(0)); + + /* + * The following will enable the IC post config...
 + */ + GSIDBG("Writing GE_INT_MASTER_ENABLE\n"); + gsi_emu_writel( + 0x1, + intcntrlr_base + GE_INT_MASTER_ENABLE); + + return 0; +} + +/* + * ***************************************************************************** + * The following for EMULATION hard irq... + * ***************************************************************************** + */ +irqreturn_t emulator_hard_irq_isr( + int irq, + void *ctxt) +{ + struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt; + + uint32_t val; + + val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS); + + /* + * If bit zero is set, the interrupt is for us; hence, return IRQ_NONE + * when it is not set... + */ + if (!(val & 0x00000001)) + return IRQ_NONE; + + /* + * The following will mask (ie. turn off) future interrupts from + * the emulator's interrupt controller. It will stay this way until + * we turn it back on, which will be done in the bottom half + * (ie. emulator_soft_irq_isr)... + */ + gsi_emu_writel( + 0x0, + gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE); + + return IRQ_WAKE_THREAD; +} + +/* + * ***************************************************************************** + * The following for EMULATION soft irq... + * ***************************************************************************** + */ +irqreturn_t emulator_soft_irq_isr( + int irq, + void *ctxt) +{ + struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt; + + irqreturn_t retVal = IRQ_HANDLED; + uint32_t val; + + val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0)); + + GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val); + + if (val & 0xF0 && gsi_ctx_ptr->intcntrlr_gsi_isr) { + GSIDBG("Got gsi interrupt\n"); + retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt); + } + + if (val & 0x0F && gsi_ctx_ptr->intcntrlr_client_isr) { + GSIDBG("Got ipa interrupt\n"); + retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, 0); + } + + /* + * The following will clear the interrupts... + */ + gsi_emu_writel( + 0xFFFFFFFF, + gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0)); + + /* + * The following will unmask (ie. turn on) future interrupts from + * the emulator's interrupt controller... + */ + gsi_emu_writel( + 0x1, + gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE); + + return retVal; +} diff --git a/drivers/platform/msm/gsi/gsi_emulation.h b/drivers/platform/msm/gsi/gsi_emulation.h new file mode 100644 index 0000000000000000000000000000000000000000..cd9a5c0368c301ef7ca964d21573989996d95dd5 --- /dev/null +++ b/drivers/platform/msm/gsi/gsi_emulation.h @@ -0,0 +1,192 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ +#if !defined(_GSI_EMULATION_H_) +# define _GSI_EMULATION_H_ + +# include + +# include "gsi.h" +# include "gsi_reg.h" +# include "gsi_emulation_stubs.h" + +# define gsi_emu_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) + +# define CNTRLR_BASE 0 + +/* + * The following file contains definitions and declarations that are + * germane only to the IPA emulation system, which is run from an X86 + * environment. Declaration's for non-X86 (ie. arm) are merely stubs + * to facilitate compile and link. + * + * Interrupt controller registers. + * Descriptions taken from the EMULATION interrupt controller SWI. + * - There is only one Master Enable register + * - Each group of 32 interrupt lines (range) is controlled by 8 registers, + * which are consecutive in memory: + * GE_INT_ENABLE_n + * GE_INT_ENABLE_CLEAR_n + * GE_INT_ENABLE_SET_n + * GE_INT_TYPE_n + * GE_IRQ_STATUS_n + * GE_RAW_STATUS_n + * GE_INT_CLEAR_n + * GE_SOFT_INT_n + * - After the above 8 registers, there are the registers of the next + * group (range) of 32 interrupt lines, and so on. + */ + +/** @brief The interrupt controller version and interrupt count register. + * Specifies interrupt controller version (upper 16 bits) and the + * number of interrupt lines supported by HW (lower 16 bits). + */ +# define GE_INT_CTL_VER_CNT \ + (CNTRLR_BASE + 0x0000) + +/** @brief Enable or disable physical IRQ output signal to the system, + * not affecting any status registers. + * + * 0x0 : DISABLE IRQ output disabled + * 0x1 : ENABLE IRQ output enabled + */ +# define GE_INT_OUT_ENABLE \ + (CNTRLR_BASE + 0x0004) + +/** @brief The IRQ master enable register. + * Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable. + */ +# define GE_INT_MASTER_ENABLE \ + (CNTRLR_BASE + 0x0008) + +# define GE_INT_MASTER_STATUS \ + (CNTRLR_BASE + 0x000C) + +/** @brief Each bit disables (bit=0, default) or enables (bit=1) the + * corresponding interrupt source + */ +# define GE_INT_ENABLE_n(n) \ + (CNTRLR_BASE + 0x0010 + 0x20 * (n)) + +/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE. + * Does nothing for bit=0 + */ +# define GE_INT_ENABLE_CLEAR_n(n) \ + (CNTRLR_BASE + 0x0014 + 0x20 * (n)) + +/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE. + * Does nothing for bit=0 + */ +# define GE_INT_ENABLE_SET_n(n) \ + (CNTRLR_BASE + 0x0018 + 0x20 * (n)) + +/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input + * detection logic for each corresponding interrupt source + */ +# define GE_INT_TYPE_n(n) \ + (CNTRLR_BASE + 0x001C + 0x20 * (n)) + +/** @brief Shows the interrupt sources captured in RAW_STATUS that have been + * steered to irq_n by INT_SELECT. Interrupts must also be enabled by + * INT_ENABLE and MASTER_ENABLE. Read only register. + * Bit values: 1=active, 0=inactive + */ +# define GE_IRQ_STATUS_n(n) \ + (CNTRLR_BASE + 0x0020 + 0x20 * (n)) + +/** @brief Shows the interrupt sources that have been latched by the input + * logic of the Interrupt Controller. Read only register. + * Bit values: 1=active, 0=inactive + */ +# define GE_RAW_STATUS_n(n) \ + (CNTRLR_BASE + 0x0024 + 0x20 * (n)) + +/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS. + * Does nothing for bit=0 + */ +# define GE_INT_CLEAR_n(n) \ + (CNTRLR_BASE + 0x0028 + 0x20 * (n)) + +/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS. + * Does nothing for bit=0. 
+ * @note Only functional for edge detected interrupts + */ +# define GE_SOFT_INT_n(n) \ + (CNTRLR_BASE + 0x002C + 0x20 * (n)) + +/** @brief Maximal number of ranges in SW. Each range supports 32 interrupt + * lines. If HW is extended considerably, increase this value + */ +# define DEO_IC_MAX_RANGE_CNT 8 + +/** @brief Size of the registers of one range in memory, in bytes */ +# define DEO_IC_RANGE_MEM_SIZE 32 /* SWI: 8 registers, no gaps */ + +/** @brief Minimal Interrupt controller HW version */ +# define DEO_IC_INT_CTL_VER_MIN 0x0102 + + +#if defined(CONFIG_IPA_EMULATION) /* declarations to follow */ + +/* + * ***************************************************************************** + * The following used to set up the EMULATION interrupt controller... + * ***************************************************************************** + */ +int setup_emulator_cntrlr( + void __iomem *intcntrlr_base, + u32 intcntrlr_mem_size); + +/* + * ***************************************************************************** + * The following for EMULATION hard irq... + * ***************************************************************************** + */ +irqreturn_t emulator_hard_irq_isr( + int irq, + void *ctxt); + +/* + * ***************************************************************************** + * The following for EMULATION soft irq... + * ***************************************************************************** + */ +irqreturn_t emulator_soft_irq_isr( + int irq, + void *ctxt); + +# else /* #if !defined(CONFIG_IPA_EMULATION) then definitions to follow */ + +static inline int setup_emulator_cntrlr( + void __iomem *intcntrlr_base, + u32 intcntrlr_mem_size) +{ + return 0; +} + +static inline irqreturn_t emulator_hard_irq_isr( + int irq, + void *ctxt) +{ + return IRQ_NONE; +} + +static inline irqreturn_t emulator_soft_irq_isr( + int irq, + void *ctxt) +{ + return IRQ_HANDLED; +} + +# endif /* #if defined(CONFIG_IPA_EMULATION) */ + +#endif /* #if !defined(_GSI_EMULATION_H_) */ diff --git a/arch/arm64/boot/dts/qcom/sm6150-cdp-overlay.dts b/drivers/platform/msm/gsi/gsi_emulation_stubs.h similarity index 67% rename from arch/arm64/boot/dts/qcom/sm6150-cdp-overlay.dts rename to drivers/platform/msm/gsi/gsi_emulation_stubs.h index 36c64e4d5610a9914d214f439b9952bf9b7c3555..dd9d0df5d1bf3d75dca42ef7f01100653dcc6275 100644 --- a/arch/arm64/boot/dts/qcom/sm6150-cdp-overlay.dts +++ b/drivers/platform/msm/gsi/gsi_emulation_stubs.h @@ -10,15 +10,11 @@ * GNU General Public License for more details. */ -/dts-v1/; -/plugin/; +#if !defined(_GSI_EMULATION_STUBS_H_) +# define _GSI_EMULATION_STUBS_H_ -#include +# include +# define __iormb() rmb() /* used in gsi.h */ +# define __iowmb() wmb() /* used in gsi.h */ -#include "sm6150-cdp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. 
SM6150 CDP"; - compatible = "qcom,sm6150-cdp", "qcom,sm6150", "qcom,cdp"; - qcom,board-id = <1 0>; -}; +#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */ diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h index 377e39ad22ab350d1e211da267ee9182f5619f88..ec2e886fb2d709f1678f21c315327aa07f52f93e 100644 --- a/drivers/platform/msm/gsi/gsi_reg.h +++ b/drivers/platform/msm/gsi/gsi_reg.h @@ -16,7 +16,8 @@ #define GSI_GSI_CFG_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000000) -#define GSI_GSI_CFG_RMSK 0xf +#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_BMSK 0xf00 +#define GSI_V2_5_GSI_CFG_SLEEP_CLK_DIV_SHFT 0x8 #define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20 #define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5 #define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10 @@ -35,18 +36,6 @@ #define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1 #define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0 -#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000008) -#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff -#define GSI_GSI_MANAGER_MCS_CODE_VER_VER_BMSK 0xffffffff -#define GSI_GSI_MANAGER_MCS_CODE_VER_VER_SHFT 0x0 - -#define GSI_GSI_ZEROS_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000010) -#define GSI_GSI_ZEROS_RMSK 0xffffffff -#define GSI_GSI_ZEROS_ZEROS_BMSK 0xffffffff -#define GSI_GSI_ZEROS_ZEROS_SHFT 0x0 - #define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000018) #define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff @@ -59,136 +48,6 @@ #define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff #define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0 -#define GSI_GSI_MOQA_CFG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000030) -#define GSI_GSI_MOQA_CFG_RMSK 0xffffff -#define GSI_GSI_MOQA_CFG_CLIENT_OOWR_BMSK 0xff0000 -#define GSI_GSI_MOQA_CFG_CLIENT_OOWR_SHFT 0x10 -#define GSI_GSI_MOQA_CFG_CLIENT_OORD_BMSK 0xff00 -#define GSI_GSI_MOQA_CFG_CLIENT_OORD_SHFT 0x8 -#define GSI_GSI_MOQA_CFG_CLIENT_REQ_PRIO_BMSK 0xff -#define GSI_GSI_MOQA_CFG_CLIENT_REQ_PRIO_SHFT 0x0 - -#define GSI_GSI_REE_CFG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000038) -#define GSI_GSI_REE_CFG_RMSK 0xff01 -#define GSI_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00 -#define GSI_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8 -#define GSI_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1 -#define GSI_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0 - -#define GSI_GSI_SHRAM_WR_WRR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000050) -#define GSI_GSI_SHRAM_WR_WRR_RMSK 0xffff -#define GSI_GSI_SHRAM_WR_WRR_CLIENT3_WR_WEIGHT_BMSK 0xf000 -#define GSI_GSI_SHRAM_WR_WRR_CLIENT3_WR_WEIGHT_SHFT 0xc -#define GSI_GSI_SHRAM_WR_WRR_CLIENT2_WR_WEIGHT_BMSK 0xf00 -#define GSI_GSI_SHRAM_WR_WRR_CLIENT2_WR_WEIGHT_SHFT 0x8 -#define GSI_GSI_SHRAM_WR_WRR_CLIENT1_WR_WEIGHT_BMSK 0xf0 -#define GSI_GSI_SHRAM_WR_WRR_CLIENT1_WR_WEIGHT_SHFT 0x4 -#define GSI_GSI_SHRAM_WR_WRR_CLIENT0_WR_WEIGHT_BMSK 0xf -#define GSI_GSI_SHRAM_WR_WRR_CLIENT0_WR_WEIGHT_SHFT 0x0 - -#define GSI_GSI_SHRAM_RD_WRR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000058) -#define GSI_GSI_SHRAM_RD_WRR_RMSK 0xffffff -#define GSI_GSI_SHRAM_RD_WRR_ACH_SHRAM_RD_WEIGHT_BMSK 0xf00000 -#define GSI_GSI_SHRAM_RD_WRR_ACH_SHRAM_RD_WEIGHT_SHFT 0x14 -#define GSI_GSI_SHRAM_RD_WRR_IE_SHRAM_RD_WEIGHT_BMSK 0xf0000 -#define GSI_GSI_SHRAM_RD_WRR_IE_SHRAM_RD_WEIGHT_SHFT 0x10 -#define GSI_GSI_SHRAM_RD_WRR_CSR_SHRAM_RD_WEIGHT_BMSK 0xf000 -#define GSI_GSI_SHRAM_RD_WRR_CSR_SHRAM_RD_WEIGHT_SHFT 0xc -#define GSI_GSI_SHRAM_RD_WRR_RE_CNTXT_SHRAM_RD_WEIGHT_BMSK 0xf00 -#define GSI_GSI_SHRAM_RD_WRR_RE_CNTXT_SHRAM_RD_WEIGHT_SHFT 0x8 -#define 
GSI_GSI_SHRAM_RD_WRR_MCS_LD_SHRAM_RD_WEIGHT_BMSK 0xf0 -#define GSI_GSI_SHRAM_RD_WRR_MCS_LD_SHRAM_RD_WEIGHT_SHFT 0x4 -#define GSI_GSI_SHRAM_RD_WRR_EV_ENG_SHRAM_RD_WEIGHT_BMSK 0xf -#define GSI_GSI_SHRAM_RD_WRR_EV_ENG_SHRAM_RD_WEIGHT_SHFT 0x0 - -#define GSI_GSI_CGC_CTRL_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000060) -#define GSI_GSI_CGC_CTRL_RMSK 0x3f -#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800 -#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb -#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK0x400 -#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa -#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK0x200 -#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9 -#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100 -#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8 -#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80 -#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7 -#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40 -#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6 -#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20 -#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5 -#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10 -#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4 -#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8 -#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_SHFT 0x3 -#define GSI_GSI_CGC_CTRL_REGION_3_HW_CGC_EN_BMSK 0x4 -#define GSI_GSI_CGC_CTRL_REGION_3_HW_CGC_EN_SHFT 0x2 -#define GSI_GSI_CGC_CTRL_REGION_2_HW_CGC_EN_BMSK 0x2 -#define GSI_GSI_CGC_CTRL_REGION_2_HW_CGC_EN_SHFT 0x1 -#define GSI_GSI_CGC_CTRL_REGION_1_HW_CGC_EN_BMSK 0x1 -#define GSI_GSI_CGC_CTRL_REGION_1_HW_CGC_EN_SHFT 0x0 - -#define GSI_GSI_MSI_CACHEATTR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000080) -#define GSI_GSI_MSI_CACHEATTR_RMSK 0x3f -#define GSI_GSI_MSI_CACHEATTR_AREQPRIORITY_BMSK 0x30 -#define GSI_GSI_MSI_CACHEATTR_AREQPRIORITY_SHFT 0x4 -#define GSI_GSI_MSI_CACHEATTR_ATRANSIENT_BMSK 0x8 -#define GSI_GSI_MSI_CACHEATTR_ATRANSIENT_SHFT 0x3 -#define GSI_GSI_MSI_CACHEATTR_ANOALLOCATE_BMSK 0x4 -#define GSI_GSI_MSI_CACHEATTR_ANOALLOCATE_SHFT 0x2 -#define GSI_GSI_MSI_CACHEATTR_AINNERSHARED_BMSK 0x2 -#define GSI_GSI_MSI_CACHEATTR_AINNERSHARED_SHFT 0x1 -#define GSI_GSI_MSI_CACHEATTR_ASHARED_BMSK 0x1 -#define GSI_GSI_MSI_CACHEATTR_ASHARED_SHFT 0x0 - -#define GSI_GSI_EVENT_CACHEATTR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000084) -#define GSI_GSI_EVENT_CACHEATTR_RMSK 0x3f -#define GSI_GSI_EVENT_CACHEATTR_AREQPRIORITY_BMSK 0x30 -#define GSI_GSI_EVENT_CACHEATTR_AREQPRIORITY_SHFT 0x4 -#define GSI_GSI_EVENT_CACHEATTR_ATRANSIENT_BMSK 0x8 -#define GSI_GSI_EVENT_CACHEATTR_ATRANSIENT_SHFT 0x3 -#define GSI_GSI_EVENT_CACHEATTR_ANOALLOCATE_BMSK 0x4 -#define GSI_GSI_EVENT_CACHEATTR_ANOALLOCATE_SHFT 0x2 -#define GSI_GSI_EVENT_CACHEATTR_AINNERSHARED_BMSK 0x2 -#define GSI_GSI_EVENT_CACHEATTR_AINNERSHARED_SHFT 0x1 -#define GSI_GSI_EVENT_CACHEATTR_ASHARED_BMSK 0x1 -#define GSI_GSI_EVENT_CACHEATTR_ASHARED_SHFT 0x0 - -#define GSI_GSI_DATA_CACHEATTR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000088) -#define GSI_GSI_DATA_CACHEATTR_RMSK 0x3f -#define GSI_GSI_DATA_CACHEATTR_AREQPRIORITY_BMSK 0x30 -#define GSI_GSI_DATA_CACHEATTR_AREQPRIORITY_SHFT 0x4 -#define GSI_GSI_DATA_CACHEATTR_ATRANSIENT_BMSK 0x8 -#define GSI_GSI_DATA_CACHEATTR_ATRANSIENT_SHFT 0x3 -#define GSI_GSI_DATA_CACHEATTR_ANOALLOCATE_BMSK 0x4 -#define GSI_GSI_DATA_CACHEATTR_ANOALLOCATE_SHFT 0x2 -#define GSI_GSI_DATA_CACHEATTR_AINNERSHARED_BMSK 0x2 -#define GSI_GSI_DATA_CACHEATTR_AINNERSHARED_SHFT 0x1 -#define GSI_GSI_DATA_CACHEATTR_ASHARED_BMSK 0x1 
-#define GSI_GSI_DATA_CACHEATTR_ASHARED_SHFT 0x0 - -#define GSI_GSI_TRE_CACHEATTR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000090) -#define GSI_GSI_TRE_CACHEATTR_RMSK 0x3f -#define GSI_GSI_TRE_CACHEATTR_AREQPRIORITY_BMSK 0x30 -#define GSI_GSI_TRE_CACHEATTR_AREQPRIORITY_SHFT 0x4 -#define GSI_GSI_TRE_CACHEATTR_ATRANSIENT_BMSK 0x8 -#define GSI_GSI_TRE_CACHEATTR_ATRANSIENT_SHFT 0x3 -#define GSI_GSI_TRE_CACHEATTR_ANOALLOCATE_BMSK 0x4 -#define GSI_GSI_TRE_CACHEATTR_ANOALLOCATE_SHFT 0x2 -#define GSI_GSI_TRE_CACHEATTR_AINNERSHARED_BMSK 0x2 -#define GSI_GSI_TRE_CACHEATTR_AINNERSHARED_SHFT 0x1 -#define GSI_GSI_TRE_CACHEATTR_ASHARED_BMSK 0x1 -#define GSI_GSI_TRE_CACHEATTR_ASHARED_SHFT 0x0 - #define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x000000a0) #define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047 @@ -475,123 +334,6 @@ #define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1 #define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0 -#define GSI_IC_INT_WEIGHT_REE_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000100) -#define GSI_IC_INT_WEIGHT_REE_RMSK 0xfff -#define GSI_IC_INT_WEIGHT_REE_CH_EMPTY_INT_WEIGHT_BMSK 0xf00 -#define GSI_IC_INT_WEIGHT_REE_CH_EMPTY_INT_WEIGHT_SHFT 0x8 -#define GSI_IC_INT_WEIGHT_REE_NEW_RE_INT_WEIGHT_BMSK 0xf0 -#define GSI_IC_INT_WEIGHT_REE_NEW_RE_INT_WEIGHT_SHFT 0x4 -#define GSI_IC_INT_WEIGHT_REE_STOP_CH_COMP_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_REE_STOP_CH_COMP_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_EVT_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000104) -#define GSI_IC_INT_WEIGHT_EVT_ENG_RMSK 0xf -#define GSI_IC_INT_WEIGHT_EVT_ENG_EVNT_ENG_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_EVT_ENG_EVNT_ENG_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_INT_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000108) -#define GSI_IC_INT_WEIGHT_INT_ENG_RMSK 0xf -#define GSI_IC_INT_WEIGHT_INT_ENG_INT_ENG_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_INT_ENG_INT_ENG_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_CSR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000010c) -#define GSI_IC_INT_WEIGHT_CSR_RMSK 0xffffff -#define GSI_IC_INT_WEIGHT_CSR_CH_START_CMD_INT_WEIGHT_BMSK 0xf00000 -#define GSI_IC_INT_WEIGHT_CSR_CH_START_CMD_INT_WEIGHT_SHFT 0x14 -#define GSI_IC_INT_WEIGHT_CSR_CH_STOP_CMD_INT_WEIGHT_BMSK 0xf0000 -#define GSI_IC_INT_WEIGHT_CSR_CH_STOP_CMD_INT_WEIGHT_SHFT 0x10 -#define GSI_IC_INT_WEIGHT_CSR_CH_RESET_CMD_INT_WEIGHT_BMSK 0xf000 -#define GSI_IC_INT_WEIGHT_CSR_CH_RESET_CMD_INT_WEIGHT_SHFT 0xc -#define GSI_IC_INT_WEIGHT_CSR_CH_ALLOC_CMD_INT_WEIGHT_BMSK 0xf00 -#define GSI_IC_INT_WEIGHT_CSR_CH_ALLOC_CMD_INT_WEIGHT_SHFT 0x8 -#define GSI_IC_INT_WEIGHT_CSR_EV_RESET_CMD_INT_WEIGHT_BMSK 0xf0 -#define GSI_IC_INT_WEIGHT_CSR_EV_RESET_CMD_INT_WEIGHT_SHFT 0x4 -#define GSI_IC_INT_WEIGHT_CSR_EV_ALLOC_CMD_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_CSR_EV_ALLOC_CMD_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_TLV_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000110) -#define GSI_IC_INT_WEIGHT_TLV_ENG_RMSK 0xf -#define GSI_IC_INT_WEIGHT_TLV_ENG_TLV_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_TLV_ENG_TLV_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_TIMER_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000114) -#define GSI_IC_INT_WEIGHT_TIMER_ENG_RMSK 0xf -#define GSI_IC_INT_WEIGHT_TIMER_ENG_TIMER_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_TIMER_ENG_TIMER_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_DB_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000118) -#define GSI_IC_INT_WEIGHT_DB_ENG_RMSK 0xf -#define 
GSI_IC_INT_WEIGHT_DB_ENG_NEW_DB_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_DB_ENG_NEW_DB_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_RD_WR_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000011c) -#define GSI_IC_INT_WEIGHT_RD_WR_ENG_RMSK 0xff -#define GSI_IC_INT_WEIGHT_RD_WR_ENG_WRITE_INT_WEIGHT_BMSK 0xf0 -#define GSI_IC_INT_WEIGHT_RD_WR_ENG_WRITE_INT_WEIGHT_SHFT 0x4 -#define GSI_IC_INT_WEIGHT_RD_WR_ENG_READ_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_RD_WR_ENG_READ_INT_WEIGHT_SHFT 0x0 - -#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000120) -#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_RMSK 0xf -#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_GP_INT_WEIGHT_BMSK 0xf -#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_GP_INT_WEIGHT_SHFT 0x0 - -#define GSI_GSI_MANAGER_EE_QOS_n_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00000300 + 0x4 * (n)) -#define GSI_GSI_MANAGER_EE_QOS_n_RMSK 0x1f1f03 -#define GSI_GSI_MANAGER_EE_QOS_n_MAXn 3 -#define GSI_GSI_MANAGER_EE_QOS_n_MAX_EV_ALLOC_BMSK 0x1f0000 -#define GSI_GSI_MANAGER_EE_QOS_n_MAX_EV_ALLOC_SHFT 0x10 -#define GSI_GSI_MANAGER_EE_QOS_n_MAX_CH_ALLOC_BMSK 0x1f00 -#define GSI_GSI_MANAGER_EE_QOS_n_MAX_CH_ALLOC_SHFT 0x8 -#define GSI_GSI_MANAGER_EE_QOS_n_EE_PRIO_BMSK 0x3 -#define GSI_GSI_MANAGER_EE_QOS_n_EE_PRIO_SHFT 0x0 - -#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000200) -#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK 0xffff -#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK 0xffff -#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT 0x0 - -#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000204) -#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK 0xffff -#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK 0xffff -#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT 0x0 - -#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000208) -#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK 0xffff -#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_BMSK 0xffff -#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_SHFT 0x0 - -#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000020c) -#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK 0xffff -#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_BMSK 0xffff -#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_SHFT 0x0 - -#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000240) -#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK 0xffff -#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_BMSK 0xffff -#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_SHFT 0x0 - -#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000244) -#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK 0xffff -#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_BMSK 0xffff -#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_SHFT 0x0 - #define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000400) #define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff @@ -604,6 +346,12 @@ #define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff #define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0 +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x00000408) +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_RMSK 0xfff +#define GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_BMSK 0xfff +#define 
GSI_V2_5_GSI_IRAM_PTR_TLV_CH_NOT_FULL_IRAM_PTR_SHFT 0x0 + #define GSI_GSI_IRAM_PTR_CH_DB_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000418) #define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff @@ -682,17 +430,22 @@ #define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff #define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0 -#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_OFFS \ +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x0000044c) -#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_RMSK 0xfff -#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_BMSK 0xfff -#define GSI_GSI_IRAM_PTR_INT_MOD_STOPPED_IRAM_PTR_SHFT 0x0 +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_IRAM_PTR_SHFT 0x0 #define GSI_GSI_INST_RAM_n_WORD_SZ 0x4 #define GSI_GSI_INST_RAM_n_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) +#define GSI_V2_5_GSI_INST_RAM_n_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001b000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n)) #define GSI_GSI_INST_RAM_n_RMSK 0xffffffff #define GSI_GSI_INST_RAM_n_MAXn 4095 +#define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143 +#define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095 +#define GSI_V2_5_GSI_INST_RAM_n_MAXn 8191 #define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000 #define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18 #define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000 @@ -702,342 +455,16 @@ #define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff #define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0 -#define GSI_GSI_SHRAM_n_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * (n)) -#define GSI_GSI_SHRAM_n_RMSK 0xffffffff -#define GSI_GSI_SHRAM_n_MAXn 1023 -#define GSI_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff -#define GSI_GSI_SHRAM_n_SHRAM_SHFT 0x0 - -#define GSI_GSI_TEST_BUS_SEL_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001000) -#define GSI_GSI_TEST_BUS_SEL_RMSK 0xff -#define GSI_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff -#define GSI_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0 - -#define GSI_GSI_TEST_BUS_REG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001008) -#define GSI_GSI_TEST_BUS_REG_RMSK 0xffffffff -#define GSI_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff -#define GSI_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0 - -#define GSI_GSI_DEBUG_BUSY_REG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001010) -#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff -#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80 -#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7 -#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40 -#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6 -#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20 -#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_SHFT 0x5 -#define GSI_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_BMSK 0x10 -#define GSI_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_SHFT 0x4 -#define GSI_GSI_DEBUG_BUSY_REG_TIMER_BUSY_BMSK 0x8 -#define GSI_GSI_DEBUG_BUSY_REG_TIMER_BUSY_SHFT 0x3 -#define GSI_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK 0x4 -#define GSI_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT 0x2 -#define GSI_GSI_DEBUG_BUSY_REG_REE_BUSY_BMSK 0x2 -#define GSI_GSI_DEBUG_BUSY_REG_REE_BUSY_SHFT 0x1 -#define GSI_GSI_DEBUG_BUSY_REG_CSR_BUSY_BMSK 0x1 -#define GSI_GSI_DEBUG_BUSY_REG_CSR_BUSY_SHFT 0x0 - -#define GSI_GSI_DEBUG_COUNTER_CFGn_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001200 + 0x4 * (n)) -#define GSI_GSI_DEBUG_COUNTER_CFGn_RMSK 0x3ffffff7 -#define GSI_GSI_DEBUG_COUNTER_CFGn_MAXn 7 -#define GSI_GSI_DEBUG_COUNTER_CFGn_TIMER_VALUE_BMSK 0x3ff80000 -#define GSI_GSI_DEBUG_COUNTER_CFGn_TIMER_VALUE_SHFT 0x13 -#define 
GSI_GSI_DEBUG_COUNTER_CFGn_VIRTUAL_CHNL_BMSK 0x7f000 -#define GSI_GSI_DEBUG_COUNTER_CFGn_VIRTUAL_CHNL_SHFT 0xc -#define GSI_GSI_DEBUG_COUNTER_CFGn_EE_BMSK 0xf00 -#define GSI_GSI_DEBUG_COUNTER_CFGn_EE_SHFT 0x8 -#define GSI_GSI_DEBUG_COUNTER_CFGn_EVNT_TYPE_BMSK 0xf0 -#define GSI_GSI_DEBUG_COUNTER_CFGn_EVNT_TYPE_SHFT 0x4 -#define GSI_GSI_DEBUG_COUNTER_CFGn_CLR_AT_READ_BMSK 0x4 -#define GSI_GSI_DEBUG_COUNTER_CFGn_CLR_AT_READ_SHFT 0x2 -#define GSI_GSI_DEBUG_COUNTER_CFGn_STOP_AT_WRAP_ARND_BMSK 0x2 -#define GSI_GSI_DEBUG_COUNTER_CFGn_STOP_AT_WRAP_ARND_SHFT 0x1 -#define GSI_GSI_DEBUG_COUNTER_CFGn_ENABLE_BMSK 0x1 -#define GSI_GSI_DEBUG_COUNTER_CFGn_ENABLE_SHFT 0x0 - -#define GSI_GSI_DEBUG_COUNTERn_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001240 + 0x4 * (n)) -#define GSI_GSI_DEBUG_COUNTERn_RMSK 0xffff -#define GSI_GSI_DEBUG_COUNTERn_MAXn 7 -#define GSI_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff -#define GSI_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0 - -#define GSI_GSI_DEBUG_PC_FROM_SW_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001040) -#define GSI_GSI_DEBUG_PC_FROM_SW_RMSK 0xfff -#define GSI_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_BMSK 0xfff -#define GSI_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_SHFT 0x0 - -#define GSI_GSI_DEBUG_SW_STALL_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001044) -#define GSI_GSI_DEBUG_SW_STALL_RMSK 0x1 -#define GSI_GSI_DEBUG_SW_STALL_MCS_STALL_BMSK 0x1 -#define GSI_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT 0x0 - -#define GSI_GSI_DEBUG_PC_FOR_DEBUG_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001048) -#define GSI_GSI_DEBUG_PC_FOR_DEBUG_RMSK 0xfff -#define GSI_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_BMSK 0xfff -#define GSI_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_SEL_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001050) -#define GSI_GSI_DEBUG_QSB_LOG_SEL_RMSK 0xffff01 -#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_MID_BMSK 0xff0000 -#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_MID_SHFT 0x10 -#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_TID_BMSK 0xff00 -#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_TID_SHFT 0x8 -#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_WRITE_BMSK 0x1 -#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_WRITE_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_CLR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001058) -#define GSI_GSI_DEBUG_QSB_LOG_CLR_RMSK 0x1 -#define GSI_GSI_DEBUG_QSB_LOG_CLR_LOG_CLR_BMSK 0x1 -#define GSI_GSI_DEBUG_QSB_LOG_CLR_LOG_CLR_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001060) -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK 0x1ffff01 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK 0x1000000 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_SHFT 0x18 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK 0xff0000 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT 0x10 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_BMSK 0xff00 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_SHFT 0x8 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_BMSK 0x1 -#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_0_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001064) -#define GSI_GSI_DEBUG_QSB_LOG_0_RMSK 0xffffffff -#define GSI_GSI_DEBUG_QSB_LOG_0_ADDR_31_0_BMSK 0xffffffff -#define GSI_GSI_DEBUG_QSB_LOG_0_ADDR_31_0_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_1_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00001068) -#define GSI_GSI_DEBUG_QSB_LOG_1_RMSK 0xfff7ffff -#define GSI_GSI_DEBUG_QSB_LOG_1_AREQPRIORITY_BMSK 0xf0000000 -#define GSI_GSI_DEBUG_QSB_LOG_1_AREQPRIORITY_SHFT 0x1c -#define GSI_GSI_DEBUG_QSB_LOG_1_ASIZE_BMSK 0xf000000 -#define 
GSI_GSI_DEBUG_QSB_LOG_1_ASIZE_SHFT 0x18 -#define GSI_GSI_DEBUG_QSB_LOG_1_ALEN_BMSK 0xf00000 -#define GSI_GSI_DEBUG_QSB_LOG_1_ALEN_SHFT 0x14 -#define GSI_GSI_DEBUG_QSB_LOG_1_AOOOWR_BMSK 0x40000 -#define GSI_GSI_DEBUG_QSB_LOG_1_AOOOWR_SHFT 0x12 -#define GSI_GSI_DEBUG_QSB_LOG_1_AOOORD_BMSK 0x20000 -#define GSI_GSI_DEBUG_QSB_LOG_1_AOOORD_SHFT 0x11 -#define GSI_GSI_DEBUG_QSB_LOG_1_ATRANSIENT_BMSK 0x10000 -#define GSI_GSI_DEBUG_QSB_LOG_1_ATRANSIENT_SHFT 0x10 -#define GSI_GSI_DEBUG_QSB_LOG_1_ACACHEABLE_BMSK 0x8000 -#define GSI_GSI_DEBUG_QSB_LOG_1_ACACHEABLE_SHFT 0xf -#define GSI_GSI_DEBUG_QSB_LOG_1_ASHARED_BMSK 0x4000 -#define GSI_GSI_DEBUG_QSB_LOG_1_ASHARED_SHFT 0xe -#define GSI_GSI_DEBUG_QSB_LOG_1_ANOALLOCATE_BMSK 0x2000 -#define GSI_GSI_DEBUG_QSB_LOG_1_ANOALLOCATE_SHFT 0xd -#define GSI_GSI_DEBUG_QSB_LOG_1_AINNERSHARED_BMSK 0x1000 -#define GSI_GSI_DEBUG_QSB_LOG_1_AINNERSHARED_SHFT 0xc -#define GSI_GSI_DEBUG_QSB_LOG_1_ADDR_43_32_BMSK 0xfff -#define GSI_GSI_DEBUG_QSB_LOG_1_ADDR_43_32_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_2_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000106c) -#define GSI_GSI_DEBUG_QSB_LOG_2_RMSK 0xffff -#define GSI_GSI_DEBUG_QSB_LOG_2_AMEMTYPE_BMSK 0xf000 -#define GSI_GSI_DEBUG_QSB_LOG_2_AMEMTYPE_SHFT 0xc -#define GSI_GSI_DEBUG_QSB_LOG_2_AMMUSID_BMSK 0xfff -#define GSI_GSI_DEBUG_QSB_LOG_2_AMMUSID_SHFT 0x0 - -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001070 + 0x4 * (n)) -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_RMSK 0xffffffff -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MAXn 3 -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MID_BMSK 0xf8000000 -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MID_SHFT 0x1b -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_TID_BMSK 0x7c00000 -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_TID_SHFT 0x16 -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_WRITE_BMSK 0x200000 -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_WRITE_SHFT 0x15 -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR_20_0_BMSK 0x1fffff -#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR_20_0_SHFT 0x0 - -#define GSI_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001080 + 0x4 * (n)) -#define GSI_GSI_DEBUG_SW_RF_n_WRITE_RMSK 0xffffffff -#define GSI_GSI_DEBUG_SW_RF_n_WRITE_MAXn 31 -#define GSI_GSI_DEBUG_SW_RF_n_WRITE_DATA_IN_BMSK 0xffffffff -#define GSI_GSI_DEBUG_SW_RF_n_WRITE_DATA_IN_SHFT 0x0 - -#define GSI_GSI_DEBUG_SW_RF_n_READ_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001100 + 0x4 * (n)) -#define GSI_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff -#define GSI_GSI_DEBUG_SW_RF_n_READ_MAXn 31 -#define GSI_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff -#define GSI_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0 - -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(k, n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001400 + 0x80 * (n) + 0x4 * (k)) -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_RMSK 0x3f -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_MAXk 30 -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_MAXn 3 -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20 -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5 -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f -#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0 - -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(k, n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00001600 + 0x80 * (n) + 0x4 * (k)) -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 15 -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3 -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20 -#define 
GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5 -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f -#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0 - -#define GSI_GSI_UC_SRC_IRQ_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000500) -#define GSI_GSI_UC_SRC_IRQ_RMSK 0xf -#define GSI_GSI_UC_SRC_IRQ_IC_2_UC_MCS_INT_VLD_BMSK 0x8 -#define GSI_GSI_UC_SRC_IRQ_IC_2_UC_MCS_INT_VLD_SHFT 0x3 -#define GSI_GSI_UC_SRC_IRQ_ACC_2_UC_MCS_GO_ACK_BMSK 0x4 -#define GSI_GSI_UC_SRC_IRQ_ACC_2_UC_MCS_GO_ACK_SHFT 0x2 -#define GSI_GSI_UC_SRC_IRQ_UC_ACC_CMPLT_BMSK 0x2 -#define GSI_GSI_UC_SRC_IRQ_UC_ACC_CMPLT_SHFT 0x1 -#define GSI_GSI_UC_SRC_IRQ_UC_ACC_GO_BMSK 0x1 -#define GSI_GSI_UC_SRC_IRQ_UC_ACC_GO_SHFT 0x0 - -#define GSI_GSI_UC_SRC_IRQ_MSK_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000504) -#define GSI_GSI_UC_SRC_IRQ_MSK_RMSK 0xf -#define GSI_GSI_UC_SRC_IRQ_MSK_IC_2_UC_MCS_INT_VLD_BMSK 0x8 -#define GSI_GSI_UC_SRC_IRQ_MSK_IC_2_UC_MCS_INT_VLD_SHFT 0x3 -#define GSI_GSI_UC_SRC_IRQ_MSK_ACC_2_UC_MCS_GO_ACK_BMSK 0x4 -#define GSI_GSI_UC_SRC_IRQ_MSK_ACC_2_UC_MCS_GO_ACK_SHFT 0x2 -#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_CMPLT_BMSK 0x2 -#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_CMPLT_SHFT 0x1 -#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_GO_BMSK 0x1 -#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_GO_SHFT 0x0 - -#define GSI_GSI_UC_SRC_IRQ_CLR_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000508) -#define GSI_GSI_UC_SRC_IRQ_CLR_RMSK 0xf -#define GSI_GSI_UC_SRC_IRQ_CLR_IC_2_UC_MCS_INT_VLD_BMSK 0x8 -#define GSI_GSI_UC_SRC_IRQ_CLR_IC_2_UC_MCS_INT_VLD_SHFT 0x3 -#define GSI_GSI_UC_SRC_IRQ_CLR_ACC_2_UC_MCS_GO_ACK_BMSK 0x4 -#define GSI_GSI_UC_SRC_IRQ_CLR_ACC_2_UC_MCS_GO_ACK_SHFT 0x2 -#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_CMPLT_BMSK 0x2 -#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_CMPLT_SHFT 0x1 -#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_GO_BMSK 0x1 -#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_GO_SHFT 0x0 - -#define GSI_GSI_ACC_ARGS_n_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0000050c + 0x4 * (n)) -#define GSI_GSI_ACC_ARGS_n_RMSK 0xffffffff -#define GSI_GSI_ACC_ARGS_n_MAXn 5 -#define GSI_GSI_ACC_ARGS_n_GSI_ACC_ARGS_BMSK 0xffffffff -#define GSI_GSI_ACC_ARGS_n_GSI_ACC_ARGS_SHFT 0x0 - -#define GSI_GSI_ACC_ROUTINE_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000524) -#define GSI_GSI_ACC_ROUTINE_RMSK 0xffffffff -#define GSI_GSI_ACC_ROUTINE_GSI_ACC_ROUTINE_BMSK 0xffffffff -#define GSI_GSI_ACC_ROUTINE_GSI_ACC_ROUTINE_SHFT 0x0 - -#define GSI_GSI_ACC_GO_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000528) -#define GSI_GSI_ACC_GO_RMSK 0x7f -#define GSI_GSI_ACC_GO_TIMER_GO_BMSK 0x40 -#define GSI_GSI_ACC_GO_TIMER_GO_SHFT 0x6 -#define GSI_GSI_ACC_GO_RW_ENG_GO_BMSK 0x20 -#define GSI_GSI_ACC_GO_RW_ENG_GO_SHFT 0x5 -#define GSI_GSI_ACC_GO_INT_ENG_GO_BMSK 0x10 -#define GSI_GSI_ACC_GO_INT_ENG_GO_SHFT 0x4 -#define GSI_GSI_ACC_GO_TLV_OUT_GO_BMSK 0x8 -#define GSI_GSI_ACC_GO_TLV_OUT_GO_SHFT 0x3 -#define GSI_GSI_ACC_GO_CSR_GO_BMSK 0x4 -#define GSI_GSI_ACC_GO_CSR_GO_SHFT 0x2 -#define GSI_GSI_ACC_GO_RE_ENG_GO_BMSK 0x2 -#define GSI_GSI_ACC_GO_RE_ENG_GO_SHFT 0x1 -#define GSI_GSI_ACC_GO_EV_ENG_GO_BMSK 0x1 -#define GSI_GSI_ACC_GO_EV_ENG_GO_SHFT 0x0 - -#define GSI_GSI_ACC_2_UC_MCS_STTS_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000052c) -#define GSI_GSI_ACC_2_UC_MCS_STTS_RMSK 0xffffffff -#define GSI_GSI_ACC_2_UC_MCS_STTS_GSI_ACC_2_UC_MCS_STTS_BMSK 0xffffffff -#define GSI_GSI_ACC_2_UC_MCS_STTS_GSI_ACC_2_UC_MCS_STTS_SHFT 0x0 - -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000530) -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_RMSK 0xffffffff -#define 
GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_GSI_ACC_2_UC_MCS_RET_VAL_BMSK \ - 0xffffffff -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_GSI_ACC_2_UC_MCS_RET_VAL_SHFT \ - 0x0 - -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000534) -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_RMSK 0xffffffff -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_GSI_ACC_2_UC_MCS_RET_VAL_BMSK \ - 0xffffffff -#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_GSI_ACC_2_UC_MCS_RET_VAL_SHFT \ - 0x0 - -#define GSI_GSI_IC_2_UC_MCS_VLD_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000538) -#define GSI_GSI_IC_2_UC_MCS_VLD_RMSK 0xffffffff -#define GSI_GSI_IC_2_UC_MCS_VLD_GSI_IC_2_UC_MCS_VLD_BMSK 0xffffffff -#define GSI_GSI_IC_2_UC_MCS_VLD_GSI_IC_2_UC_MCS_VLD_SHFT 0x0 - -#define GSI_GSI_IC_2_UC_MCS_PC_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000053c) -#define GSI_GSI_IC_2_UC_MCS_PC_RMSK 0xffffffff -#define GSI_GSI_IC_2_UC_MCS_PC_GSI_IC_2_UC_MCS_PC_BMSK 0xffffffff -#define GSI_GSI_IC_2_UC_MCS_PC_GSI_IC_2_UC_MCS_PC_SHFT 0x0 - -#define GSI_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00000540 + 0x4 * (n)) -#define GSI_GSI_IC_2_UC_MCS_ARGS_n_RMSK 0xffffffff -#define GSI_GSI_IC_2_UC_MCS_ARGS_n_MAXn 5 -#define GSI_GSI_IC_2_UC_MCS_ARGS_n_GSI_IC_2_UC_MCS_ARGS_BMSK 0xffffffff -#define GSI_GSI_IC_2_UC_MCS_ARGS_n_GSI_IC_2_UC_MCS_ARGS_SHFT 0x0 - -#define GSI_GSI_UC_TLV_IN_VLD_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x00000558) -#define GSI_GSI_UC_TLV_IN_VLD_RMSK 0x1 -#define GSI_GSI_UC_TLV_IN_VLD_GSI_UC_TLV_IN_VLD_BMSK 0x1 -#define GSI_GSI_UC_TLV_IN_VLD_GSI_UC_TLV_IN_VLD_SHFT 0x0 - -#define GSI_GSI_UC_TLV_IN_ROUTINE_OFFS \ - (GSI_GSI_REG_BASE_OFFS + 0x0000055c) -#define GSI_GSI_UC_TLV_IN_ROUTINE_RMSK 0xffffffff -#define GSI_GSI_UC_TLV_IN_ROUTINE_GSI_UC_TLV_IN_ROUTINE_BMSK 0xffffffff -#define GSI_GSI_UC_TLV_IN_ROUTINE_GSI_UC_TLV_IN_ROUTINE_SHFT 0x0 - -#define GSI_GSI_UC_TLV_IN_ARGS_n_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x00000560 + 0x4 * (n)) -#define GSI_GSI_UC_TLV_IN_ARGS_n_RMSK 0xffffffff -#define GSI_GSI_UC_TLV_IN_ARGS_n_MAXn 5 -#define GSI_GSI_UC_TLV_IN_ARGS_n_GSI_UC_TLV_IN_ARGS_BMSK 0xffffffff -#define GSI_GSI_UC_TLV_IN_ARGS_n_GSI_UC_TLV_IN_ARGS_SHFT 0x0 - #define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c000 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7dfff -#define GSI_EE_n_GSI_CH_k_CNTXT_0_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_0_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 #define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 #define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 #define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14 #define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000 #define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe +#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_BMSK 0x2000 +#define GSI_V2_5_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_MSB_SHFT 0xd #define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00 #define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8 #define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0 @@ -1049,57 +476,36 @@ #define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c004 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff -#define GSI_EE_n_GSI_CH_k_CNTXT_1_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_1_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff #define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c008 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_2_RMSK 
0xffffffff -#define GSI_EE_n_GSI_CH_k_CNTXT_2_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_2_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c00c + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_CNTXT_3_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_3_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c010 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_CNTXT_4_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_4_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c014 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_CNTXT_5_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_5_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c018 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_CNTXT_6_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_6_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c01c + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_CNTXT_7_MAXk 30 -#define GSI_EE_n_GSI_CH_k_CNTXT_7_MAXn 3 #define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 @@ -1133,43 +539,42 @@ #define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf #define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0000f05c + 0x4000 * (n) + 0x80 * (k)) +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_BMSK 0xff0000 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_EMPTY_LVL_THRSHOLD_SHFT 0x10 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_BMSK 0x3c00 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_PREFETCH_MODE_SHFT 0xa +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8 +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf +#define GSI_V2_5_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0 + + #define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c060 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_SCRATCH_0_MAXk 30 -#define GSI_EE_n_GSI_CH_k_SCRATCH_0_MAXn 3 #define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c064 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_SCRATCH_1_MAXk 30 -#define GSI_EE_n_GSI_CH_k_SCRATCH_1_MAXn 3 #define 
GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c068 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_SCRATCH_2_MAXk 30 -#define GSI_EE_n_GSI_CH_k_SCRATCH_2_MAXn 3 #define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001c06c + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_SCRATCH_3_MAXk 30 -#define GSI_EE_n_GSI_CH_k_SCRATCH_3_MAXn 3 #define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d000 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff -#define GSI_EE_n_EV_CH_k_CNTXT_0_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_0_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000 #define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18 #define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000 @@ -1185,65 +590,41 @@ #define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d004 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff -#define GSI_EE_n_EV_CH_k_CNTXT_1_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_1_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff #define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d008 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_2_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_2_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d00c + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_3_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_3_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d010 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_4_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_4_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d014 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_5_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_5_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d018 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_6_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_6_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d01c + 0x4000 * (n) + 0x80 
* (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_7_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_7_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d020 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_8_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_8_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000 #define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18 #define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000 @@ -1253,103 +634,66 @@ #define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d024 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_9_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_9_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d028 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_10_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_10_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d02c + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_11_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_11_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d030 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_12_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_12_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d034 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_CNTXT_13_MAXk 15 -#define GSI_EE_n_EV_CH_k_CNTXT_13_MAXn 3 #define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d048 + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_SCRATCH_0_MAXk 15 -#define GSI_EE_n_EV_CH_k_SCRATCH_0_MAXn 3 #define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0 #define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001d04c + 0x4000 * (n) + 0x80 * (k)) -#define GSI_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_SCRATCH_1_MAXk 15 -#define GSI_EE_n_EV_CH_k_SCRATCH_1_MAXn 3 #define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001e000 + 0x4000 * (n) + 0x8 * (k)) -#define GSI_EE_n_GSI_CH_k_DOORBELL_0_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_DOORBELL_0_MAXk 30 -#define GSI_EE_n_GSI_CH_k_DOORBELL_0_MAXn 3 #define 
GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 #define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001e004 + 0x4000 * (n) + 0x8 * (k)) -#define GSI_EE_n_GSI_CH_k_DOORBELL_1_RMSK 0xffffffff -#define GSI_EE_n_GSI_CH_k_DOORBELL_1_MAXk 30 -#define GSI_EE_n_GSI_CH_k_DOORBELL_1_MAXn 3 #define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff #define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001e100 + 0x4000 * (n) + 0x8 * (k)) -#define GSI_EE_n_EV_CH_k_DOORBELL_0_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_DOORBELL_0_MAXk 15 -#define GSI_EE_n_EV_CH_k_DOORBELL_0_MAXn 3 #define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0 #define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001e104 + 0x4000 * (n) + 0x8 * (k)) -#define GSI_EE_n_EV_CH_k_DOORBELL_1_RMSK 0xffffffff -#define GSI_EE_n_EV_CH_k_DOORBELL_1_MAXk 15 -#define GSI_EE_n_EV_CH_k_DOORBELL_1_MAXn 3 #define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff #define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0 #define GSI_EE_n_GSI_STATUS_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f000 + 0x4000 * (n)) -#define GSI_EE_n_GSI_STATUS_RMSK 0x1 -#define GSI_EE_n_GSI_STATUS_MAXn 3 #define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1 #define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0 #define GSI_EE_n_GSI_CH_CMD_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f008 + 0x4000 * (n)) -#define GSI_EE_n_GSI_CH_CMD_RMSK 0xff0000ff -#define GSI_EE_n_GSI_CH_CMD_MAXn 3 #define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000 #define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18 #define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff @@ -1357,8 +701,6 @@ #define GSI_EE_n_EV_CH_CMD_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f010 + 0x4000 * (n)) -#define GSI_EE_n_EV_CH_CMD_RMSK 0xff0000ff -#define GSI_EE_n_EV_CH_CMD_MAXn 3 #define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000 #define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18 #define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff @@ -1366,8 +708,6 @@ #define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n)) -#define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 0xffffffff -#define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3 #define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f #define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0 #define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0 @@ -1375,11 +715,8 @@ #define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00 #define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa -/* v1.0 */ #define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) -#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff -#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3 #define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000 #define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a #define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000 @@ -1393,11 +730,8 @@ #define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff #define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0 -/* v1.2 */ #define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) -#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff -#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2 #define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 #define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f #define 
GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 @@ -1411,98 +745,6 @@ #define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff #define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \ - 0x80000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \ - 0x40000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8 -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff -#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0 - -/* v1.3 */ -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff -#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 - -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n)) -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff -#define 
GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \ - 0x80000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \ - 0x40000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8 -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff -#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0 - #define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) #define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff @@ -1520,7 +762,6 @@ #define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 #define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 -/* v2.0 */ #define GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) #define GSI_V2_0_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff @@ -1548,7 +789,6 @@ #define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 #define GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 -/* v2.2 */ #define GSI_V2_2_EE_n_GSI_HW_PARAM_2_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) #define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 @@ -1580,10 +820,41 @@ #define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 #define GSI_V2_2_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00012040 + 0x4000 * (n)) +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_BMSK 0x8000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_INTER_EE_SHFT 0xf +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_BMSK 0x4000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_RD_WR_ENG_SHFT 0xE +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_BMSK 0x38000000 
+#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_IOVEC_SHFT 0x1b +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_BMSK 0x7F80000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_MAX_BURST_SHFT 0x13 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_BMSK 0x70000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_SDMA_N_INT_SHFT 0x10 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_BMSK 0x8000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_USE_SDMA_SHFT 0xf +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_N_HALF_KB_FVAL 0x2 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_KB_FVAL 0x3 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_THREE_N_HALF_KB_FVAL 0x4 +#define GSI_V2_5_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_FOUR_KB_FVAL 0x5 + #define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n)) -#define GSI_EE_n_GSI_SW_VERSION_RMSK 0xffffffff -#define GSI_EE_n_GSI_SW_VERSION_MAXn 3 #define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000 #define GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c #define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000 @@ -1591,17 +862,8 @@ #define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff #define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0 -#define GSI_EE_n_GSI_MCS_CODE_VER_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f048 + 0x4000 * (n)) -#define GSI_EE_n_GSI_MCS_CODE_VER_RMSK 0xffffffff -#define GSI_EE_n_GSI_MCS_CODE_VER_MAXn 3 -#define GSI_EE_n_GSI_MCS_CODE_VER_VER_BMSK 0xffffffff -#define GSI_EE_n_GSI_MCS_CODE_VER_VER_SHFT 0x0 - #define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f080 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f -#define GSI_EE_n_CNTXT_TYPE_IRQ_MAXn 3 #define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40 #define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6 #define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20 @@ -1619,8 +881,6 @@ #define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f088 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f -#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 3 #define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40 #define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6 #define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20 @@ -1638,74 +898,57 @@ #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f090 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 3 #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f094 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff -#define 
GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 3 #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f098 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 3 -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \ - 0xffffffff +#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x1ffff +#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK 0x7fffff #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f09c + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 3 -#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ - 0xffffffff +#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff +#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f0a0 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 3 #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f0a4 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 3 #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f0b0 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 3 #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f0b8 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 3 -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ - 0xffffffff +#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfff +#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK 0xfffff #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 +#define GSI_V2_5_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f0c0 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 3 #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 #define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f100 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf -#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 3 #define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8 #define 
GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3 #define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4 @@ -1717,8 +960,6 @@ #define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f108 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_RMSK 0xf -#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_MAXn 3 #define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8 #define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3 #define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4 @@ -1730,8 +971,6 @@ #define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f110 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_RMSK 0xf -#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_MAXn 3 #define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 0x8 #define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3 #define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4 @@ -1743,8 +982,6 @@ #define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f118 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf -#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 3 #define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 #define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 #define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 @@ -1756,8 +993,6 @@ #define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f120 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_GSI_IRQ_EN_RMSK 0xf -#define GSI_EE_n_CNTXT_GSI_IRQ_EN_MAXn 3 #define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 #define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 #define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 @@ -1769,8 +1004,6 @@ #define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f128 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_RMSK 0xf -#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_MAXn 3 #define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8 #define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3 #define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4 @@ -1782,128 +1015,49 @@ #define GSI_EE_n_CNTXT_INTSET_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f180 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_INTSET_RMSK 0x1 -#define GSI_EE_n_CNTXT_INTSET_MAXn 3 #define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1 #define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0 -#define GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f188 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MAXn 3 -#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK 0xffffffff -#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0 - -#define GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f18c + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MAXn 3 -#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK 0xffffffff -#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0 - -#define GSI_EE_n_CNTXT_INT_VEC_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f190 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_INT_VEC_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_INT_VEC_MAXn 3 -#define GSI_EE_n_CNTXT_INT_VEC_INT_VEC_BMSK 0xffffffff -#define GSI_EE_n_CNTXT_INT_VEC_INT_VEC_SHFT 0x0 - #define GSI_EE_n_ERROR_LOG_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f200 + 0x4000 * (n)) -#define GSI_EE_n_ERROR_LOG_RMSK 0xffffffff -#define GSI_EE_n_ERROR_LOG_MAXn 3 #define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff #define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0 #define 
GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f210 + 0x4000 * (n)) -#define GSI_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff -#define GSI_EE_n_ERROR_LOG_CLR_MAXn 3 #define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff #define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0 #define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f400 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SCRATCH_0_MAXn 3 #define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff #define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0 -#define GSI_EE_n_CNTXT_SCRATCH_1_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0001f404 + 0x4000 * (n)) -#define GSI_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff -#define GSI_EE_n_CNTXT_SCRATCH_1_MAXn 3 -#define GSI_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff -#define GSI_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0 - -#define GSI_INTER_EE_n_ORIGINATOR_EE_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0000c000 + 0x1000 * (n)) -#define GSI_INTER_EE_n_ORIGINATOR_EE_RMSK 0xf -#define GSI_INTER_EE_n_ORIGINATOR_EE_MAXn 3 -#define GSI_INTER_EE_n_ORIGINATOR_EE_EE_NUMBER_BMSK 0xf -#define GSI_INTER_EE_n_ORIGINATOR_EE_EE_NUMBER_SHFT 0x0 - -#define GSI_INTER_EE_n_GSI_CH_CMD_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0000c008 + 0x1000 * (n)) -#define GSI_INTER_EE_n_GSI_CH_CMD_RMSK 0xff0000ff -#define GSI_INTER_EE_n_GSI_CH_CMD_MAXn 3 -#define GSI_INTER_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000 -#define GSI_INTER_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18 -#define GSI_INTER_EE_n_GSI_CH_CMD_CHID_BMSK 0xff -#define GSI_INTER_EE_n_GSI_CH_CMD_CHID_SHFT 0x0 - -#define GSI_INTER_EE_n_EV_CH_CMD_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0000c010 + 0x1000 * (n)) -#define GSI_INTER_EE_n_EV_CH_CMD_RMSK 0xff0000ff -#define GSI_INTER_EE_n_EV_CH_CMD_MAXn 3 -#define GSI_INTER_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000 -#define GSI_INTER_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18 -#define GSI_INTER_EE_n_EV_CH_CMD_CHID_BMSK 0xff -#define GSI_INTER_EE_n_EV_CH_CMD_CHID_SHFT 0x0 - #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n)) -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_RMSK 0xffffffff -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MAXn 3 #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n)) -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_RMSK 0xffffffff -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MAXn 3 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0 -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0000c020 + 0x1000 * (n)) -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3 -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \ - 0x00003fff -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 - -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \ - (GSI_GSI_REG_BASE_OFFS + 0x0000c024 + 0x1000 * (n)) -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3 -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ - 0x000003ff -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 - #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n)) -#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff -#define 
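The *_BMSK/*_SHFT pairs above follow the usual convention for these register headers: the mask selects the field bits in place and the shift moves them down to bit 0. The narrower GSI_V2_5_* masks (0x7fffff / 0xfffff versus 0x1ffff / 0xfff) are 23- and 20-bit wide instead of 17- and 12-bit wide, presumably matching the larger channel and event-ring counts of the newer GSI generation. A minimal sketch of how such a field is typically read and rewritten is shown below; ioread32()/iowrite32() and the helper names are assumptions for illustration, not part of this patch.

/*
 * Illustrative only: read-modify-write of a register field using the
 * BMSK/SHFT convention. The real driver wraps its own accessors.
 */
static inline u32 gsi_field_get(void __iomem *base, u32 ofst, u32 bmsk, u32 shft)
{
	return (ioread32(base + ofst) & bmsk) >> shft;
}

static inline void gsi_field_set(void __iomem *base, u32 ofst, u32 bmsk,
				 u32 shft, u32 val)
{
	u32 reg = ioread32(base + ofst);

	reg &= ~bmsk;			/* clear the field */
	reg |= (val << shft) & bmsk;	/* insert the new value */
	iowrite32(reg, base + ofst);
}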
GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_MAXn 3 #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n)) -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff -#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_MAXn 3 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_OFFS(k, n) \ + (GSI_GSI_REG_BASE_OFFS + 0x00003800 + 0x80 * (n) + 0x4 * (k)) +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5 +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f +#define GSI_V2_5_GSI_MAP_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0 #endif /* __GSI_REG_H__ */ diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index e514979115d09a5f48a7152e55f4c50715879736..524d0083e77d1317a2501fa699e57f5b78d99169 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -19,8 +19,16 @@ #include #include #include +#include #include "ipa_api.h" +/* + * The following for adding code (ie. for EMULATION) not found on x86. + */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_v3/ipa_emulation_stubs.h" +#endif + #define DRV_NAME "ipa" #define IPA_API_DISPATCH_RETURN(api, p...) \ @@ -94,6 +102,12 @@ } \ } while (0) +#if defined(CONFIG_IPA_EMULATION) +static bool running_emulation = true; +#else +static bool running_emulation; +#endif + static enum ipa_hw_type ipa_api_hw_type; static struct ipa_api_controller *ipa_api_ctrl; @@ -703,6 +717,26 @@ int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) } EXPORT_SYMBOL(ipa_add_hdr); +/** + * ipa_add_hdr_usr() - add the specified headers to SW and optionally + * commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_hdr_usr, hdrs, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_hdr_usr); + /** * ipa_del_hdr() - Remove the specified headers from SW and optionally * commit them to IPA HW @@ -743,15 +777,16 @@ EXPORT_SYMBOL(ipa_commit_hdr); * ipa_reset_hdr() - reset the current header table in SW (does not commit to * HW) * + * @user_only: [in] indicate delete rules installed by userspace * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_reset_hdr(void) +int ipa_reset_hdr(bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_reset_hdr); + IPA_API_DISPATCH_RETURN(ipa_reset_hdr, user_only); return ret; } @@ -821,16 +856,18 @@ EXPORT_SYMBOL(ipa_copy_hdr); * ipa_add_hdr_proc_ctx() - add the specified headers to SW * and optionally commit them to IPA HW * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs); + 
IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs, user_only); return ret; } @@ -875,6 +912,26 @@ int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) } EXPORT_SYMBOL(ipa_add_rt_rule); +/** + * ipa_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_rt_rule_usr, rules, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_rt_rule_usr); + /** * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally * commit to IPA HW @@ -917,16 +974,17 @@ EXPORT_SYMBOL(ipa_commit_rt); * ipa_reset_rt() - reset the current SW routing table of specified type * (does not commit to HW) * @ip: The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_reset_rt(enum ipa_ip_type ip) +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip); + IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip, user_only); return ret; } @@ -1009,6 +1067,7 @@ EXPORT_SYMBOL(ipa_mdfy_rt_rule); /** * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally * commit to IPA HW + * @rules: [inout] set of filtering rules to add * * Returns: 0 on success, negative on failure * @@ -1024,6 +1083,26 @@ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) } EXPORT_SYMBOL(ipa_add_flt_rule); +/** + * ipa_add_flt_rule_usr() - Add the specified filtering rules to + * SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_add_flt_rule_usr, rules, user_only); + + return ret; +} +EXPORT_SYMBOL(ipa_add_flt_rule_usr); + /** * ipa_del_flt_rule() - Remove the specified filtering rules from SW and * optionally commit to IPA HW @@ -1082,17 +1161,18 @@ EXPORT_SYMBOL(ipa_commit_flt); /** * ipa_reset_flt() - Reset the current SW filtering table of specified type * (does not commit to HW) - * @ip: [in] the family of routing tables + * @ip: [in] the family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa_reset_flt(enum ipa_ip_type ip) +int ipa_reset_flt(enum ipa_ip_type ip, bool user_only) { int ret; - IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip); + IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip, user_only); return ret; } @@ -2732,14 +2812,14 @@ int ipa_start_gsi_channel(u32 clnt_hdl) EXPORT_SYMBOL(ipa_start_gsi_channel); /** -* ipa_is_vlan_mode - check if a LAN driver should load in VLAN mode -* @iface - type of vlan capable device -* @res - query result: true for vlan mode, false for non vlan mode -* -* API must be called after ipa_is_ready() returns true, otherwise it will fail -* -* Returns: 0 on success, negative on failure -*/ + * ipa_is_vlan_mode - check if a LAN driver should load in VLAN 
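The new ipa_*_usr() entry points all funnel through IPA_API_DISPATCH_RETURN, which resolves the call against the per-HW-generation function-pointer table held in ipa_api_ctrl. The sketch below only illustrates the shape of that indirection with invented names (my_api_controller, my_add_rule); it is not the driver's actual macro expansion.

/* Illustrative sketch of dispatch through a function-pointer table. */
struct my_api_controller {
	int (*add_rule)(void *rules, bool user_only);
};

static struct my_api_controller *my_ops;	/* bound once per HW type */

int my_add_rule(void *rules, bool user_only)
{
	if (!my_ops || !my_ops->add_rule)
		return -EOPNOTSUPP;	/* API not present on this HW generation */
	return my_ops->add_rule(rules, user_only);
}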
mode + * @iface - type of vlan capable device + * @res - query result: true for vlan mode, false for non vlan mode + * + * API must be called after ipa_is_ready() returns true, otherwise it will fail + * + * Returns: 0 on success, negative on failure + */ int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res) { int ret; @@ -2823,6 +2903,57 @@ static const struct of_device_id ipa_plat_drv_match[] = { {} }; +/*********************************************************/ +/* PCIe Version */ +/*********************************************************/ + +static const struct of_device_id ipa_pci_drv_match[] = { + { .compatible = "qcom,ipa", }, + {} +}; + +/* + * Forward declarations of static functions required for PCI + * registraion + * + * VENDOR and DEVICE should be defined in pci_ids.h + */ +static int ipa_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void ipa_pci_remove(struct pci_dev *pdev); +static void ipa_pci_shutdown(struct pci_dev *pdev); +static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *dev, + pci_channel_state_t state); +static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *dev); +static void ipa_pci_io_resume(struct pci_dev *dev); + +#define LOCAL_VENDOR 0x17CB +#define LOCAL_DEVICE 0x00ff + +static const char ipa_pci_driver_name[] = "qcipav3"; + +static const struct pci_device_id ipa_pci_tbl[] = { + { PCI_DEVICE(LOCAL_VENDOR, LOCAL_DEVICE) }, + { 0, 0, 0, 0, 0, 0, 0 } +}; + +MODULE_DEVICE_TABLE(pci, ipa_pci_tbl); + +/* PCI Error Recovery */ +static const struct pci_error_handlers ipa_pci_err_handler = { + .error_detected = ipa_pci_io_error_detected, + .slot_reset = ipa_pci_io_slot_reset, + .resume = ipa_pci_io_resume, +}; + +static struct pci_driver ipa_pci_driver = { + .name = ipa_pci_driver_name, + .id_table = ipa_pci_tbl, + .probe = ipa_pci_probe, + .remove = ipa_pci_remove, + .shutdown = ipa_pci_shutdown, + .err_handler = &ipa_pci_err_handler +}; + static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) { int result; @@ -3267,10 +3398,86 @@ static struct platform_driver ipa_plat_drv = { }, }; +/*********************************************************/ +/* PCIe Version */ +/*********************************************************/ + +static int ipa_pci_probe( + struct pci_dev *pci_dev, + const struct pci_device_id *ent) +{ + int result; + + if (!pci_dev || !ent) { + pr_err( + "Bad arg: pci_dev (%pK) and/or ent (%pK)\n", + pci_dev, ent); + return -EOPNOTSUPP; + } + + if (!ipa_api_ctrl) { + ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL); + if (ipa_api_ctrl == NULL) + return -ENOMEM; + /* Get IPA HW Version */ + result = of_property_read_u32(NULL, + "qcom,ipa-hw-ver", &ipa_api_hw_type); + if (result || ipa_api_hw_type == 0) { + pr_err("ipa: get resource failed for ipa-hw-ver!\n"); + kfree(ipa_api_ctrl); + ipa_api_ctrl = NULL; + return -ENODEV; + } + pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type); + } + + /* + * Call a reduced version of platform_probe appropriate for PCIe + */ + result = ipa3_pci_drv_probe(pci_dev, ipa_api_ctrl, ipa_pci_drv_match); + + if (result && result != -EPROBE_DEFER) + pr_err("ipa: ipa3_pci_drv_probe failed\n"); + + if (running_emulation) + ipa_ut_module_init(); + + return result; +} + +static void ipa_pci_remove(struct pci_dev *pci_dev) +{ + if (running_emulation) + ipa_ut_module_exit(); +} + +static void ipa_pci_shutdown(struct pci_dev *pci_dev) +{ +} + +static pci_ers_result_t ipa_pci_io_error_detected(struct pci_dev *pci_dev, + pci_channel_state_t state) +{ + 
return 0; +} + +static pci_ers_result_t ipa_pci_io_slot_reset(struct pci_dev *pci_dev) +{ + return 0; +} + +static void ipa_pci_io_resume(struct pci_dev *pci_dev) +{ +} + static int __init ipa_module_init(void) { pr_debug("IPA module init\n"); + if (running_emulation) { + /* Register as a PCI device driver */ + return pci_register_driver(&ipa_pci_driver); + } /* Register as a platform device driver */ return platform_driver_register(&ipa_plat_drv); } diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index f0541e62d0406a7c772700e1978eb90098fec4e9..1c84f89a823f98dfb75a86f1533957201d0df980 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -68,11 +68,13 @@ struct ipa_api_controller { int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs); + int (*ipa_add_hdr_usr)(struct ipa_ioc_add_hdr *hdrs, bool user_only); + int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls); int (*ipa_commit_hdr)(void); - int (*ipa_reset_hdr)(void); + int (*ipa_reset_hdr)(bool user_only); int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup); @@ -80,17 +82,21 @@ struct ipa_api_controller { int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy); - int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); + int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls); int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules); + int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls); int (*ipa_commit_rt)(enum ipa_ip_type ip); - int (*ipa_reset_rt)(enum ipa_ip_type ip); + int (*ipa_reset_rt)(enum ipa_ip_type ip, bool user_only); int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup); @@ -102,13 +108,16 @@ struct ipa_api_controller { int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules); + int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls); int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules); int (*ipa_commit_flt)(enum ipa_ip_type ip); - int (*ipa_reset_flt)(enum ipa_ip_type ip); + int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only); int (*ipa_allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem); @@ -425,6 +434,10 @@ struct ipa_api_controller { int ipa3_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, const struct of_device_id *pdrv_match); +int ipa3_pci_drv_probe( + struct pci_dev *pci_dev, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); #else static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, @@ -432,6 +445,13 @@ static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p, { return -ENODEV; } +static inline int ipa3_pci_drv_probe( + struct pci_dev *pci_dev, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} #endif /* (CONFIG_IPA3) */ #endif /* _IPA_API_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c index 761c0a80dbe3cf741468eca02c5f5c1e2c6d9d49..6cbfa4bcc12f832e76f69c37aeaa2f0f463acaa9 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, 2017 The Linux Foundation. 
All rights reserved. +/* Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,11 +22,36 @@ #include "../ipa_v3/ipa_pm.h" #define IPA_MHI_DRV_NAME "ipa_mhi_client" + #define IPA_MHI_DBG(fmt, args...) \ - pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ - __func__, __LINE__, ## args) + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + #define IPA_MHI_ERR(fmt, args...) \ - pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + do { \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + #define IPA_MHI_FUNC_ENTRY() \ IPA_MHI_DBG("ENTRY\n") #define IPA_MHI_FUNC_EXIT() \ diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c index b937964fb459fe0aac8c9ebc70ff89c934eb35ff..be0032ccb21c83868490a58893d0c5f90cb937b5 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -1351,14 +1351,20 @@ static int ipa3_usb_request_xdci_channel( params->xfer_scratch.depcmd_low_addr; chan_params.chan_scratch.xdci.depcmd_hi_addr = params->xfer_scratch.depcmd_hi_addr; - chan_params.chan_scratch.xdci.outstanding_threshold = + + /* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + chan_params.chan_scratch.xdci.outstanding_threshold = ((params->teth_prot == IPA_USB_MBIM) ? 
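IPA_MHI_DBG/IPA_MHI_ERR now expand to several statements (console print plus IPC log copies), so they are wrapped in do { ... } while (0). That wrapper is what keeps a multi-statement macro safe inside un-braced if/else bodies; a generic illustration follows (LOG_TWICE and example() are made-up names).

/* Without do/while(0), only the first statement would be guarded by the if. */
#define LOG_TWICE(fmt, args...) \
	do { \
		pr_debug("dbg: " fmt, ## args); \
		pr_info("log: " fmt, ## args); \
	} while (0)

static void example(int error)
{
	if (error)
		LOG_TWICE("failed with %d\n", error);	/* expands safely */
	else
		LOG_TWICE("ok\n");
}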
1 : 2) * chan_params.chan_params.re_size; - - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) - chan_params.chan_scratch.xdci.outstanding_threshold = 0; - + } /* max_outstanding_tre is set in ipa3_request_gsi_channel() */ + result = ipa3_request_gsi_channel(&chan_params, out_params); if (result) { IPA_USB_ERR("failed to allocate GSI channel\n"); @@ -2937,6 +2943,7 @@ static int __init ipa3_usb_init(void) int i; unsigned long flags; int res; + struct ipa3_usb_pm_context *pm_ctx; pr_debug("entry\n"); ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL); @@ -2956,19 +2963,13 @@ static int __init ipa3_usb_init(void) ipa3_usb_ctx->dl_data_pending = false; mutex_init(&ipa3_usb_ctx->general_mutex); - if (ipa_pm_is_used()) { - struct ipa3_usb_pm_context *pm_ctx; - - pm_ctx = - &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx; - pm_ctx->hdl = ~0; - pm_ctx->remote_wakeup_work = - &ipa3_usb_notify_remote_wakeup_work; - pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx; - pm_ctx->hdl = ~0; - pm_ctx->remote_wakeup_work = - &ipa3_usb_dpl_notify_remote_wakeup_work; - } + /* init PM related members */ + pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx; + pm_ctx->hdl = ~0; + pm_ctx->remote_wakeup_work = &ipa3_usb_notify_remote_wakeup_work; + pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx; + pm_ctx->hdl = ~0; + pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work; for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) { ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false; diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index 92f1ab65b70fc2692cd6e11c4f743544b39ea43b..530aa545a328ec8ea1954de40567d0d7ab648746 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -15,6 +15,7 @@ #ifndef _IPA_COMMON_I_H_ #define _IPA_COMMON_I_H_ +#include #include #include #include @@ -333,11 +334,13 @@ struct ipa_mhi_connect_params_internal { * @link: entry's link in global header offset entries list * @offset: the offset * @bin: bin + * @ipacm_installed: indicate if installed by ipacm */ struct ipa_hdr_offset_entry { struct list_head link; u32 offset; u32 bin; + bool ipacm_installed; }; extern const char *ipa_clients_strings[]; @@ -439,4 +442,7 @@ int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, struct sg_table *in_sgt_ptr); int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr); +int ipa_ut_module_init(void); +void ipa_ut_module_exit(void); + #endif /* _IPA_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile index 9901e642a6a56fc0922f92c5987146cb5de378bd..f6f08b1523935722976042da73dc97515f686685 100644 --- a/drivers/platform/msm/ipa/ipa_v3/Makefile +++ b/drivers/platform/msm/ipa/ipa_v3/Makefile @@ -6,6 +6,8 @@ ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \ ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o +ipat-$(CONFIG_IPA_EMULATION) += ipa_dt_replacement.o + obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 92120c009a2cee7da687a0b789494a844268e7e8..74334115521b294f640f6b36d3b9676d83d85e70 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -24,7 +24,6 @@ #include #include 
#include -#include #include #include #include @@ -35,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -60,171 +60,14 @@ #define CREATE_TRACE_POINTS #include "ipa_trace.h" -#define IPA_GPIO_IN_QUERY_CLK_IDX 0 -#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0 -#define IPA_GPIO_OUT_CLK_VOTE_IDX 1 -#define IPA_SMP2P_SMEM_STATE_MASK 3 - - -#define IPA_SUMMING_THRESHOLD (0x10) -#define IPA_PIPE_MEM_START_OFST (0x0) -#define IPA_PIPE_MEM_SIZE (0x0) -#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \ - x == IPA_MODE_MOBILE_AP_WAN || \ - x == IPA_MODE_MOBILE_AP_WLAN) -#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL) -#define IPA_A5_MUX_HEADER_LENGTH (8) - -#define IPA_AGGR_MAX_STR_LENGTH (10) - -#define CLEANUP_TAG_PROCESS_TIMEOUT 500 - -#define IPA_AGGR_STR_IN_BYTES(str) \ - (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) - -#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100 - -#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048 - -#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0 -#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1 -#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2 -#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3 - -#define IPA_MHI_GSI_EVENT_RING_ID_START 10 -#define IPA_MHI_GSI_EVENT_RING_ID_END 12 - -#define IPA_SMEM_SIZE (8 * 1024) - -#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000 -#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000 -#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10 - -/* round addresses for closes page per SMMU requirements */ -#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \ - do { \ - (iova_p) = rounddown((iova), PAGE_SIZE); \ - (pa_p) = rounddown((pa), PAGE_SIZE); \ - (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \ - } while (0) - - -/* The relative location in /lib/firmware where the FWs will reside */ -#define IPA_FWS_PATH "ipa/ipa_fws.elf" +/* + * The following for adding code (ie. for EMULATION) not found on x86. 
+ */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_emulation_stubs.h" +#endif #ifdef CONFIG_COMPAT -#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ADD_HDR, \ - compat_uptr_t) -#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_DEL_HDR, \ - compat_uptr_t) -#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ADD_RT_RULE, \ - compat_uptr_t) -#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_DEL_RT_RULE, \ - compat_uptr_t) -#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ADD_FLT_RULE, \ - compat_uptr_t) -#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_DEL_FLT_RULE, \ - compat_uptr_t) -#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_GET_RT_TBL, \ - compat_uptr_t) -#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_COPY_HDR, \ - compat_uptr_t) -#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_QUERY_INTF, \ - compat_uptr_t) -#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_QUERY_INTF_TX_PROPS, \ - compat_uptr_t) -#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_QUERY_INTF_RX_PROPS, \ - compat_uptr_t) -#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ - compat_uptr_t) -#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_GET_HDR, \ - compat_uptr_t) -#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ALLOC_NAT_MEM, \ - compat_uptr_t) -#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ALLOC_NAT_TABLE, \ - compat_uptr_t) -#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ALLOC_IPV6CT_TABLE, \ - compat_uptr_t) -#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_V4_INIT_NAT, \ - compat_uptr_t) -#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_INIT_IPV6CT_TABLE, \ - compat_uptr_t) -#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_TABLE_DMA_CMD, \ - compat_uptr_t) -#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_V4_DEL_NAT, \ - compat_uptr_t) -#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_DEL_NAT_TABLE, \ - compat_uptr_t) -#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_DEL_IPV6CT_TABLE, \ - compat_uptr_t) -#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_NAT_MODIFY_PDN, \ - compat_uptr_t) -#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_GET_NAT_OFFSET, \ - compat_uptr_t) -#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_PULL_MSG, \ - compat_uptr_t) -#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_RM_ADD_DEPENDENCY, \ - compat_uptr_t) -#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_RM_DEL_DEPENDENCY, \ - compat_uptr_t) -#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_GENERATE_FLT_EQ, \ - compat_uptr_t) -#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_QUERY_RT_TBL_INDEX, \ - compat_uptr_t) -#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_WRITE_QMAPID, \ - compat_uptr_t) -#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_MDFY_FLT_RULE, \ - compat_uptr_t) -#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ - compat_uptr_t) -#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ - compat_uptr_t) -#define 
IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ - compat_uptr_t) -#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_ADD_HDR_PROC_CTX, \ - compat_uptr_t) -#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_DEL_HDR_PROC_CTX, \ - compat_uptr_t) -#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ - IPA_IOCTL_MDFY_RT_RULE, \ - compat_uptr_t) - /** * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation * properties @@ -248,8 +91,7 @@ struct ipa_ioc_nat_ipv6ct_table_alloc32 { compat_size_t size; compat_off_t offset; }; - -#endif +#endif /* #ifdef CONFIG_COMPAT */ #define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 #define TZ_MEM_PROTECT_REGION_ID 0x10 @@ -289,6 +131,7 @@ static struct ipa3_plat_drv_res ipa3_res = {0, }; static struct clk *ipa3_clk; struct ipa3_context *ipa3_ctx; + static struct { bool present[IPA_SMMU_CB_MAX]; bool arm_smmu; @@ -373,6 +216,43 @@ int ipa3_active_clients_log_print_table(char *buf, int size) return cnt; } +static int ipa3_clean_modem_rule(void) +{ + struct ipa_install_fltr_rule_req_msg_v01 *req; + struct ipa_install_fltr_rule_req_ex_msg_v01 *req_ex; + int val = 0; + + if (ipa3_ctx->ipa_hw_type < IPA_HW_v3_0) { + req = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req->filter_spec_list_valid = false; + req->filter_spec_list_len = 0; + req->source_pipe_index_valid = 0; + val = ipa3_qmi_filter_request_send(req); + kfree(req); + } else { + req_ex = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_ex_msg_v01), + GFP_KERNEL); + if (!req_ex) { + IPAERR("mem allocated failed!\n"); + return -ENOMEM; + } + req_ex->filter_spec_ex_list_valid = false; + req_ex->filter_spec_ex_list_len = 0; + req_ex->source_pipe_index_valid = 0; + val = ipa3_qmi_filter_request_ex_send(req_ex); + kfree(req_ex); + } + + return val; +} + static int ipa3_active_clients_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr) { @@ -619,10 +499,15 @@ static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type) return; } - if (type != ADD_VLAN_IFACE && - type != DEL_VLAN_IFACE && - type != ADD_L2TP_VLAN_MAPPING && - type != DEL_L2TP_VLAN_MAPPING) { + switch (type) { + case ADD_VLAN_IFACE: + case DEL_VLAN_IFACE: + case ADD_L2TP_VLAN_MAPPING: + case DEL_L2TP_VLAN_MAPPING: + case ADD_BRIDGE_VLAN_MAPPING: + case DEL_BRIDGE_VLAN_MAPPING: + break; + default: IPAERR("Wrong type given. 
buff %pK type %d\n", buff, type); return; } @@ -635,10 +520,17 @@ static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) int retval; struct ipa_ioc_vlan_iface_info *vlan_info; struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info; + struct ipa_ioc_bridge_vlan_mapping_info *bridge_vlan_info; struct ipa_msg_meta msg_meta; + void *buff; + + IPADBG("type %d\n", msg_type); + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; - if (msg_type == ADD_VLAN_IFACE || - msg_type == DEL_VLAN_IFACE) { + if ((msg_type == ADD_VLAN_IFACE) || + (msg_type == DEL_VLAN_IFACE)) { vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info), GFP_KERNEL); if (!vlan_info) @@ -650,18 +542,10 @@ static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) return -EFAULT; } - memset(&msg_meta, 0, sizeof(msg_meta)); - msg_meta.msg_type = msg_type; msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info); - retval = ipa3_send_msg(&msg_meta, vlan_info, - ipa3_vlan_l2tp_msg_free_cb); - if (retval) { - IPAERR("ipa3_send_msg failed: %d\n", retval); - kfree(vlan_info); - return retval; - } - } else if (msg_type == ADD_L2TP_VLAN_MAPPING || - msg_type == DEL_L2TP_VLAN_MAPPING) { + buff = vlan_info; + } else if ((msg_type == ADD_L2TP_VLAN_MAPPING) || + (msg_type == DEL_L2TP_VLAN_MAPPING)) { mapping_info = kzalloc(sizeof(struct ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL); if (!mapping_info) @@ -674,22 +558,44 @@ static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) return -EFAULT; } - memset(&msg_meta, 0, sizeof(msg_meta)); - msg_meta.msg_type = msg_type; msg_meta.msg_len = sizeof(struct ipa_ioc_l2tp_vlan_mapping_info); - retval = ipa3_send_msg(&msg_meta, mapping_info, - ipa3_vlan_l2tp_msg_free_cb); - if (retval) { - IPAERR("ipa3_send_msg failed: %d\n", retval); - kfree(mapping_info); - return retval; + buff = mapping_info; + } else if ((msg_type == ADD_BRIDGE_VLAN_MAPPING) || + (msg_type == DEL_BRIDGE_VLAN_MAPPING)) { + bridge_vlan_info = kzalloc( + sizeof(struct ipa_ioc_bridge_vlan_mapping_info), + GFP_KERNEL); + if (!bridge_vlan_info) + return -ENOMEM; + + if (copy_from_user((u8 *)bridge_vlan_info, + (void __user *)usr_param, + sizeof(struct ipa_ioc_bridge_vlan_mapping_info))) { + kfree(bridge_vlan_info); + IPAERR("copy from user failed\n"); + return -EFAULT; } + + msg_meta.msg_len = sizeof(struct + ipa_ioc_bridge_vlan_mapping_info); + buff = bridge_vlan_info; } else { IPAERR("Unexpected event\n"); return -EFAULT; } + retval = ipa3_send_msg(&msg_meta, buff, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d, msg_type %d\n", + retval, + msg_type); + kfree(buff); + return retval; + } + IPADBG("exit\n"); + return 0; } @@ -919,7 +825,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) { + if (ipa3_add_hdr_usr((struct ipa_ioc_add_hdr *)param, + true)) { retval = -EFAULT; break; } @@ -999,7 +906,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) { + if (ipa3_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param, + true)) { retval = -EFAULT; break; } @@ -1203,7 +1111,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + if (ipa3_add_flt_rule_usr((struct 
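The refactor of ipa3_send_vlan_l2tp_msg() collapses the per-type send paths into one flow: allocate the type-specific payload, copy it from user space, fill msg_meta, and hand ownership to ipa3_send_msg() with a free callback. The outline below restates that flow with a hypothetical payload type (struct my_mapping) and event (MY_EVENT) to make the ownership hand-off explicit; it mirrors, rather than replaces, the code in the hunk.

/* Illustrative flow only; struct my_mapping and MY_EVENT are invented. */
static void my_msg_free_cb(void *buff, u32 len, u32 type)
{
	kfree(buff);
}

static int send_user_msg(unsigned long usr_param)
{
	struct ipa_msg_meta meta = { .msg_type = MY_EVENT };
	struct my_mapping *buff;
	int ret;

	buff = kzalloc(sizeof(*buff), GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	if (copy_from_user(buff, (void __user *)usr_param, sizeof(*buff))) {
		kfree(buff);
		return -EFAULT;
	}

	meta.msg_len = sizeof(*buff);
	/* On success the free callback owns buff; free it only on failure. */
	ret = ipa3_send_msg(&meta, buff, my_msg_free_cb);
	if (ret)
		kfree(buff);
	return ret;
}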
ipa_ioc_add_flt_rule *)param, + true)) { retval = -EFAULT; break; } @@ -1340,19 +1249,19 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa3_commit_hdr(); break; case IPA_IOC_RESET_HDR: - retval = ipa3_reset_hdr(); + retval = ipa3_reset_hdr(false); break; case IPA_IOC_COMMIT_RT: retval = ipa3_commit_rt(arg); break; case IPA_IOC_RESET_RT: - retval = ipa3_reset_rt(arg); + retval = ipa3_reset_rt(arg, false); break; case IPA_IOC_COMMIT_FLT: retval = ipa3_commit_flt(arg); break; case IPA_IOC_RESET_FLT: - retval = ipa3_reset_flt(arg); + retval = ipa3_reset_flt(arg, false); break; case IPA_IOC_GET_RT_TBL: if (copy_from_user(header, (const void __user *)arg, @@ -1740,7 +1649,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } if (ipa3_add_hdr_proc_ctx( - (struct ipa_ioc_add_hdr_proc_ctx *)param)) { + (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) { retval = -EFAULT; break; } @@ -1839,7 +1748,18 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } break; - + case IPA_IOC_ADD_BRIDGE_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_BRIDGE_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_DEL_BRIDGE_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_BRIDGE_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; case IPA_IOC_ADD_L2TP_VLAN_MAPPING: if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) { retval = -EFAULT; @@ -1854,6 +1774,21 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; + case IPA_IOC_CLEANUP: + /*Route and filter rules will also be clean*/ + IPADBG("Got IPA_IOC_CLEANUP\n"); + retval = ipa3_reset_hdr(true); + memset(&nat_del, 0, sizeof(nat_del)); + nat_del.table_index = 0; + retval = ipa3_nat_del_cmd(&nat_del); + retval = ipa3_clean_modem_rule(); + break; + + case IPA_IOC_QUERY_WLAN_CLIENT: + IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n"); + retval = ipa3_resend_wlan_msg(); + break; + default: IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; @@ -2549,9 +2484,12 @@ static int ipa3_q6_set_ex_path_to_apps(void) /* disable statuses for all modem controlled prod pipes */ if (IPA_CLIENT_IS_Q6_PROD(client_idx) || (ipa3_ctx->ep[ep_idx].valid && - ipa3_ctx->ep[ep_idx].skip_ep_cfg)) { + ipa3_ctx->ep[ep_idx].skip_ep_cfg) || + (ipa3_ctx->ep[ep_idx].client == IPA_CLIENT_APPS_WAN_PROD + && ipa3_ctx->modem_cfg_emb_pipe_flt)) { ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes); + ipa3_ctx->ep[ep_idx].status.status_en = false; reg_write.skip_pipeline_clear = false; reg_write.pipeline_clear_options = IPAHAL_HPS_CLEAR; @@ -2710,6 +2648,16 @@ int _ipa_init_sram_v3(void) u32 *ipa_sram_mmio; unsigned long phys_addr; + IPADBG( + "ipa_wrapper_base(0x%08X) ipa_reg_base_ofst(0x%08X) IPA_SRAM_DIRECT_ACCESS_n(0x%08X) smem_restricted_bytes(0x%08X) smem_sz(0x%08X)\n", + ipa3_ctx->ipa_wrapper_base, + ipa3_ctx->ctrl->ipa_reg_base_ofst, + ipahal_get_reg_n_ofst( + IPA_SRAM_DIRECT_ACCESS_n, + ipa3_ctx->smem_restricted_bytes / 4), + ipa3_ctx->smem_restricted_bytes, + ipa3_ctx->smem_sz); + phys_addr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst + ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, @@ -3226,21 +3174,27 @@ static int ipa3_setup_apps_pipes(void) } IPADBG("Apps to IPA cmd pipe is connected\n"); + IPADBG("Will initialize SRAM\n"); ipa3_ctx->ctrl->ipa_init_sram(); IPADBG("SRAM initialized\n"); + IPADBG("Will initialize HDR\n"); ipa3_ctx->ctrl->ipa_init_hdr(); IPADBG("HDR initialized\n"); + 
IPADBG("Will initialize V4 RT\n"); ipa3_ctx->ctrl->ipa_init_rt4(); IPADBG("V4 RT initialized\n"); + IPADBG("Will initialize V6 RT\n"); ipa3_ctx->ctrl->ipa_init_rt6(); IPADBG("V6 RT initialized\n"); + IPADBG("Will initialize V4 FLT\n"); ipa3_ctx->ctrl->ipa_init_flt4(); IPADBG("V4 FLT initialized\n"); + IPADBG("Will initialize V6 FLT\n"); ipa3_ctx->ctrl->ipa_init_flt6(); IPADBG("V6 FLT initialized\n"); @@ -3537,6 +3491,12 @@ static const struct file_operations ipa3_drv_fops = { static int ipa3_get_clks(struct device *dev) { + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + IPADBG("No clock manipulation when running emulation\n"); + ipa3_clk = NULL; + return 0; + } + if (ipa3_res.use_bw_vote) { IPADBG("Vote IPA clock by bw voting via bus scaling driver\n"); ipa3_clk = NULL; @@ -4353,7 +4313,7 @@ static void ipa3_freeze_clock_vote_and_notify_modem(void) qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, BIT(IPA_SMP2P_SMEM_STATE_MASK), - BIT(ipa3_ctx->smp2p_info.ipa_clk_on | (1<<1))); + BIT(ipa3_ctx->smp2p_info.ipa_clk_on | (1 << 1))); ipa3_ctx->smp2p_info.res_sent = true; IPADBG("IPA clocks are %s\n", @@ -4368,10 +4328,9 @@ void ipa3_reset_freeze_vote(void) if (ipa3_ctx->smp2p_info.ipa_clk_on) IPA_ACTIVE_CLIENTS_DEC_SPECIAL("FREEZE_VOTE"); - gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + - IPA_GPIO_OUT_CLK_VOTE_IDX, 0); - gpio_set_value(ipa3_ctx->smp2p_info.out_base_id + - IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 0); + qcom_smem_state_update_bits(ipa3_ctx->smp2p_info.smem_state, + BIT(IPA_SMP2P_SMEM_STATE_MASK), + BIT(ipa3_ctx->smp2p_info.ipa_clk_on | (1 << 1))); ipa3_ctx->smp2p_info.res_sent = false; ipa3_ctx->smp2p_info.ipa_clk_on = false; @@ -4390,7 +4349,7 @@ static int ipa3_panic_notifier(struct notifier_block *this, IPAERR("uC panic handler failed %d\n", res); if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0) - ipahal_print_all_regs(); + ipahal_print_all_regs(false); return NOTIFY_DONE; } @@ -4421,21 +4380,6 @@ static void ipa3_trigger_ipa_ready_cbs(void) mutex_unlock(&ipa3_ctx->lock); } -static int ipa3_gsi_pre_fw_load_init(void) -{ - int result; - - result = gsi_configure_regs(ipa3_res.transport_mem_base, - ipa3_res.transport_mem_size, - ipa3_res.ipa_mem_base); - if (result) { - IPAERR("Failed to configure GSI registers\n"); - return -EINVAL; - } - - return 0; -} - static void ipa3_uc_is_loaded(void) { IPADBG("\n"); @@ -4478,6 +4422,22 @@ static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type) return gsi_ver; } +static int ipa3_gsi_pre_fw_load_init(void) +{ + int result; + + result = gsi_configure_regs(ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size, + ipa3_res.ipa_mem_base, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + if (result) { + IPAERR("Failed to configure GSI registers\n"); + return -EINVAL; + } + + return 0; +} + /** * ipa3_post_init() - Initialize the IPA Driver (Part II). * This part contains all initialization which requires interaction with @@ -4526,12 +4486,20 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, /* move proxy vote for modem on ipa3_post_init */ if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0) ipa3_proxy_clk_vote(); - /* SMMU was already attached if used, safe to do allocations */ - if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio, - ipa3_ctx->pdev)) { - IPAERR("fail to init ipahal\n"); - result = -EFAULT; - goto fail_ipahal; + + /* + * In Virtual and Emulation mode, IPAHAL initialized at + * pre_init as there is no SMMU. 
In normal mode need to wait + * until SMMU is attached and thus initialization done here. + */ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL && + ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { + if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio, + ipa3_ctx->pdev)) { + IPAERR("fail to init ipahal\n"); + result = -EFAULT; + goto fail_ipahal; + } } result = ipa3_init_hw(); @@ -4666,9 +4634,18 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type); gsi_props.ee = resource_p->ee; gsi_props.intr = GSI_INTR_IRQ; - gsi_props.irq = resource_p->transport_irq; gsi_props.phys_addr = resource_p->transport_mem_base; gsi_props.size = resource_p->transport_mem_size; + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + gsi_props.irq = resource_p->emulator_irq; + gsi_props.emulator_intcntrlr_client_isr = ipa3_get_isr(); + gsi_props.emulator_intcntrlr_addr = + resource_p->emulator_intcntrlr_mem_base; + gsi_props.emulator_intcntrlr_size = + resource_p->emulator_intcntrlr_mem_size; + } else { + gsi_props.irq = resource_p->transport_irq; + } gsi_props.notify_cb = ipa_gsi_notify_cb; gsi_props.req_clk_cb = NULL; gsi_props.rel_clk_cb = NULL; @@ -4736,12 +4713,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, ipa3_register_panic_hdlr(); + ipa3_debugfs_init(); + mutex_lock(&ipa3_ctx->lock); ipa3_ctx->ipa_initialization_complete = true; mutex_unlock(&ipa3_ctx->lock); - ipa3_debugfs_init(); - ipa3_trigger_ipa_ready_cbs(); complete_all(&ipa3_ctx->init_completion_obj); pr_info("IPA driver initialization was successful.\n"); @@ -4754,15 +4731,17 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false); fail_register_device: ipa3_destroy_flt_tbl_idrs(); - ipa3_proxy_clk_unvote(); fail_allok_pkt_init: ipa3_nat_ipv6ct_destroy_devices(); fail_nat_ipv6ct_init_dev: ipa3_free_dma_task_for_gsi(); fail_dma_task: fail_init_hw: - ipahal_destroy(); + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL && + ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) + ipahal_destroy(); fail_ipahal: + ipa3_proxy_clk_unvote(); return result; } @@ -4771,10 +4750,24 @@ static int ipa3_manual_load_ipa_fws(void) { int result; const struct firmware *fw; + const char *path = IPA_FWS_PATH; - IPADBG("Manual FW loading process initiated\n"); + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + switch (ipa3_get_emulation_type()) { + case IPA_HW_v3_5_1: + path = IPA_FWS_PATH_3_5_1; + break; + case IPA_HW_v4_0: + path = IPA_FWS_PATH_4_0; + break; + default: + break; + } + } - result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->cdev.dev); + IPADBG("Manual FW loading (%s) process initiated\n", path); + + result = request_firmware(&fw, path, ipa3_ctx->cdev.dev); if (result < 0) { IPAERR("request_firmware failed, error %d\n", result); return result; @@ -4782,7 +4775,16 @@ static int ipa3_manual_load_ipa_fws(void) IPADBG("FWs are available for loading\n"); - result = ipa3_load_fws(fw, ipa3_res.transport_mem_base); + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + result = emulator_load_fws(fw, + ipa3_res.transport_mem_base, + ipa3_res.transport_mem_size, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + } else { + result = ipa3_load_fws(fw, ipa3_res.transport_mem_base, + ipa3_get_gsi_ver(ipa3_res.ipa_hw_type)); + } + if (result) { IPAERR("Manual IPA FWs loading has failed\n"); release_firmware(fw); @@ -4801,6 +4803,7 @@ static int ipa3_manual_load_ipa_fws(void) 
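In the emulation case ipa3_manual_load_ipa_fws() picks a firmware image by IPA generation before calling request_firmware(). The fragment below shows only the minimal request/parse/release life cycle around that call, with a placeholder path string; the actual parsing and programming done by ipa3_load_fws()/emulator_load_fws() is not reproduced here.

/* Minimal firmware life cycle; "ipa/example_fws.elf" is a placeholder path. */
static int load_example_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "ipa/example_fws.elf", dev);
	if (ret < 0)
		return ret;

	/* ... parse fw->data / fw->size and program the hardware here ... */

	release_firmware(fw);
	return 0;
}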
release_firmware(fw); IPADBG("Manual FW loading process is complete\n"); + return 0; } @@ -4834,7 +4837,8 @@ static void ipa3_load_ipa_fw(struct work_struct *work) IPA_ACTIVE_CLIENTS_INC_SIMPLE(); - if (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)) + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION && + (ipa3_is_msm_device() || (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5))) result = ipa3_pil_load_ipa_fws(); else result = ipa3_manual_load_ipa_fws(); @@ -5106,6 +5110,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa; ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa; ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm; + ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie; ipa3_ctx->ipa3_active_clients_logging.log_rdy = false; ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config; ipa3_ctx->mhi_evid_limits[0] = resource_p->mhi_evid_limits[0]; @@ -5164,7 +5169,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, goto fail_init_mem_partition; } - if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) { + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL && + ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { ipa3_ctx->ctrl->msm_bus_data_ptr = msm_bus_cl_get_pdata(ipa3_ctx->master_pdev); if (ipa3_ctx->ctrl->msm_bus_data_ptr == NULL) { @@ -5215,6 +5221,30 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, goto fail_remap; } + IPADBG( + "base(0x%x)+offset(0x%x)=(0x%x) mapped to (%pK) with len (0x%x)\n", + resource_p->ipa_mem_base, + ipa3_ctx->ctrl->ipa_reg_base_ofst, + resource_p->ipa_mem_base + ipa3_ctx->ctrl->ipa_reg_base_ofst, + ipa3_ctx->mmio, + resource_p->ipa_mem_size); + + /* + * In Virtual and Emulation mode, IPAHAL used to load the + * firmwares and there is no SMMU so IPAHAL is initialized + * here + */ + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + if (ipahal_init(ipa3_ctx->ipa_hw_type, + ipa3_ctx->mmio, + &(ipa3_ctx->master_pdev->dev))) { + IPAERR("fail to init ipahal\n"); + result = -EFAULT; + goto fail_ipahal_init; + } + } + mutex_init(&ipa3_ctx->ipa3_active_clients.mutex); IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); @@ -5344,6 +5374,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, init_waitqueue_head(&ipa3_ctx->msg_waitq); mutex_init(&ipa3_ctx->msg_lock); + /* store wlan client-connect-msg-list */ + INIT_LIST_HEAD(&ipa3_ctx->msg_wlan_client_list); + mutex_init(&ipa3_ctx->msg_wlan_client_lock); + mutex_init(&ipa3_ctx->lock); mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex); mutex_init(&ipa3_ctx->ipa_cne_evt_lock); @@ -5420,10 +5454,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, * We can't register the GSI driver yet, as it expects * the GSI FW to be up and running before the registration. * - * For IPA3.0, the GSI configuration is done by the GSI driver. + * For IPA3.0 and the emulation system, the GSI configuration + * is done by the GSI driver. + * * For IPA3.1 (and on), the GSI configuration is done by TZ. 
*/ - if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) { + if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0 || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { result = ipa3_gsi_pre_fw_load_init(); if (result) { IPAERR("gsi pre FW loading config failed\n"); @@ -5501,6 +5538,10 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, fail_create_transport_wq: destroy_workqueue(ipa3_ctx->power_mgmt_wq); fail_init_hw: + if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL || + ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) + ipahal_destroy(); +fail_ipahal_init: iounmap(ipa3_ctx->mmio); fail_remap: ipa3_disable_clks(); @@ -5826,6 +5867,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, &ipa_drv_res->ee); if (result) ipa_drv_res->ee = 0; + IPADBG(":ee = %u\n", ipa_drv_res->ee); ipa_drv_res->apply_rg10_wa = of_property_read_bool(pdev->dev.of_node, @@ -5838,7 +5880,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, of_property_read_bool(pdev->dev.of_node, "qcom,do-not-use-ch-gsi-20"); IPADBG(": GSI CH 20 WA is = %s\n", - ipa_drv_res->apply_rg10_wa + ipa_drv_res->gsi_ch20_wa ? "Needed" : "Not needed"); elem_num = of_property_count_elems_of_size(pdev->dev.of_node, @@ -5917,6 +5959,32 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, return result; } + ipa_drv_res->wdi_over_pcie = + of_property_read_bool(pdev->dev.of_node, + "qcom,wlan-ce-db-over-pcie"); + IPADBG("Is wdi_over_pcie ? (%s)\n", + ipa3_ctx->wdi_over_pcie ? "Yes":"No"); + + /* + * If we're on emulator, get its interrupt controller's mem + * start and size + */ + if (ipa_drv_res->ipa3_hw_mode == IPA_HW_MODE_EMULATION) { + resource = platform_get_resource_byname( + pdev, IORESOURCE_MEM, "intctrl-base"); + if (!resource) { + IPAERR(":Can't find intctrl-base resource\n"); + return -ENODEV; + } + ipa_drv_res->emulator_intcntrlr_mem_base = + resource->start; + ipa_drv_res->emulator_intcntrlr_mem_size = + resource_size(resource); + IPADBG(":using intctrl-base at 0x%x of size 0x%x\n", + ipa_drv_res->emulator_intcntrlr_mem_base, + ipa_drv_res->emulator_intcntrlr_mem_size); + } + return 0; } @@ -6229,15 +6297,17 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) } IPADBG("AP/USB SMMU atomic set\n"); - if (iommu_domain_set_attr(cb->mapping->domain, + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->mapping->domain, DOMAIN_ATTR_FAST, &fast)) { - IPAERR("couldn't set fast map\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; + IPAERR("couldn't set fast map\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); } - IPADBG("SMMU fast map set\n"); } pr_info("IPA smmu_info.s1_bypass_arr[AP]=%d smmu_info.fast_map=%d\n", @@ -6376,7 +6446,7 @@ static int ipa3_smp2p_probe(struct device *dev) qcom_smem_state_get(dev, "ipa-smp2p-out", &ipa3_ctx->smp2p_info.smem_bit); if (IS_ERR(ipa3_ctx->smp2p_info.smem_state)) { - IPAERR("fail to get smp2p clk resp bit %d\n", + IPAERR("fail to get smp2p clk resp bit %ld\n", PTR_ERR(ipa3_ctx->smp2p_info.smem_state)); return PTR_ERR(ipa3_ctx->smp2p_info.smem_state); } @@ -6707,5 +6777,216 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in, return 0; } +/************************************************************** + * PCIe Version + *************************************************************/ + +int ipa3_pci_drv_probe( + struct pci_dev *pci_dev, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + int result; + 
struct ipa3_plat_drv_res *ipa_drv_res; + u32 bar0_offset; + u32 mem_start; + u32 mem_end; + uint32_t bits; + uint32_t ipa_start, gsi_start, intctrl_start; + struct device *dev; + static struct platform_device platform_dev; + + if (!pci_dev || !api_ctrl || !pdrv_match) { + IPAERR( + "Bad arg: pci_dev (%pK) and/or api_ctrl (%pK) and/or pdrv_match (%pK)\n", + pci_dev, api_ctrl, pdrv_match); + return -EOPNOTSUPP; + } + + dev = &(pci_dev->dev); + + IPADBG("IPA PCI driver probing started\n"); + + /* + * Follow PCI driver flow here. + * pci_enable_device: Enables device and assigns resources + * pci_request_region: Makes BAR0 address region usable + */ + result = pci_enable_device(pci_dev); + if (result < 0) { + IPAERR("pci_enable_device() failed\n"); + return -EOPNOTSUPP; + } + + result = pci_request_region(pci_dev, 0, "IPA Memory"); + if (result < 0) { + IPAERR("pci_request_region() failed\n"); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + /* + * When in the PCI/emulation environment, &platform_dev is + * passed to get_ipa_dts_configuration(), but is unused, since + * all usages of it in the function are replaced by CPP + * relative to definitions in ipa_emulation_stubs.h. Passing + * &platform_dev makes code validity tools happy. + */ + if (get_ipa_dts_configuration(&platform_dev, &ipa3_res) != 0) { + IPAERR("get_ipa_dts_configuration() failed\n"); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + ipa_drv_res = &ipa3_res; + + result = + of_property_read_u32(NULL, "emulator-bar0-offset", + &bar0_offset); + if (result) { + IPAERR(":get resource failed for emulator-bar0-offset!\n"); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -ENODEV; + } + IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset); + + ipa_start = ipa_drv_res->ipa_mem_base; + gsi_start = ipa_drv_res->transport_mem_base; + intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base; + + /* + * Where will we be inerrupted at? + */ + ipa_drv_res->emulator_irq = pci_dev->irq; + IPADBG( + "EMULATION PCI_INTERRUPT_PIN(%u)\n", + ipa_drv_res->emulator_irq); + + /* + * Set the ipa_mem_base to the PCI base address of BAR0 + */ + mem_start = pci_resource_start(pci_dev, 0); + mem_end = pci_resource_end(pci_dev, 0); + + IPADBG("PCI START = 0x%x\n", mem_start); + IPADBG("PCI END = 0x%x\n", mem_end); + + ipa_drv_res->ipa_mem_base = mem_start + bar0_offset; + + smmu_info.ipa_base = ipa_drv_res->ipa_mem_base; + smmu_info.ipa_size = ipa_drv_res->ipa_mem_size; + + ipa_drv_res->transport_mem_base = + ipa_drv_res->ipa_mem_base + (gsi_start - ipa_start); + + ipa_drv_res->emulator_intcntrlr_mem_base = + ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start); + + IPADBG("ipa_mem_base = 0x%x\n", + ipa_drv_res->ipa_mem_base); + IPADBG("ipa_mem_size = 0x%x\n", + ipa_drv_res->ipa_mem_size); + + IPADBG("transport_mem_base = 0x%x\n", + ipa_drv_res->transport_mem_base); + IPADBG("transport_mem_size = 0x%x\n", + ipa_drv_res->transport_mem_size); + + IPADBG("emulator_intcntrlr_mem_base = 0x%x\n", + ipa_drv_res->emulator_intcntrlr_mem_base); + IPADBG("emulator_intcntrlr_mem_size = 0x%x\n", + ipa_drv_res->emulator_intcntrlr_mem_size); + + result = ipa3_bind_api_controller(ipa_drv_res->ipa_hw_type, api_ctrl); + if (result != 0) { + IPAERR("ipa3_bind_api_controller() failed\n"); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return result; + } + + bits = (ipa_drv_res->use_64_bit_dma_mask) ? 
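The probe rebases the device-tree register windows onto PCI BAR0 by preserving each block's offset relative to the IPA base. A small worked example with made-up addresses, kept as a comment in the same style as the surrounding code, may help make that arithmetic concrete.

/*
 * Worked example with made-up addresses:
 *   DTS view:  ipa_start = 0x01e00000, gsi_start = 0x01e04000
 *   PCI view:  BAR0 starts at 0xd0000000, emulator-bar0-offset = 0x00100000
 *
 *   ipa_mem_base       = 0xd0000000 + 0x00100000              = 0xd0100000
 *   transport_mem_base = 0xd0100000 + (0x01e04000 - 0x01e00000) = 0xd0104000
 *
 * The GSI window keeps its 0x4000 offset from the IPA base; only the base
 * itself moves to wherever the emulator's BAR0 was assigned.
 */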
64 : 32; + + if (dma_set_mask(dev, DMA_BIT_MASK(bits)) != 0) { + IPAERR("dma_set_mask(%pK, %u) failed\n", dev, bits); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + if (dma_set_coherent_mask(dev, DMA_BIT_MASK(bits)) != 0) { + IPAERR("dma_set_coherent_mask(%pK, %u) failed\n", dev, bits); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return -EOPNOTSUPP; + } + + pci_set_master(pci_dev); + + memset(&platform_dev, 0, sizeof(platform_dev)); + platform_dev.dev = *dev; + + /* Proceed to real initialization */ + result = ipa3_pre_init(&ipa3_res, &platform_dev); + if (result) { + IPAERR("ipa3_init failed\n"); + pci_clear_master(pci_dev); + pci_release_region(pci_dev, 0); + pci_disable_device(pci_dev); + return result; + } + + return result; +} + +/* + * The following returns transport register memory location and + * size... + */ +int ipa3_get_transport_info( + phys_addr_t *phys_addr_ptr, + unsigned long *size_ptr) +{ + if (!phys_addr_ptr || !size_ptr) { + IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n", + phys_addr_ptr, size_ptr); + return -EINVAL; + } + + *phys_addr_ptr = ipa3_res.transport_mem_base; + *size_ptr = ipa3_res.transport_mem_size; + + return 0; +} +EXPORT_SYMBOL(ipa3_get_transport_info); + +static uint emulation_type = IPA_HW_v4_0; + +/* + * The following returns emulation type... + */ +uint ipa3_get_emulation_type(void) +{ + return emulation_type; +} + MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("IPA HW device driver"); + +/* + * Module parameter. Invoke as follows: + * insmod ipat.ko emulation_type=[13|14|...|N] + * Examples: + * insmod ipat.ko emulation_type=13 # for IPA 3.5.1 + * insmod ipat.ko emulation_type=14 # for IPA 4.0 + * + * NOTE: The emulation_type values need to come from: enum ipa_hw_type + * + */ + +module_param(emulation_type, uint, 0000); +MODULE_PARM_DESC( + emulation_type, + "IPA emulation type (Use 13 for IPA 3.5.1, 14 for IPA 4.0)"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 637d0f8b4c48ad070206f73196141da87ebe3285..309378712be58593a1f0f9158c78eb0ae7c1e959 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -823,11 +823,17 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, memcpy(&ep->chan_scratch, ¶ms->chan_scratch, sizeof(union __packed gsi_channel_scratch)); - ep->chan_scratch.xdci.max_outstanding_tre = - params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv; - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) - ep->chan_scratch.xdci.max_outstanding_tre = 0; + /* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. 
+ */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ep->chan_scratch.xdci.max_outstanding_tre = + params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv; + } gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl, params->chan_scratch); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index cf0fa50ebb926c633c0d15815895482a67ec518f..3454073fec4fe692308bb1f888b24b37aa34ba8e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -73,6 +73,8 @@ const char *ipa3_event_name[] = { __stringify(DEL_L2TP_VLAN_MAPPING), __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT), __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT), + __stringify(ADD_BRIDGE_VLAN_MAPPING), + __stringify(DEL_BRIDGE_VLAN_MAPPING), }; const char *ipa3_hdr_l2_type_name[] = { @@ -1945,7 +1947,7 @@ static ssize_t ipa3_read_ipahal_regs(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { IPA_ACTIVE_CLIENTS_INC_SIMPLE(); - ipahal_print_all_regs(); + ipahal_print_all_regs(true); IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 6649017f0848de4d17c06ce5ff88c0563613dc27..f27efa4379b0473e9db8fbdc35d33a8c52b2a5b1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -79,6 +79,8 @@ #define IPA_SEND_MAX_DESC (20) +#define IPA_EOT_THRESH 32 + static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags); static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys); static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys); @@ -236,18 +238,19 @@ static void ipa3_send_nop_desc(struct work_struct *work) } list_add_tail(&tx_pkt->link, &sys->head_desc_list); sys->nop_pending = false; - spin_unlock_bh(&sys->spinlock); memset(&nop_xfer, 0, sizeof(nop_xfer)); nop_xfer.type = GSI_XFER_ELEM_NOP; nop_xfer.flags = GSI_XFER_FLAG_EOT; nop_xfer.xfer_user_data = tx_pkt; if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) { + spin_unlock_bh(&sys->spinlock); IPAERR("gsi_queue_xfer for ch:%lu failed\n", sys->ep->gsi_chan_hdl); queue_work(sys->wq, &sys->work); return; } + spin_unlock_bh(&sys->spinlock); /* make sure TAG process is sent before clocks are gated */ ipa3_ctx->tag_process_before_gating = true; @@ -406,11 +409,14 @@ int ipa3_send(struct ipa3_sys_context *sys, } if (i == (num_desc - 1)) { - if (!sys->use_comm_evt_ring) { + if (!sys->use_comm_evt_ring || + (sys->pkt_sent % IPA_EOT_THRESH == 0)) { gsi_xfer[i].flags |= GSI_XFER_FLAG_EOT; gsi_xfer[i].flags |= GSI_XFER_FLAG_BEI; + } else { + send_nop = true; } gsi_xfer[i].xfer_user_data = tx_pkt_first; @@ -429,11 +435,12 @@ int ipa3_send(struct ipa3_sys_context *sys, goto failure; } - - if (sys->use_comm_evt_ring && !sys->nop_pending) { + if (send_nop && !sys->nop_pending) sys->nop_pending = true; - send_nop = true; - } + else + send_nop = false; + + sys->pkt_sent++; spin_unlock_bh(&sys->spinlock); /* set the timer for sending the NOP descriptor */ @@ -3792,15 +3799,20 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in, goto fail_alloc_channel; memset(&ch_scratch, 0, sizeof(ch_scratch)); - ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv * - GSI_CHAN_RE_SIZE_16B; - ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B; - - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { - ch_scratch.gpi.max_outstanding_tre = 0; - ch_scratch.gpi.outstanding_threshold = 0; - } - + 
/* + * Update scratch for MCS smart prefetch: + * Starting IPA4.5, smart prefetch implemented by H/W. + * At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch + * so keep the fields zero. + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ch_scratch.gpi.max_outstanding_tre = + gsi_ep_info->ipa_if_tlv * GSI_CHAN_RE_SIZE_16B; + ch_scratch.gpi.outstanding_threshold = + 2 * GSI_CHAN_RE_SIZE_16B; + } + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + ch_scratch.gpi.dl_nlo_channel = 0; result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch); if (result != GSI_STATUS_SUCCESS) { IPAERR("failed to write scratch %d\n", result); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c new file mode 100644 index 0000000000000000000000000000000000000000..5fe2294819bf65155e40b0ccbe8c4e10294a1449 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dt_replacement.c @@ -0,0 +1,765 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_emulation_stubs.h" + +# undef strsame +# define strsame(x, y) \ + (!strcmp((x), (y))) + +/* + * The following enum values used to index tables below. + */ +enum dtsi_index_e { + DTSI_INDEX_3_5_1 = 0, + DTSI_INDEX_4_0 = 1, +}; + +struct dtsi_replacement_u32 { + char *key; + u32 value; +}; + +struct dtsi_replacement_u32_table { + struct dtsi_replacement_u32 *p_table; + u32 num_entries; +}; + +struct dtsi_replacement_bool { + char *key; + bool value; +}; + +struct dtsi_replacement_bool_table { + struct dtsi_replacement_bool *p_table; + u32 num_entries; +}; + +struct dtsi_replacement_u32_array { + char *key; + u32 *p_value; + u32 num_elements; +}; + +struct dtsi_replacement_u32_array_table { + struct dtsi_replacement_u32_array *p_table; + u32 num_entries; +}; + +struct dtsi_replacement_resource_table { + struct resource *p_table; + u32 num_entries; +}; + +/* + * Any of the data below with _4_0 in the name represent data taken + * from the 4.0 dtsi file. + * + * Any of the data below with _3_5_1 in the name represent data taken + * from the 3.5.1 dtsi file. 
+ */ +static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = { + {"qcom,use-ipa-tethering-bridge", true}, + {"qcom,modem-cfg-emb-pipe-flt", true}, + {"qcom,ipa-wdi2", true}, + {"qcom,use-64-bit-dma-mask", false}, + {"qcom,bandwidth-vote-for-ipa", false}, + {"qcom,skip-uc-pipe-reset", false}, + {"qcom,tethered-flow-control", true}, + {"qcom,use-rg10-limitation-mitigation", false}, + {"qcom,do-not-use-ch-gsi-20", false}, + {"qcom,use-ipa-pm", false}, +}; + +static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = { + {"qcom,use-ipa-tethering-bridge", true}, + {"qcom,modem-cfg-emb-pipe-flt", true}, + {"qcom,ipa-wdi2", true}, + {"qcom,use-64-bit-dma-mask", false}, + {"qcom,bandwidth-vote-for-ipa", true}, + {"qcom,skip-uc-pipe-reset", false}, + {"qcom,tethered-flow-control", false}, + {"qcom,use-rg10-limitation-mitigation", false}, + {"qcom,do-not-use-ch-gsi-20", false}, + {"qcom,use-ipa-pm", false}, +}; + +static struct dtsi_replacement_bool_table +ipa3_plat_drv_bool_table[] = { + { ipa3_plat_drv_bool_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) }, + { ipa3_plat_drv_bool_4_0, + ARRAY_SIZE(ipa3_plat_drv_bool_4_0) }, +}; + +static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = { + {"qcom,ipa-hw-ver", IPA_HW_v4_0}, + {"qcom,ipa-hw-mode", 3}, + {"qcom,wan-rx-ring-size", 192}, + {"qcom,lan-rx-ring-size", 192}, + {"qcom,ee", 0}, + {"emulator-bar0-offset", 0x01C00000}, +}; + +static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = { + {"qcom,ipa-hw-ver", IPA_HW_v3_5_1}, + {"qcom,ipa-hw-mode", 3}, + {"qcom,wan-rx-ring-size", 192}, + {"qcom,lan-rx-ring-size", 192}, + {"qcom,ee", 0}, + {"emulator-bar0-offset", 0x01C00000}, +}; + +static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = { + { ipa3_plat_drv_u32_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) }, + { ipa3_plat_drv_u32_4_0, + ARRAY_SIZE(ipa3_plat_drv_u32_4_0) }, +}; + +static u32 mhi_event_ring_id_limits_array_4_0[] = { + 9, 10 +}; + +static u32 mhi_event_ring_id_limits_array_3_5_1[] = { + IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END +}; + +static u32 ipa_tz_unlock_reg_array_4_0[] = { + 0x04043583c, 0x00001000 +}; + +static u32 ipa_tz_unlock_reg_array_3_5_1[] = { + 0x04043583c, 0x00001000 +}; + +static u32 ipa_ram_mmap_array_4_0[] = { + 0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078, + 0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388, + 0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000, + 0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E, + 0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078, + 0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008, + 0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608, + 0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8, + 0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0, + 0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x000013F0, + 0x0000100C, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000, + 0x000023FC, 0x00000000, 0x000023FC, 0x00000000, 0x00000080, + 0x00000200, 0x00002800, 0x000023FC, 0x00000000, 0x000023FC, + 0x00000000, 0x000023FC, 0x00000000, 0x000023FC, 0x00000000, + 0x00002400, 0x00000400, 0x00000BD8, 0x00000050, 0x00000C30, + 0x00000060, 0x00000C90, 0x00000140, 0x00000DD0, 0x00000180, + 0x00000F50, 0x00000180, 0x000010D0, 0x00000180, 0x00001250, + 0x00000180, 0x000013D0, 0x00000020 +}; + +static u32 ipa_ram_mmap_array_3_5_1[] = { + 0x00000280, 0x00000000, 0x00000000, 0x00000288, 0x00000078, + 0x00004000, 0x00000308, 0x00000078, 0x00004000, 0x00000388, + 
0x00000078, 0x00004000, 0x00000408, 0x00000078, 0x00004000, + 0x0000000F, 0x00000000, 0x00000007, 0x00000008, 0x0000000E, + 0x00000488, 0x00000078, 0x00004000, 0x00000508, 0x00000078, + 0x00004000, 0x0000000F, 0x00000000, 0x00000007, 0x00000008, + 0x0000000E, 0x00000588, 0x00000078, 0x00004000, 0x00000608, + 0x00000078, 0x00004000, 0x00000688, 0x00000140, 0x000007C8, + 0x00000000, 0x00000800, 0x000007D0, 0x00000200, 0x000009D0, + 0x00000200, 0x00000000, 0x00000000, 0x00000000, 0x00000BD8, + 0x00001024, 0x00002000, 0x00000000, 0x00002000, 0x00000000, + 0x00002000, 0x00000000, 0x00002000, 0x00000000, 0x00000080, + 0x00000200, 0x00002000, 0x00002000, 0x00000000, 0x00002000, + 0x00000000, 0x00002000, 0x00000000, 0x00002000, 0x00000000, + 0x00001C00, 0x00000400 +}; + +struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = { + {"qcom,mhi-event-ring-id-limits", + mhi_event_ring_id_limits_array_4_0, + ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) }, + {"qcom,ipa-tz-unlock-reg", + ipa_tz_unlock_reg_array_4_0, + ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) }, + {"qcom,ipa-ram-mmap", + ipa_ram_mmap_array_4_0, + ARRAY_SIZE(ipa_ram_mmap_array_4_0) }, +}; + +struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = { + {"qcom,mhi-event-ring-id-limits", + mhi_event_ring_id_limits_array_3_5_1, + ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) }, + {"qcom,ipa-tz-unlock-reg", + ipa_tz_unlock_reg_array_3_5_1, + ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) }, + {"qcom,ipa-ram-mmap", + ipa_ram_mmap_array_3_5_1, + ARRAY_SIZE(ipa_ram_mmap_array_3_5_1) }, +}; + +struct dtsi_replacement_u32_array_table +ipa3_plat_drv_u32_array_table[] = { + { ipa3_plat_drv_u32_array_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) }, + { ipa3_plat_drv_u32_array_4_0, + ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) }, +}; + +#define INTCTRL_OFFSET 0x083C0000 +#define INTCTRL_SIZE 0x00000110 + +#define IPA_BASE_OFFSET_4_0 0x01e00000 +#define IPA_BASE_SIZE_4_0 0x00034000 +#define GSI_BASE_OFFSET_4_0 0x01e04000 +#define GSI_BASE_SIZE_4_0 0x00028000 + +struct resource ipa3_plat_drv_resource_4_0[] = { + /* + * PLEASE NOTE WELL: The following offset values below + * ("ipa-base", "gsi-base", and "intctrl-base") are used to + * calculate offsets relative to the PCI BAR0 address provided + * by the PCI probe. After their use to calculate the + * offsets, they are not used again, since PCI ultimately + * dictates where things live. + */ + { + IPA_BASE_OFFSET_4_0, + (IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0), + "ipa-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + GSI_BASE_OFFSET_4_0, + (GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0), + "gsi-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + /* + * The following entry is germane only to the emulator + * environment. It is needed to locate the emulator's PCI + * interrupt controller... 
+ */ + { + INTCTRL_OFFSET, + (INTCTRL_OFFSET + INTCTRL_SIZE), + "intctrl-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + IPA_PIPE_MEM_START_OFST, + (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE), + "ipa-pipe-mem", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "gsi-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "ipa-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, +}; + +#define IPA_BASE_OFFSET_3_5_1 0x01e00000 +#define IPA_BASE_SIZE_3_5_1 0x00034000 +#define GSI_BASE_OFFSET_3_5_1 0x01e04000 +#define GSI_BASE_SIZE_3_5_1 0x0002c000 + +struct resource ipa3_plat_drv_resource_3_5_1[] = { + /* + * PLEASE NOTE WELL: The following offset values below + * ("ipa-base", "gsi-base", and "intctrl-base") are used to + * calculate offsets relative to the PCI BAR0 address provided + * by the PCI probe. After their use to calculate the + * offsets, they are not used again, since PCI ultimately + * dictates where things live. + */ + { + IPA_BASE_OFFSET_3_5_1, + (IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1), + "ipa-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + GSI_BASE_OFFSET_3_5_1, + (GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1), + "gsi-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + /* + * The following entry is germane only to the emulator + * environment. It is needed to locate the emulator's PCI + * interrupt controller... + */ + { + INTCTRL_OFFSET, + (INTCTRL_OFFSET + INTCTRL_SIZE), + "intctrl-base", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + IPA_PIPE_MEM_START_OFST, + (IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE), + "ipa-pipe-mem", + IORESOURCE_MEM, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "gsi-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, + + { + 0, + 0, + "ipa-irq", + IORESOURCE_IRQ, + 0, + NULL, + NULL, + NULL + }, +}; + +struct dtsi_replacement_resource_table +ipa3_plat_drv_resource_table[] = { + { ipa3_plat_drv_resource_3_5_1, + ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) }, + { ipa3_plat_drv_resource_4_0, + ARRAY_SIZE(ipa3_plat_drv_resource_4_0) }, +}; + +/* + * The following code uses the data above... + */ +static u32 emulator_type_to_index(void) +{ + /* + * Use the input parameter to the IPA driver loadable module, + * which specifies the type of hardware the driver is running + * on. + */ + u32 index = DTSI_INDEX_4_0; + uint emulation_type = ipa3_get_emulation_type(); + + switch (emulation_type) { + case IPA_HW_v3_5_1: + index = DTSI_INDEX_3_5_1; + break; + case IPA_HW_v4_0: + index = DTSI_INDEX_4_0; + break; + default: + break; + } + + IPADBG("emulation_type(%u) emulation_index(%u)\n", + emulation_type, index); + + return index; +} + +/* From include/linux/of.h */ +/** + * emulator_of_property_read_bool - Find from a property + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * + * Search for a property in a device node. + * Returns true if the property exists false otherwise. + */ +bool emulator_of_property_read_bool( + const struct device_node *np, + const char *propname) +{ + u16 i; + u32 index; + struct dtsi_replacement_bool *ipa3_plat_drv_boolP; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. 
+ */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) { + IPADBG( + "Did not find ipa3_plat_drv_bool_table for index %u\n", + index); + return false; + } + + ipa3_plat_drv_boolP = + ipa3_plat_drv_bool_table[index].p_table; + + for (i = 0; + i < ipa3_plat_drv_bool_table[index].num_entries; + i++) { + if (strsame(ipa3_plat_drv_boolP[i].key, propname)) { + IPADBG( + "Found value %u for propname %s index %u\n", + ipa3_plat_drv_boolP[i].value, + propname, + index); + return ipa3_plat_drv_boolP[i].value; + } + } + + IPADBG("Did not find match for propname %s index %u\n", + propname, + index); + + return false; +} + +/* From include/linux/of.h */ +int emulator_of_property_read_u32( + const struct device_node *np, + const char *propname, + u32 *out_value) +{ + u16 i; + u32 index; + struct dtsi_replacement_u32 *ipa3_plat_drv_u32P; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. + */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) { + IPADBG( + "Did not find ipa3_plat_drv_u32_table for index %u\n", + index); + return false; + } + + ipa3_plat_drv_u32P = + ipa3_plat_drv_u32_table[index].p_table; + + for (i = 0; + i < ipa3_plat_drv_u32_table[index].num_entries; + i++) { + if (strsame(ipa3_plat_drv_u32P[i].key, propname)) { + *out_value = ipa3_plat_drv_u32P[i].value; + IPADBG( + "Found value %u for propname %s index %u\n", + ipa3_plat_drv_u32P[i].value, + propname, + index); + return 0; + } + } + + IPADBG("Did not find match for propname %s index %u\n", + propname, + index); + + return -EINVAL; +} + +/* From include/linux/of.h */ +/** + * emulator_of_property_read_u32_array - Find and read an array of 32 + * bit integers from a property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 32-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u32 value can be decoded. + */ +int emulator_of_property_read_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz) +{ + u16 i; + u32 index; + struct dtsi_replacement_u32_array *u32_arrayP; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. 
+ */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) { + IPADBG( + "Did not find ipa3_plat_drv_u32_array_table for index %u\n", + index); + return false; + } + + u32_arrayP = + ipa3_plat_drv_u32_array_table[index].p_table; + for (i = 0; + i < ipa3_plat_drv_u32_array_table[index].num_entries; + i++) { + if (strsame( + u32_arrayP[i].key, propname)) { + u32 num_elements = + u32_arrayP[i].num_elements; + u32 *p_element = + &u32_arrayP[i].p_value[0]; + size_t j = 0; + + if (num_elements > sz) { + IPAERR( + "Found array of %u values for propname %s; only room for %u elements in copy buffer\n", + num_elements, + propname, + (unsigned int) sz); + return -EOVERFLOW; + } + + while (j++ < num_elements) + *out_values++ = *p_element++; + + IPADBG( + "Found array of values starting with %u for propname %s index %u\n", + u32_arrayP[i].p_value[0], + propname, + index); + + return 0; + } + } + + IPADBG("Did not find match for propname %s index %u\n", + propname, + index); + + return -EINVAL; +} + +/* From drivers/base/platform.c */ +/** + * emulator_platform_get_resource_byname - get a resource for a device by name + * @dev: platform device + * @type: resource type + * @name: resource name + */ +struct resource *emulator_platform_get_resource_byname( + struct platform_device *dev, + unsigned int type, + const char *name) +{ + u16 i; + u32 index; + struct resource *ipa3_plat_drv_resourceP; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. + */ + index = emulator_type_to_index(); + if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) { + IPADBG( + "Did not find ipa3_plat_drv_resource_table for index %u\n", + index); + return false; + } + + ipa3_plat_drv_resourceP = + ipa3_plat_drv_resource_table[index].p_table; + for (i = 0; + i < ipa3_plat_drv_resource_table[index].num_entries; + i++) { + struct resource *r = &ipa3_plat_drv_resourceP[i]; + + if (type == resource_type(r) && strsame(r->name, name)) { + IPADBG( + "Found start 0x%x size %u for name %s index %u\n", + (unsigned int) (r->start), + (unsigned int) (resource_size(r)), + name, + index); + return r; + } + } + + IPADBG("Did not find match for name %s index %u\n", + name, + index); + + return NULL; +} + +/* From drivers/of/base.c */ +/** + * emulator_of_property_count_elems_of_size - Count the number of + * elements in a property + * + * @np: device node from which the property value is to + * be read. Not used. + * @propname: name of the property to be searched. + * @elem_size: size of the individual element + * + * Search for a property and count the number of elements of size + * elem_size in it. Returns number of elements on success, -EINVAL if + * the property does not exist or its length does not match a multiple + * of elem_size and -ENODATA if the property does not have a value. + */ +int emulator_of_property_count_elems_of_size( + const struct device_node *np, + const char *propname, + int elem_size) +{ + u32 index; + + /* + * Get the index for the type of hardware we're running on. + * This is used as a table index. 
+ */ + index = emulator_type_to_index(); + + /* + * Use elem_size to determine which table to search for the + * specified property name + */ + if (elem_size == sizeof(u32)) { + u16 i; + struct dtsi_replacement_u32_array *u32_arrayP; + + if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) { + IPADBG( + "Did not find ipa3_plat_drv_u32_array_table for index %u\n", + index); + return false; + } + + u32_arrayP = + ipa3_plat_drv_u32_array_table[index].p_table; + + for (i = 0; + i < ipa3_plat_drv_u32_array_table[index].num_entries; + i++) { + if (strsame(u32_arrayP[i].key, propname)) { + if (u32_arrayP[i].p_value == NULL) { + IPADBG( + "Found no elements for propname %s index %u\n", + propname, + index); + return -ENODATA; + } + + IPADBG( + "Found %u elements for propname %s index %u\n", + u32_arrayP[i].num_elements, + propname, + index); + + return u32_arrayP[i].num_elements; + } + } + + IPADBG( + "Found no match in table with elem_size %d for propname %s index %u\n", + elem_size, + propname, + index); + + return -EINVAL; + } + + IPAERR( + "Found no tables with element size %u to search for propname %s index %u\n", + elem_size, + propname, + index); + + return -EINVAL; +} + +int emulator_of_property_read_variable_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz_min, + size_t sz_max) +{ + return emulator_of_property_read_u32_array( + np, propname, out_values, sz_max); +} + +resource_size_t emulator_resource_size(const struct resource *res) +{ + return res->end - res->start; +} diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h new file mode 100644 index 0000000000000000000000000000000000000000..c4cb81b68f59b35251e3ba682d5c2cd626754376 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_emulation_stubs.h @@ -0,0 +1,128 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#if !defined(_IPA_EMULATION_STUBS_H_) +# define _IPA_EMULATION_STUBS_H_ + +# define outer_flush_range(x, y) +# define __flush_dcache_area(x, y) +# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y) + +/* Point several API calls to these new EMULATION functions */ +# define of_property_read_bool(np, propname) \ + emulator_of_property_read_bool(NULL, propname) +# define of_property_read_u32(np, propname, out_value) \ + emulator_of_property_read_u32(NULL, propname, out_value) +# define of_property_read_u32_array(np, propname, out_values, sz) \ + emulator_of_property_read_u32_array(NULL, propname, out_values, sz) +# define platform_get_resource_byname(dev, type, name) \ + emulator_platform_get_resource_byname(NULL, type, name) +# define of_property_count_elems_of_size(np, propname, elem_size) \ + emulator_of_property_count_elems_of_size(NULL, propname, elem_size) +# define of_property_read_variable_u32_array( \ + np, propname, out_values, sz_min, sz_max) \ + emulator_of_property_read_variable_u32_array( \ + NULL, propname, out_values, sz_min, sz_max) +# define resource_size(res) \ + emulator_resource_size(res) + +/** + * emulator_of_property_read_bool - Findfrom a property + * @np: device node used to find the property value. (not used) + * @propname: name of the property to be searched. + * + * Search for a property in a device node. + * Returns true if the property exists false otherwise. + */ +bool emulator_of_property_read_bool( + const struct device_node *np, + const char *propname); + +int emulator_of_property_read_u32( + const struct device_node *np, + const char *propname, + u32 *out_value); + +/** + * emulator_of_property_read_u32_array - Find and read an array of 32 + * bit integers from a property. + * + * @np: device node used to find the property value. (not used) + * @propname: name of the property to be searched. + * @out_values: pointer to return value, modified only if return value is 0. + * @sz: number of array elements to read + * + * Search for a property in a device node and read 32-bit value(s) from + * it. Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_values is modified only if a valid u32 value can be decoded. + */ +int emulator_of_property_read_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz); + +/** + * emulator_platform_get_resource_byname - get a resource for a device + * by name + * + * @dev: platform device + * @type: resource type + * @name: resource name + */ +struct resource *emulator_platform_get_resource_byname( + struct platform_device *dev, + unsigned int type, + const char *name); + +/** + * emulator_of_property_count_elems_of_size - Count the number of + * elements in a property + * + * @np: device node used to find the property value. (not used) + * @propname: name of the property to be searched. + * @elem_size: size of the individual element + * + * Search for a property and count the number of elements of size + * elem_size in it. Returns number of elements on success, -EINVAL if + * the property does not exist or its length does not match a multiple + * of elem_size and -ENODATA if the property does not have a value. 
+ */ +int emulator_of_property_count_elems_of_size( + const struct device_node *np, + const char *propname, + int elem_size); + +int emulator_of_property_read_variable_u32_array( + const struct device_node *np, + const char *propname, + u32 *out_values, + size_t sz_min, + size_t sz_max); + +resource_size_t emulator_resource_size( + const struct resource *res); + +static inline bool is_device_dma_coherent(struct device *dev) +{ + return false; +} + +static inline phys_addr_t qcom_smem_virt_to_phys(void *addr) +{ + return 0; +} + +#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index 0bc77b55be17eb7785dff8140f2d91788cb0b093..28b8ec4668695ea1f5572d9ef6f46b560bb93fe3 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -833,7 +833,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule, static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl, - struct ipa3_flt_tbl *tbl) + struct ipa3_flt_tbl *tbl, bool user) { int id; @@ -856,6 +856,7 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, } } (*entry)->rule_id = id; + (*entry)->ipacm_installed = user; return 0; @@ -893,7 +894,7 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl, static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, const struct ipa_flt_rule *rule, u8 add_rear, - u32 *rule_hdl) + u32 *rule_hdl, bool user) { struct ipa3_flt_entry *entry; struct ipa3_rt_tbl *rt_tbl = NULL; @@ -901,7 +902,7 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) goto error; - if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl)) + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, user)) goto error; if (add_rear) { @@ -951,7 +952,7 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, if (__ipa_validate_flt_rule(rule, &rt_tbl, ip)) goto error; - if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl)) + if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl, true)) goto error; list_add(&entry->link, &((*add_after_entry)->link)); @@ -1072,7 +1073,7 @@ static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx) static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, const struct ipa_flt_rule *rule, u8 add_rear, - u32 *rule_hdl) + u32 *rule_hdl, bool user) { struct ipa3_flt_tbl *tbl; int ipa_ep_idx; @@ -1090,18 +1091,34 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip]; IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); - return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl); + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user); } /** * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally * commit to IPA HW + * @rules: [inout] set of filtering rules to add * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + return ipa3_add_flt_rule_usr(rules, false); +} + +/** + * ipa3_add_flt_rule_usr() - Add the specified filtering rules to + * SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on 
failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) { int i; int result; @@ -1120,12 +1137,14 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) */ if (ipa3_ctx->ipa_fltrt_not_hashable) rules->rules[i].rule.hashable = false; - result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, + result = __ipa_add_ep_flt_rule(rules->ip, + rules->ep, &rules->rules[i].rule, rules->rules[i].at_rear, - &rules->rules[i].flt_rule_hdl); - } else - result = -1; + &rules->rules[i].flt_rule_hdl, + user_only); + } else + result = -1; if (result) { IPAERR_RL("failed to add flt rule %d\n", i); @@ -1376,18 +1395,20 @@ int ipa3_commit_flt(enum ipa_ip_type ip) * ipa3_reset_flt() - Reset the current SW filtering table of specified type * (does not commit to HW) * @ip: [in] the family of routing tables + * @user_only: [in] indicate rules deleted by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_reset_flt(enum ipa_ip_type ip) +int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only) { struct ipa3_flt_tbl *tbl; struct ipa3_flt_entry *entry; struct ipa3_flt_entry *next; int i; int id; + int rule_id; if (ip >= IPA_IP_MAX) { IPAERR_RL("bad parm\n"); @@ -1407,21 +1428,27 @@ int ipa3_reset_flt(enum ipa_ip_type ip) mutex_unlock(&ipa3_ctx->lock); return -EFAULT; } - list_del(&entry->link); - entry->tbl->rule_cnt--; - if (entry->rt_tbl) - entry->rt_tbl->ref_cnt--; - /* if rule id was allocated from idr, remove it */ - if ((entry->rule_id < ipahal_get_rule_id_hi_bit()) && - (entry->rule_id >= ipahal_get_low_rule_id())) - idr_remove(entry->tbl->rule_ids, - entry->rule_id); - entry->cookie = 0; - id = entry->id; - kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); - - /* remove the handle from the database */ - ipa3_id_remove(id); + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + /* if rule id was allocated from idr, remove */ + rule_id = entry->rule_id; + id = entry->id; + if ((rule_id < ipahal_get_rule_id_hi_bit()) && + (rule_id >= ipahal_get_low_rule_id())) + idr_remove(entry->tbl->rule_ids, + rule_id); + entry->cookie = 0; + kmem_cache_free(ipa3_ctx->flt_rule_cache, + entry); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } } } mutex_unlock(&ipa3_ctx->lock); @@ -1447,14 +1474,14 @@ void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx) tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, - &ep->dflt_flt4_rule_hdl); + &ep->dflt_flt4_rule_hdl, false); ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4); tbl->sticky_rear = true; tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; rule.action = IPA_PASS_TO_EXCEPTION; __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, - &ep->dflt_flt6_rule_hdl); + &ep->dflt_flt6_rule_hdl, false); ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6); tbl->sticky_rear = true; mutex_unlock(&ipa3_ctx->lock); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index 7f4282a30ac8715924eb0451eb6d2aa65a55b4e8..5fe3c6775ab9eaa62b18d509c6f6d89ece676d33 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -321,7 +321,7 @@ int __ipa_commit_hdr_v3_0(void) } static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, - bool add_ref_hdr) + bool add_ref_hdr, 
bool user_only) { struct ipa3_hdr_entry *hdr_entry; struct ipa3_hdr_proc_ctx_entry *entry; @@ -367,6 +367,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (add_ref_hdr) hdr_entry->ref_cnt++; entry->cookie = IPA_PROC_HDR_COOKIE; + entry->ipacm_installed = user_only; needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type); @@ -403,6 +404,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, */ offset->offset = htbl->end; offset->bin = bin; + offset->ipacm_installed = user_only; htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; list_add(&offset->link, &htbl->head_offset_list[bin]); @@ -411,6 +413,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, offset = list_first_entry(&htbl->head_free_offset_list[bin], struct ipa3_hdr_proc_ctx_offset_entry, link); + offset->ipacm_installed = user_only; list_move(&offset->link, &htbl->head_offset_list[bin]); } @@ -448,7 +451,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, } -static int __ipa_add_hdr(struct ipa_hdr_add *hdr) +static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user) { struct ipa3_hdr_entry *entry; struct ipa_hdr_offset_entry *offset = NULL; @@ -481,6 +484,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; entry->cookie = IPA_HDR_COOKIE; + entry->ipacm_installed = user; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -532,6 +536,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) list_add(&offset->link, &htbl->head_offset_list[bin]); entry->offset_entry = offset; + offset->ipacm_installed = user; } } else { entry->is_hdr_proc_ctx = false; @@ -540,6 +545,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) struct ipa_hdr_offset_entry, link); list_move(&offset->link, &htbl->head_offset_list[bin]); entry->offset_entry = offset; + offset->ipacm_installed = user; } list_add(&entry->link, &htbl->head_hdr_entry_list); @@ -571,7 +577,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) IPADBG("adding processing context for header %s\n", hdr->name); proc_ctx.type = IPA_HDR_PROC_NONE; proc_ctx.hdr_hdl = id; - if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) { + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) { IPAERR("failed to add hdr proc ctx\n"); goto fail_add_proc_ctx; } @@ -653,7 +659,6 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, return 0; } - int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) { struct ipa3_hdr_entry *entry; @@ -732,6 +737,21 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) * Note: Should not be called from atomic context */ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return ipa3_add_hdr_usr(hdrs, false); +} + +/** + * ipa3_add_hdr_usr() - add the specified headers to SW + * and optionally commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate installed from user + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) { int i; int result = -EFAULT; @@ -745,7 +765,7 @@ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs) IPADBG("adding %d headers to IPA driver internal data struct\n", hdrs->num_hdrs); for (i = 0; i < hdrs->num_hdrs; i++) { - if (__ipa_add_hdr(&hdrs->hdr[i])) { + if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) { IPAERR_RL("failed to add hdr %d\n", i); hdrs->hdr[i].status = -1; } else { @@ -826,12 +846,14 @@ int 
ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls) * ipa3_add_hdr_proc_ctx() - add the specified headers to SW * and optionally commit them to IPA HW * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate installed by user-space module * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { int i; int result = -EFAULT; @@ -845,7 +867,8 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) IPADBG("adding %d header processing contextes to IPA driver\n", proc_ctxs->num_proc_ctxs); for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) { - if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) { + if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], + true, user_only)) { IPAERR_RL("failed to add hdr pric ctx %d\n", i); proc_ctxs->proc_ctx[i].status = -1; } else { @@ -960,11 +983,12 @@ int ipa3_commit_hdr(void) * ipa3_reset_hdr() - reset the current header table in SW (does not commit to * HW) * + * @user_only: [in] indicate delete rules installed by userspace * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_reset_hdr(void) +int ipa3_reset_hdr(bool user_only) { struct ipa3_hdr_entry *entry; struct ipa3_hdr_entry *next; @@ -974,15 +998,17 @@ int ipa3_reset_hdr(void) struct ipa_hdr_offset_entry *off_next; struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry; struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next; + struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; + struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl; int i; /* * issue a reset on the routing module since routing rules point to * header table entries */ - if (ipa3_reset_rt(IPA_IP_v4)) + if (ipa3_reset_rt(IPA_IP_v4, user_only)) IPAERR("fail to reset v4 rt\n"); - if (ipa3_reset_rt(IPA_IP_v6)) + if (ipa3_reset_rt(IPA_IP_v6, user_only)) IPAERR("fail to reset v4 rt\n"); mutex_lock(&ipa3_ctx->lock); @@ -1011,47 +1037,60 @@ int ipa3_reset_hdr(void) WARN_ON_RATELIMIT_IPA(1); return -EFAULT; } - if (entry->is_hdr_proc_ctx) { - dma_unmap_single(ipa3_ctx->pdev, - entry->phys_base, - entry->hdr_len, - DMA_TO_DEVICE); - entry->proc_ctx = NULL; - } - list_del(&entry->link); - entry->ref_cnt = 0; - entry->cookie = 0; - - /* remove the handle from the database */ - ipa3_id_remove(entry->id); - kmem_cache_free(ipa3_ctx->hdr_cache, entry); + if (!user_only || entry->ipacm_installed) { + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } else { + /* move the offset entry to free list */ + entry->offset_entry->ipacm_installed = 0; + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[ + entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->ref_cnt = 0; + entry->cookie = 0; + + /* remove the handle from the database */ + ipa3_id_remove(entry->id); + kmem_cache_free(ipa3_ctx->hdr_cache, entry); + } } - for (i = 0; i < IPA_HDR_BIN_MAX; i++) { - list_for_each_entry_safe(off_entry, off_next, + + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + list_for_each_entry_safe(off_entry, off_next, &ipa3_ctx->hdr_tbl.head_offset_list[i], link) { - - /* - * do not remove the default exception header which is - * at 
offset 0 - */ - if (off_entry->offset == 0) - continue; - - list_del(&off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry); - } - list_for_each_entry_safe(off_entry, off_next, + /** + * do not remove the default exception + * header which is at offset 0 + */ + if (off_entry->offset == 0) + continue; + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, + off_entry); + } + list_for_each_entry_safe(off_entry, off_next, &ipa3_ctx->hdr_tbl.head_free_offset_list[i], link) { - list_del(&off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry); + list_del(&off_entry->link); + kmem_cache_free(ipa3_ctx->hdr_offset_cache, + off_entry); + } } + /* there is one header of size 8 */ + ipa3_ctx->hdr_tbl.end = 8; + ipa3_ctx->hdr_tbl.hdr_cnt = 1; } - /* there is one header of size 8 */ - ipa3_ctx->hdr_tbl.end = 8; - ipa3_ctx->hdr_tbl.hdr_cnt = 1; IPADBG("reset hdr proc ctx\n"); list_for_each_entry_safe( @@ -1065,34 +1104,47 @@ int ipa3_reset_hdr(void) WARN_ON_RATELIMIT_IPA(1); return -EFAULT; } - list_del(&ctx_entry->link); - ctx_entry->ref_cnt = 0; - ctx_entry->cookie = 0; - - /* remove the handle from the database */ - ipa3_id_remove(ctx_entry->id); - kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry); + if (!user_only || + ctx_entry->ipacm_installed) { + /* move the offset entry to appropriate free list */ + list_move(&ctx_entry->offset_entry->link, + &htbl_proc->head_free_offset_list[ + ctx_entry->offset_entry->bin]); + list_del(&ctx_entry->link); + htbl_proc->proc_ctx_cnt--; + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + + /* remove the handle from the database */ + ipa3_id_remove(ctx_entry->id); + kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, + ctx_entry); + } } - for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { - list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, &ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i], link) { - - list_del(&ctx_off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache, + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa3_ctx->hdr_proc_ctx_offset_cache, ctx_off_entry); + } + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa3_ctx->hdr_proc_ctx_tbl. 
+ head_free_offset_list[i], link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa3_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } } - list_for_each_entry_safe(ctx_off_entry, ctx_off_next, - &ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i], - link) { - list_del(&ctx_off_entry->link); - kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache, - ctx_off_entry); - } + ipa3_ctx->hdr_proc_ctx_tbl.end = 0; + ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0; } - ipa3_ctx->hdr_proc_ctx_tbl.end = 0; - ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0; mutex_unlock(&ipa3_ctx->lock); return 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c index 79cec5ae5f4c928b213cb75cb8476b8299e0ca1e..99aa02c6a6c7365d1e6d5d7380fce0ca1ca58f66 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c @@ -231,6 +231,9 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out) IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + if (offset.size == 0) + return 0; + mem.size = offset.size; mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, @@ -542,6 +545,9 @@ int ipa_get_teth_stats(void) IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + if (offset.size == 0) + return 0; + mem.size = offset.size; mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, @@ -1086,6 +1092,11 @@ static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + if (offset.size == 0) { + ret = 0; + goto free_offset; + } + mem.size = offset.size; mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, @@ -1355,6 +1366,9 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out) IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size); + if (offset.size == 0) + return 0; + mem.size = offset.size; mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 02da317c05a32f72feed9ffab9523999eff93f40..4dae8950a21aeec25edaed4340d142fb6690d4bb 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -220,6 +220,187 @@ enum { #define IPA_WDI_TX_DB_RES 7 #define IPA_WDI_MAX_RES 8 +#ifdef CONFIG_ARM64 +/* Outer caches unsupported on ARM64 platforms */ +# define outer_flush_range(x, y) +# define __cpuc_flush_dcache_area __flush_dcache_area +#endif + +#define IPA_SMP2P_SMEM_STATE_MASK 3 + + +#define IPA_SUMMING_THRESHOLD (0x10) +#define IPA_PIPE_MEM_START_OFST (0x0) +#define IPA_PIPE_MEM_SIZE (0x0) +#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \ + x == IPA_MODE_MOBILE_AP_WAN || \ + x == IPA_MODE_MOBILE_AP_WLAN) +#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL) +#define IPA_A5_MUX_HEADER_LENGTH (8) + +#define IPA_AGGR_MAX_STR_LENGTH (10) + +#define CLEANUP_TAG_PROCESS_TIMEOUT 500 + +#define IPA_AGGR_STR_IN_BYTES(str) \ + (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) + +#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100 + +#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048 + +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2 +#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3 + +#define IPA_MHI_GSI_EVENT_RING_ID_START 10 +#define IPA_MHI_GSI_EVENT_RING_ID_END 12 + +#define IPA_SMEM_SIZE (8 * 1024) + +#define IPA_GSI_CHANNEL_HALT_MIN_SLEEP 5000 +#define IPA_GSI_CHANNEL_HALT_MAX_SLEEP 10000 
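The IPA_GSI_CHANNEL_HALT_* sleep and retry constants added in this hunk are the kind of values normally consumed by a bounded retry loop around the GSI channel-halt command. Below is a minimal sketch of that pattern only; the try_halt_channel() and issue_halt_command() helpers are hypothetical, and the driver's real call site lives elsewhere.

/*
 * Illustrative sketch (hypothetical helpers, not the driver's actual
 * call site): retry the halt command a bounded number of times,
 * backing off with usleep_range() between attempts.
 */
static int try_halt_channel(u32 chan_idx)
{
	int try_cnt = 0;
	int res;

	do {
		res = issue_halt_command(chan_idx); /* hypothetical helper */
		if (res != -EAGAIN)
			break;
		/* back off before the next attempt */
		usleep_range(IPA_GSI_CHANNEL_HALT_MIN_SLEEP,
			     IPA_GSI_CHANNEL_HALT_MAX_SLEEP);
	} while (++try_cnt < IPA_GSI_CHANNEL_HALT_MAX_TRY);

	return res;
}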
+#define IPA_GSI_CHANNEL_HALT_MAX_TRY 10 + +/* round addresses for closes page per SMMU requirements */ +#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \ + do { \ + (iova_p) = rounddown((iova), PAGE_SIZE); \ + (pa_p) = rounddown((pa), PAGE_SIZE); \ + (size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \ + } while (0) + + +/* The relative location in /lib/firmware where the FWs will reside */ +#define IPA_FWS_PATH "ipa/ipa_fws.elf" +/* + * The following paths below are used when building the system for the + * emulation environment. + * + * As new hardware platforms are added into the emulation environment, + * please add the appropriate paths here for their firmwares. + */ +#define IPA_FWS_PATH_4_0 "ipa/4.0/ipa_fws.elf" +#define IPA_FWS_PATH_3_5_1 "ipa/3.5.1/ipa_fws.elf" + +#ifdef CONFIG_COMPAT +#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR, \ + compat_uptr_t) +#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_RT_TBL, \ + compat_uptr_t) +#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_COPY_HDR, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_TX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_RX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ + compat_uptr_t) +#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HDR, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_MEM, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_IPV6CT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_INIT_NAT, \ + compat_uptr_t) +#define IPA_IOC_INIT_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_INIT_IPV6CT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_TABLE_DMA_CMD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_TABLE_DMA_CMD, \ + compat_uptr_t) +#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_DEL_NAT, \ + compat_uptr_t) +#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_NAT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_DEL_IPV6CT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_IPV6CT_TABLE, \ + compat_uptr_t) +#define IPA_IOC_NAT_MODIFY_PDN32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_MODIFY_PDN, \ + compat_uptr_t) +#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_NAT_OFFSET, \ + compat_uptr_t) +#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_PULL_MSG, \ + compat_uptr_t) +#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_ADD_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + 
IPA_IOCTL_RM_DEL_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GENERATE_FLT_EQ, \ + compat_uptr_t) +#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_RT_TBL_INDEX, \ + compat_uptr_t) +#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_WRITE_QMAPID, \ + compat_uptr_t) +#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ + compat_uptr_t) +#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_RT_RULE, \ + compat_uptr_t) +#endif /* #ifdef CONFIG_COMPAT */ + +#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 +#define TZ_MEM_PROTECT_REGION_ID 0x10 + struct ipa3_active_client_htable_entry { struct hlist_node list; char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; @@ -264,6 +445,7 @@ struct ipa_smmu_cb_ctx { * @prio: rule 10bit priority which defines the order of the rule * among other rules at the same integrated table * @rule_id: rule 10bit ID to be returned in packet status + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_flt_entry { struct list_head link; @@ -275,6 +457,7 @@ struct ipa3_flt_entry { int id; u16 prio; u16 rule_id; + bool ipacm_installed; }; /** @@ -331,6 +514,7 @@ struct ipa3_rt_tbl { * @is_eth2_ofst_valid: is eth2_ofst field valid? * @eth2_ofst: offset to start of Ethernet-II/802.3 header * @user_deleted: is the header deleted by the user? + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_hdr_entry { struct list_head link; @@ -349,6 +533,7 @@ struct ipa3_hdr_entry { u8 is_eth2_ofst_valid; u16 eth2_ofst; bool user_deleted; + bool ipacm_installed; }; /** @@ -372,11 +557,13 @@ struct ipa3_hdr_tbl { * @link: entry's link in global processing context header offset entries list * @offset: the offset * @bin: bin + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_hdr_proc_ctx_offset_entry { struct list_head link; u32 offset; u32 bin; + bool ipacm_installed; }; /** @@ -390,6 +577,7 @@ struct ipa3_hdr_proc_ctx_offset_entry { * @ref_cnt: reference counter of routing table * @id: processing context header entry id * @user_deleted: is the hdr processing context deleted by the user? + * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_hdr_proc_ctx_entry { struct list_head link; @@ -401,6 +589,7 @@ struct ipa3_hdr_proc_ctx_entry { u32 ref_cnt; int id; bool user_deleted; + bool ipacm_installed; }; /** @@ -456,6 +645,8 @@ struct ipa3_flt_tbl { * @prio: rule 10bit priority which defines the order of the rule * among other rules at the integrated same table * @rule_id: rule 10bit ID to be returned in packet status + * @rule_id_valid: indicate if rule_id_valid valid or not? 
+ * @ipacm_installed: indicate if installed by ipacm */ struct ipa3_rt_entry { struct list_head link; @@ -469,6 +660,7 @@ struct ipa3_rt_entry { u16 prio; u16 rule_id; u16 rule_id_valid; + bool ipacm_installed; }; /** @@ -682,6 +874,7 @@ struct ipa3_sys_context { struct work_struct repl_work; void (*repl_hdlr)(struct ipa3_sys_context *sys); struct ipa3_repl_ctx repl; + u32 pkt_sent; /* ordering is important - mutable fields go above */ struct ipa3_ep_context *ep; @@ -910,9 +1103,9 @@ struct ipa3_nat_mem { }; /** -* struct ipa3_ipv6ct_mem - IPA IPv6 connection tracking memory description -* @dev: the memory device structure -*/ + * struct ipa3_ipv6ct_mem - IPA IPv6 connection tracking memory description + * @dev: the memory device structure + */ struct ipa3_ipv6ct_mem { struct ipa3_nat_ipv6ct_common_mem dev; }; @@ -922,11 +1115,13 @@ struct ipa3_ipv6ct_mem { * @IPA_HW_Normal: Regular IPA hardware * @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge + * @IPA_HW_Emulation: IPA emulation hardware */ enum ipa3_hw_mode { - IPA_HW_MODE_NORMAL = 0, - IPA_HW_MODE_VIRTUAL = 1, - IPA_HW_MODE_PCIE = 2 + IPA_HW_MODE_NORMAL = 0, + IPA_HW_MODE_VIRTUAL = 1, + IPA_HW_MODE_PCIE = 2, + IPA_HW_MODE_EMULATION = 3, }; enum ipa3_config_this_ep { @@ -1219,6 +1414,7 @@ struct ipa3_char_device_context { * @mode: IPA operating mode * @mmio: iomem * @ipa_wrapper_base: IPA wrapper base address + * @ipa_wrapper_size: size of the memory pointed to by ipa_wrapper_base * @hdr_tbl: IPA header table * @hdr_proc_ctx_tbl: IPA processing context table * @rt_tbl_set: list of routing tables each of which is a list of rules @@ -1360,6 +1556,8 @@ struct ipa3_context { struct list_head msg_list; struct list_head pull_msg_list; struct mutex msg_lock; + struct list_head msg_wlan_client_list; + struct mutex msg_wlan_client_lock; wait_queue_head_t msg_waitq; enum ipa_hw_type ipa_hw_type; enum ipa3_hw_mode ipa3_hw_mode; @@ -1427,6 +1625,7 @@ struct ipa3_context { struct mutex ipa_cne_evt_lock; bool use_ipa_pm; bool vlan_mode_iface[IPA_VLAN_IF_MAX]; + bool wdi_over_pcie; }; struct ipa3_plat_drv_res { @@ -1435,6 +1634,9 @@ struct ipa3_plat_drv_res { u32 ipa_mem_size; u32 transport_mem_base; u32 transport_mem_size; + u32 emulator_intcntrlr_mem_base; + u32 emulator_intcntrlr_mem_size; + u32 emulator_irq; u32 ipa_irq; u32 transport_irq; u32 ipa_pipe_mem_start_ofst; @@ -1459,6 +1661,7 @@ struct ipa3_plat_drv_res { struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; bool use_ipa_pm; struct ipa_pm_init_params pm_init; + bool wdi_over_pcie; }; /** @@ -1797,13 +2000,15 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); */ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs); +int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user); + int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls); int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); int ipa3_commit_hdr(void); -int ipa3_reset_hdr(void); +int ipa3_reset_hdr(bool user_only); int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup); @@ -1814,7 +2019,8 @@ int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy); /* * Header Processing Context */ -int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); +int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); @@ -1826,6 +2032,9 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, */ int 
ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules); int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules); @@ -1834,7 +2043,7 @@ int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); int ipa3_commit_rt(enum ipa_ip_type ip); -int ipa3_reset_rt(enum ipa_ip_type ip); +int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only); int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); @@ -1849,6 +2058,9 @@ int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); */ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); +int ipa3_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules); int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); @@ -1857,7 +2069,7 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); int ipa3_commit_flt(enum ipa_ip_type ip); -int ipa3_reset_flt(enum ipa_ip_type ip); +int ipa3_reset_flt(enum ipa_ip_type ip, bool user_only); /* * NAT @@ -1888,6 +2100,7 @@ int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn); */ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, ipa_msg_free_fn callback); +int ipa3_resend_wlan_msg(void); int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta); @@ -2364,7 +2577,13 @@ int ipa3_uc_panic_notifier(struct notifier_block *this, unsigned long event, void *ptr); void ipa3_inc_acquire_wakelock(void); void ipa3_dec_release_wakelock(void); -int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base); +int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base, + enum gsi_ver); +int emulator_load_fws( + const struct firmware *firmware, + u32 transport_mem_base, + u32 transport_mem_size, + enum gsi_ver); int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data); const char *ipa_hw_error_str(enum ipa3_hw_errors err_type); int ipa_gsi_ch20_wa(void); @@ -2391,4 +2610,9 @@ int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs); void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc, struct ipahal_imm_cmd_pyld *cmd_pyld); int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res); +uint ipa3_get_emulation_type(void); +int ipa3_get_transport_info( + phys_addr_t *phys_addr_ptr, + unsigned long *size_ptr); +irq_handler_t ipa3_get_isr(void); #endif /* _IPA3_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c index f338b75797c360168f0438b70eb6a18b1daf3b6c..e949670762bd4e93eb354f67dff1f9010417e9e5 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c @@ -347,6 +347,12 @@ static irqreturn_t ipa3_isr(int irq, void *ctxt) ipa3_dec_client_disable_clks_no_block(&log_info); return IRQ_HANDLED; } + +irq_handler_t ipa3_get_isr(void) +{ + return ipa3_isr; +} + /** * ipa3_add_interrupt_handler() - Adds handler to an interrupt type * @interrupt: Interrupt type @@ -494,21 +500,35 @@ int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) return -ENOMEM; } - res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr, - IRQF_TRIGGER_RISING, "ipa", ipa_dev); - if (res) { - IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq); - return -ENODEV; + /* + * NOTE: + * + * We'll only register an isr on 
non-emulator (ie. real UE) + * systems. + * + * On the emulator, emulator_soft_irq_isr() will be calling + * ipa3_isr, so hence, no isr registration here, and instead, + * we'll pass the address of ipa3_isr to the gsi layer where + * emulator interrupts are handled... + */ + if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { + res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr, + IRQF_TRIGGER_RISING, "ipa", ipa_dev); + if (res) { + IPAERR( + "fail to register IPA IRQ handler irq=%d\n", + ipa_irq); + return -ENODEV; + } + IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); + + res = enable_irq_wake(ipa_irq); + if (res) + IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n", + ipa_irq, res); + else + IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq); } - IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); - - res = enable_irq_wake(ipa_irq); - if (res) - IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n", - ipa_irq, res); - else - IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq); - spin_lock_init(&suspend_wa_lock); return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c index ff804294520b845f9554985ccfce956de0a3cddd..72b5a21c9e15cf839a52eeaa7bede0962539a362 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -14,6 +14,7 @@ #include #include #include "ipa_i.h" +#include struct ipa3_intf { char name[IPA_RESOURCE_NAME_MAX]; @@ -386,6 +387,105 @@ static void ipa3_send_msg_free(void *buff, u32 len, u32 type) kfree(buff); } +static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff) +{ + struct ipa3_push_msg *msg_dup; + struct ipa_wlan_msg_ex *event_ex_cur_con = NULL; + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_wlan_msg *event_ex_cur_discon = NULL; + void *data_dup = NULL; + struct ipa3_push_msg *entry; + struct ipa3_push_msg *next; + int cnt = 0, total = 0, max = 0; + uint8_t mac[IPA_MAC_ADDR_SIZE]; + uint8_t mac2[IPA_MAC_ADDR_SIZE]; + + if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) { + /* debug print */ + event_ex_cur_con = buff; + for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) { + if (event_ex_cur_con->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n", + event_ex_cur_con->attribs[cnt].u.mac_addr[0], + event_ex_cur_con->attribs[cnt].u.mac_addr[1], + event_ex_cur_con->attribs[cnt].u.mac_addr[2], + event_ex_cur_con->attribs[cnt].u.mac_addr[3], + event_ex_cur_con->attribs[cnt].u.mac_addr[4], + event_ex_cur_con->attribs[cnt].u.mac_addr[5], + meta->msg_type); + } + } + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + msg_dup = kzalloc(sizeof(*msg_dup), GFP_KERNEL); + if (msg_dup == NULL) { + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg_dup->meta = *meta; + if (meta->msg_len > 0 && buff) { + data_dup = kmalloc(meta->msg_len, GFP_KERNEL); + if (data_dup == NULL) { + kfree(msg_dup); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data_dup, buff, meta->msg_len); + msg_dup->buff = data_dup; + msg_dup->callback = ipa3_send_msg_free; + } else { + IPAERR("msg_len %d\n", meta->msg_len); + kfree(msg_dup); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + list_add_tail(&msg_dup->link, &ipa3_ctx->msg_wlan_client_list); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + } + + /* remove the cache */ + if (meta->msg_type == WLAN_CLIENT_DISCONNECT) { + /* debug print */ + event_ex_cur_discon = buff; + 
IPADBG("Mac %pM, msg %d\n", + event_ex_cur_discon->mac_addr, + meta->msg_type); + memcpy(mac2, + event_ex_cur_discon->mac_addr, + sizeof(mac2)); + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, + &ipa3_ctx->msg_wlan_client_list, + link) { + event_ex_list = entry->buff; + max = event_ex_list->num_of_attribs; + for (cnt = 0; cnt < max; cnt++) { + memcpy(mac, + event_ex_list->attribs[cnt].u.mac_addr, + sizeof(mac)); + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + pr_debug("%pM\n", mac); + + /* compare to delete one*/ + if (memcmp(mac2, mac, + sizeof(mac)) == 0) { + IPADBG("clean %d\n", total); + list_del(&entry->link); + kfree(entry); + break; + } + } + } + total++; + } + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + } + return 0; +} + /** * ipa3_send_msg() - Send "message" from kernel client to IPA driver * @meta: [in] message meta-data @@ -437,6 +537,11 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, mutex_lock(&ipa3_ctx->msg_lock); list_add_tail(&msg->link, &ipa3_ctx->msg_list); + /* support for softap client event cache */ + if (wlan_msg_process(meta, buff)) + IPAERR("wlan_msg_process failed\n"); + + /* unlock only after process */ mutex_unlock(&ipa3_ctx->msg_lock); IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]); @@ -447,6 +552,65 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff, return 0; } +/** + * ipa3_resend_wlan_msg() - Resend cached "message" to IPACM + * + * resend wlan client connect events to user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa3_resend_wlan_msg(void) +{ + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa3_push_msg *entry; + struct ipa3_push_msg *next; + int cnt = 0, total = 0; + struct ipa3_push_msg *msg; + void *data = NULL; + + IPADBG("\n"); + + mutex_lock(&ipa3_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, &ipa3_ctx->msg_wlan_client_list, + link) { + + event_ex_list = entry->buff; + for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) { + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%d-Mac %pM\n", total, + event_ex_list->attribs[cnt].u.mac_addr); + } + } + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (msg == NULL) { + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->meta = entry->meta; + data = kmalloc(entry->meta.msg_len, GFP_KERNEL); + if (data == NULL) { + kfree(msg); + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data, entry->buff, entry->meta.msg_len); + msg->buff = data; + msg->callback = ipa3_send_msg_free; + mutex_lock(&ipa3_ctx->msg_lock); + list_add_tail(&msg->link, &ipa3_ctx->msg_list); + mutex_unlock(&ipa3_ctx->msg_lock); + wake_up(&ipa3_ctx->msg_waitq); + + total++; + } + mutex_unlock(&ipa3_ctx->msg_wlan_client_lock); + return 0; +} + /** * ipa3_register_pull_msg() - register pull message type * @meta: [in] message meta-data diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index 82cd81878903073fee27f916961285e1eb4bb79c..bc17bdfb722810c588d9f1eb9088ad362a31297d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -56,9 +56,9 @@ #define IPA_MHI_FUNC_ENTRY() \ - IPA_MHI_DBG_LOW("ENTRY\n") + IPA_MHI_DBG("ENTRY\n") #define IPA_MHI_FUNC_EXIT() \ - IPA_MHI_DBG_LOW("EXIT\n") + IPA_MHI_DBG("EXIT\n") #define IPA_MHI_MAX_UL_CHANNELS 1 
#define IPA_MHI_MAX_DL_CHANNELS 1 @@ -298,13 +298,18 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, params->channel_context_addr + offsetof(struct ipa_mhi_ch_ctx, wp)); ch_scratch.mhi.assert_bit40 = params->assert_bit40; - ch_scratch.mhi.max_outstanding_tre = - ep_cfg->ipa_if_tlv * ch_props.re_size; - ch_scratch.mhi.outstanding_threshold = - min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size; - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { - ch_scratch.mhi.max_outstanding_tre = 0; - ch_scratch.mhi.outstanding_threshold = 0; + + /* + * Update scratch for MCS smart prefetch: + * Starting with IPA 4.5, smart prefetch is implemented by H/W. + * On IPA 4.0/4.1/4.2, we do not use MCS smart prefetch, + * so keep the fields zero. + */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) { + ch_scratch.mhi.max_outstanding_tre = + ep_cfg->ipa_if_tlv * ch_props.re_size; + ch_scratch.mhi.outstanding_threshold = + min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size; } ch_scratch.mhi.oob_mod_threshold = 4; if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT || @@ -549,6 +554,7 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client, int res; int ipa_ep_idx; struct ipa3_ep_context *ep; + union __packed gsi_channel_scratch gsi_ch_scratch; IPA_MHI_FUNC_ENTRY(); @@ -560,12 +566,41 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client, ep = &ipa3_ctx->ep[ipa_ep_idx]; if (brstmode_enabled && !LPTransitionRejected) { + + res = gsi_read_channel_scratch(ep->gsi_chan_hdl, + &gsi_ch_scratch); + if (res) { + IPA_MHI_ERR("read ch scratch fail %d\n", res); + return res; + } + /* * set polling mode bit to DB mode before * resuming the channel + * + * For MHI-->IPA pipes: + * when resuming due to transition to M0, + * set the polling mode bit to 0. + * In other cases, restore its value from + * when the channel was stopped. + * Here, after a successful resume the client moves to the M0 state. + * So, by default, set the polling mode bit to 0. + * + * For IPA-->MHI pipe: + * always restore the polling mode bit. + */ + if (IPA_CLIENT_IS_PROD(client)) + ch_scratch.mhi.polling_mode = + IPA_MHI_POLLING_MODE_DB_MODE; + else + ch_scratch.mhi.polling_mode = + gsi_ch_scratch.mhi.polling_mode; + + /* Use GSI update API to not affect non-SWI fields + * inside the scratch while in suspend-resume operation */ - res = gsi_write_channel_scratch( - ep->gsi_chan_hdl, ch_scratch); + res = gsi_update_mhi_channel_scratch( + ep->gsi_chan_hdl, ch_scratch.mhi); if (res) { IPA_MHI_ERR("write ch scratch fail %d\n" , res); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c index ef10f940fe28206e740f4a7ff93401adc9df33f4..2b78ba4b5ba1580f9d72cf60d6f34d1762785e45 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c @@ -771,7 +771,6 @@ static int imp_mhi_probe_cb(struct mhi_device *mhi_dev, mutex_unlock(&imp_ctx->mutex); IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); - mhi_device_get_sync(imp_ctx->md.mhi_dev); IMP_FUNC_EXIT(); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c index adc153d24f157b5a357116395b05dc0ac0b7936b..40f16bf147b3826c8a0977923b68fdd475792723 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -20,6 +20,13 @@ #include "ipahal/ipahal.h" #include "ipahal/ipahal_nat.h" +/* + * The following is for adding code (i.e. for EMULATION) that is not found on x86.
+ */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_emulation_stubs.h" +#endif + #define IPA_NAT_PHYS_MEM_OFFSET 0 #define IPA_IPV6CT_PHYS_MEM_OFFSET 0 #define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE @@ -1477,12 +1484,6 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) { struct ipa_ioc_nat_ipv6ct_table_del tmp; - if ((ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && - (del->public_ip_addr == 0)) { - IPAERR_RL("Bad Parameter public IP address\n"); - return -EPERM; - } - tmp.table_index = del->table_index; return ipa3_del_nat_table(&tmp); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c index bffadfb7147e8575b7d54d2de9398bf69cd19a13..34065cf8c2ed1f3cba54c37d38c419577077f288 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c @@ -242,7 +242,7 @@ static int calculate_throughput(void) struct ipa_pm_client *client; /* Create a basic array to hold throughputs*/ - for (i = 0, n = 0; i < IPA_PM_MAX_CLIENTS; i++) { + for (i = 1, n = 0; i < IPA_PM_MAX_CLIENTS; i++) { client = ipa_pm_ctx->clients[i]; if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) { /* default case */ @@ -498,7 +498,8 @@ static int find_next_open_array_element(const char *name) n = -ENOBUFS; - for (i = IPA_PM_MAX_CLIENTS - 1; i >= 0; i--) { + /* 0 is not a valid handle */ + for (i = IPA_PM_MAX_CLIENTS - 1; i >= 1; i--) { if (ipa_pm_ctx->clients[i] == NULL) { n = i; continue; @@ -1063,7 +1064,7 @@ int ipa_pm_deactivate_all_deferred(void) return -EINVAL; } - for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) { + for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) { client = ipa_pm_ctx->clients[i]; if (client == NULL) @@ -1304,7 +1305,7 @@ int ipa_pm_stat(char *buf, int size) cnt += result; - for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) { + for (i = 1; i < IPA_PM_MAX_CLIENTS; i++) { client = ipa_pm_ctx->clients[i]; if (client == NULL) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h index 38e082f35e6fcccf49333217144c470848592d97..47a03f93824363fe72cf8a49331246d9de53c29a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,7 +16,7 @@ #include /* internal to ipa */ -#define IPA_PM_MAX_CLIENTS 12 /* actual max is value -1 since we start from 1*/ +#define IPA_PM_MAX_CLIENTS 32 /* actual max is value -1 since we start from 1*/ #define IPA_PM_MAX_EX_CL 64 #define IPA_PM_THRESHOLD_MAX 5 #define IPA_PM_EXCEPTION_MAX 2 diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index dfb94a683c286ebb7ddccaa8fd09262166f9ad88..654a3df0631e259fbfa47f821c0e8f7fd2a1edf7 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -585,6 +585,14 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) int rc; int i; + /* check if modem up */ + if (!ipa3_qmi_indication_fin || + !ipa3_qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + /* check if the filter rules from IPACM is valid */ if (req->filter_spec_list_len == 0) { IPAWANDBG("IPACM pass zero rules to Q6\n"); @@ -676,6 +684,14 @@ int ipa3_qmi_filter_request_ex_send( int rc; int i; + /* check if modem up */ + if (!ipa3_qmi_indication_fin || + !ipa3_qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + /* check if the filter rules from IPACM is valid */ if (req->filter_spec_ex_list_len == 0) { IPAWANDBG("IPACM pass zero rules to Q6\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index ce2e34a683ba3aef13d0346d06382d322f2912ed..c431baacd527c2e4522fabef18f84d88e88e5b04 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -421,19 +421,23 @@ static inline int ipa3_qmi_stop_data_qouta(void) static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { } -static int ipa3_qmi_send_mhi_ready_indication( +static inline int ipa3_qmi_send_mhi_ready_indication( struct ipa_mhi_ready_indication_msg_v01 *req) { return -EPERM; } -static int ipa3_qmi_send_mhi_cleanup_request( +static inline int ipa3_qmi_send_mhi_cleanup_request( struct ipa_mhi_cleanup_req_msg_v01 *req) { return -EPERM; } -static inline int ipa3_wwan_set_modem_perf_profile(int throughput); +static inline int ipa3_wwan_set_modem_perf_profile( + int throughput) +{ + return -EPERM; +} static inline int ipa3_qmi_enable_per_client_stats( struct ipa_enable_per_client_stats_req_msg_v01 *req, struct ipa_enable_per_client_stats_resp_msg_v01 *resp) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index d2f0f9ced72853e051a487304bb9cc17c7b84b08..a0bd749dbd30768b0879fa3bccd30e7e829d4d15 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -955,7 +955,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, const struct ipa_rt_rule *rule, struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr, struct ipa3_hdr_proc_ctx_entry *proc_ctx, - u16 rule_id) + u16 rule_id, bool user) { int id; @@ -981,6 +981,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, } } (*(entry))->rule_id = id; + (*(entry))->ipacm_installed = user; return 0; @@ -1026,7 +1027,7 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl, static int __ipa_add_rt_rule(enum ipa_ip_type ip, 
const char *name, const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, - u16 rule_id) + u16 rule_id, bool user) { struct ipa3_rt_tbl *tbl; struct ipa3_rt_entry *entry; @@ -1055,7 +1056,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, } if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, - rule_id)) + rule_id, user)) goto error; if (at_rear) @@ -1086,7 +1087,7 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl, if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) goto error; - if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0)) + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0, true)) goto error; list_add(&entry->link, &((*add_after_entry)->link)); @@ -1115,7 +1116,24 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl, * * Note: Should not be called from atomic context */ + int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + return ipa3_add_rt_rule_usr(rules, false); +} + +/** + * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate installed by userspace module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ + +int ipa3_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) { int i; int ret; @@ -1135,7 +1153,8 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].rt_rule_hdl, - 0)) { + 0, + user_only)) { IPAERR("failed to add rt rule %d\n", i); rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; } else { @@ -1183,7 +1202,7 @@ int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules) &rules->rules[i].rule, rules->rules[i].at_rear, &rules->rules[i].rt_rule_hdl, - rules->rules[i].rule_id)) { + rules->rules[i].rule_id, true)) { IPAERR("failed to add rt rule %d\n", i); rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; } else { @@ -1463,13 +1482,14 @@ int ipa3_commit_rt(enum ipa_ip_type ip) /** * ipa3_reset_rt() - reset the current SW routing table of specified type * (does not commit to HW) - * @ip: The family of routing tables + * @ip: [in] The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace * * Returns: 0 on success, negative on failure * * Note: Should not be called from atomic context */ -int ipa3_reset_rt(enum ipa_ip_type ip) +int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only) { struct ipa3_rt_tbl *tbl; struct ipa3_rt_tbl *tbl_next; @@ -1479,6 +1499,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip) struct ipa3_rt_tbl_set *rset; u32 apps_start_idx; int id; + bool tbl_user = false; if (ip >= IPA_IP_MAX) { IPAERR_RL("bad param\n"); @@ -1496,7 +1517,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip) * issue a reset on the filtering module of same IP type since * filtering rules point to routing tables */ - if (ipa3_reset_flt(ip)) + if (ipa3_reset_flt(ip, user_only)) IPAERR_RL("fail to reset flt ip=%d\n", ip); set = &ipa3_ctx->rt_tbl_set[ip]; @@ -1504,6 +1525,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip) mutex_lock(&ipa3_ctx->lock); IPADBG("reset rt ip=%d\n", ip); list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + tbl_user = false; list_for_each_entry_safe(rule, rule_next, &tbl->head_rt_rule_list, link) { if (ipa3_id_find(rule->id) == NULL) { @@ -1512,6 +1534,12 @@ int ipa3_reset_rt(enum ipa_ip_type ip) return -EFAULT; } + /* indicate if tbl used for user-specified rules*/ 
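+			/* with user_only set, only entries flagged + * ipacm_installed (i.e. added through the *_usr() + * variants) are removed below; a full reset + * (user_only == false) removes every rule + */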
+ if (rule->ipacm_installed) { + IPADBG("tbl_user %d, tbl-index %d\n", + tbl_user, tbl->id); + tbl_user = true; + } /* * for the "default" routing tbl, remove all but the * last rule @@ -1519,19 +1547,23 @@ int ipa3_reset_rt(enum ipa_ip_type ip) if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) continue; - list_del(&rule->link); - tbl->rule_cnt--; - if (rule->hdr) - __ipa3_release_hdr(rule->hdr->id); - else if (rule->proc_ctx) - __ipa3_release_hdr_proc_ctx(rule->proc_ctx->id); - rule->cookie = 0; - idr_remove(tbl->rule_ids, rule->rule_id); - id = rule->id; - kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); - - /* remove the handle from the database */ - ipa3_id_remove(id); + if (!user_only || + rule->ipacm_installed) { + list_del(&rule->link); + tbl->rule_cnt--; + if (rule->hdr) + __ipa3_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa3_release_hdr_proc_ctx( + rule->proc_ctx->id); + rule->cookie = 0; + idr_remove(tbl->rule_ids, rule->rule_id); + id = rule->id; + kmem_cache_free(ipa3_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa3_id_remove(id); + } } if (ipa3_id_find(tbl->id) == NULL) { @@ -1543,26 +1575,30 @@ int ipa3_reset_rt(enum ipa_ip_type ip) /* do not remove the "default" routing tbl which has index 0 */ if (tbl->idx != apps_start_idx) { - tbl->rule_ids = NULL; - if (tbl->in_sys[IPA_RULE_HASHABLE] || - tbl->in_sys[IPA_RULE_NON_HASHABLE]) { - list_move(&tbl->link, &rset->head_rt_tbl_list); - clear_bit(tbl->idx, + if (!user_only || tbl_user) { + tbl->rule_ids = NULL; + if (tbl->in_sys[IPA_RULE_HASHABLE] || + tbl->in_sys[IPA_RULE_NON_HASHABLE]) { + list_move(&tbl->link, + &rset->head_rt_tbl_list); + clear_bit(tbl->idx, &ipa3_ctx->rt_idx_bitmap[ip]); - set->tbl_cnt--; - IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n", + set->tbl_cnt--; + IPADBG("rst tbl_idx=%d cnt=%d\n", tbl->idx, set->tbl_cnt); - } else { - list_del(&tbl->link); - set->tbl_cnt--; - clear_bit(tbl->idx, + } else { + list_del(&tbl->link); + set->tbl_cnt--; + clear_bit(tbl->idx, &ipa3_ctx->rt_idx_bitmap[ip]); - IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", tbl->idx, set->tbl_cnt); - kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl); + kmem_cache_free(ipa3_ctx->rt_tbl_cache, + tbl); + } + /* remove the handle from the database */ + ipa3_id_remove(id); } - /* remove the handle from the database */ - ipa3_id_remove(id); } } mutex_unlock(&ipa3_ctx->lock); @@ -1678,6 +1714,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL; struct ipa3_hdr_entry *hdr_entry; struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry; + if (rtrule->rule.hdr_hdl) { hdr = ipa3_id_find(rtrule->rule.hdr_hdl); if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c index e66aeeabe7e6d2a03401289f190efb37edee3302..9f17d892da2f4ba47899ab2960358edcee449f49 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c @@ -508,6 +508,8 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, result = -EFAULT; goto fail_smmu_map_dl; } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); IPADBG("client %d (ep: %d) connected\n", in->dl.client, ipa_ep_idx_dl); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c index 24ef2928f3042b5571d4dffbe86751a4472373b6..e06d6a4760f6d1d52e8474864fd58e0593a386e0 100644 --- 
a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c @@ -762,16 +762,21 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, return -EINVAL; } - if (IPA_CLIENT_IS_CONS(in->sys.client)) { - if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT || - in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { - IPAERR("alignment failure on TX\n"); - return -EINVAL; - } - } else { - if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { - IPAERR("alignment failure on RX\n"); - return -EINVAL; + if (!in->smmu_enabled) { + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (in->u.dl.comp_ring_base_pa % + IPA_WDI_RING_ALIGNMENT || + in->u.dl.ce_ring_base_pa % + IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on TX\n"); + return -EINVAL; + } + } else { + if (in->u.ul.rdy_ring_base_pa % + IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on RX\n"); + return -EINVAL; + } } } @@ -801,43 +806,73 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, cmd.size = sizeof(*tx_2); else cmd.size = sizeof(*tx); - IPADBG("comp_ring_base_pa=0x%pa\n", - &in->u.dl.comp_ring_base_pa); - IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); - IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa); - IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); - IPADBG("ce_ring_doorbell_pa=0x%pa\n", - &in->u.dl.ce_door_bell_pa); - IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + if (in->smmu_enabled) { + IPADBG("comp_ring_size=%d\n", + in->u.dl_smmu.comp_ring_size); + IPADBG("ce_ring_size=%d\n", in->u.dl_smmu.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl_smmu.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", + in->u.dl_smmu.num_tx_buffers); + } else { + IPADBG("comp_ring_base_pa=0x%pa\n", + &in->u.dl.comp_ring_base_pa); + IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); + IPADBG("ce_ring_base_pa=0x%pa\n", + &in->u.dl.ce_ring_base_pa); + IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + } } else { if (ipa3_ctx->ipa_wdi2) cmd.size = sizeof(*rx_2); else cmd.size = sizeof(*rx); - IPADBG("rx_ring_base_pa=0x%pa\n", - &in->u.ul.rdy_ring_base_pa); - IPADBG("rx_ring_size=%d\n", - in->u.ul.rdy_ring_size); - IPADBG("rx_ring_rp_pa=0x%pa\n", - &in->u.ul.rdy_ring_rp_pa); - IPADBG("rx_comp_ring_base_pa=0x%pa\n", - &in->u.ul.rdy_comp_ring_base_pa); - IPADBG("rx_comp_ring_size=%d\n", - in->u.ul.rdy_comp_ring_size); - IPADBG("rx_comp_ring_wp_pa=0x%pa\n", - &in->u.ul.rdy_comp_ring_wp_pa); - ipa3_ctx->uc_ctx.rdy_ring_base_pa = - in->u.ul.rdy_ring_base_pa; - ipa3_ctx->uc_ctx.rdy_ring_rp_pa = - in->u.ul.rdy_ring_rp_pa; - ipa3_ctx->uc_ctx.rdy_ring_size = - in->u.ul.rdy_ring_size; - ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa = - in->u.ul.rdy_comp_ring_base_pa; - ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa = - in->u.ul.rdy_comp_ring_wp_pa; - ipa3_ctx->uc_ctx.rdy_comp_ring_size = - in->u.ul.rdy_comp_ring_size; + if (in->smmu_enabled) { + IPADBG("rx_ring_size=%d\n", + in->u.ul_smmu.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul_smmu.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul_smmu.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul_smmu.rdy_comp_ring_wp_pa); + ipa3_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul_smmu.rdy_ring_rp_pa; + ipa3_ctx->uc_ctx.rdy_ring_size = + in->u.ul_smmu.rdy_ring_size; + ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul_smmu.rdy_comp_ring_wp_pa; + 
ipa3_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul_smmu.rdy_comp_ring_size; + } else { + IPADBG("rx_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_ring_base_pa); + IPADBG("rx_ring_size=%d\n", + in->u.ul.rdy_ring_size); + IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul.rdy_ring_rp_pa); + IPADBG("rx_comp_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_base_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_wp_pa); + ipa3_ctx->uc_ctx.rdy_ring_base_pa = + in->u.ul.rdy_ring_base_pa; + ipa3_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul.rdy_ring_rp_pa; + ipa3_ctx->uc_ctx.rdy_ring_size = + in->u.ul.rdy_ring_size; + ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa = + in->u.ul.rdy_comp_ring_base_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul.rdy_comp_ring_wp_pa; + ipa3_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul.rdy_comp_ring_size; + } } cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size, @@ -951,10 +986,11 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, tx->comp_ring_size = len; len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : in->u.dl.ce_ring_size; - IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n", + IPADBG("TX CE ring smmu_en=%d ring_size=%d %d 0x%lx\n", in->smmu_enabled, in->u.dl_smmu.ce_ring_size, - in->u.dl.ce_ring_size); + in->u.dl.ce_ring_size, + va); if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, in->smmu_enabled, in->u.dl.ce_ring_base_pa, @@ -981,8 +1017,19 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in, result = -ENOMEM; goto uc_timeout; } - tx->ce_ring_doorbell_pa = va; - tx->num_tx_buffers = in->u.dl.num_tx_buffers; + + IPADBG("CE doorbell pa: 0x%pa va:0x%lx\n", &pa, va); + IPADBG("Is wdi_over_pcie ? (%s)\n", + ipa3_ctx->wdi_over_pcie ? "Yes":"No"); + + if (ipa3_ctx->wdi_over_pcie) + tx->ce_ring_doorbell_pa = pa; + else + tx->ce_ring_doorbell_pa = va; + + tx->num_tx_buffers = in->smmu_enabled ? + in->u.dl_smmu.num_tx_buffers : + in->u.dl.num_tx_buffers; tx->ipa_pipe_number = ipa_ep_idx; } out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 6fac15b6d2032232d0465a1651d6e40c0485fe45..3b02e8b766d068b281f0389bff8b47e10a957a71 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -24,6 +24,13 @@ #include "ipahal/ipahal_hw_stats.h" #include "../ipa_rm_i.h" +/* + * The following for adding code (ie. for EMULATION) not found on x86. 
+ */ +#if defined(CONFIG_IPA_EMULATION) +# include "ipa_emulation_stubs.h" +#endif + #define IPA_V3_0_CLK_RATE_SVS2 (37.5 * 1000 * 1000UL) #define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL) #define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) @@ -147,6 +154,17 @@ #define IPA_v4_2_GROUP_UL_DL (0) #define IPA_v4_2_SRC_GROUP_MAX (1) #define IPA_v4_2_DST_GROUP_MAX (1) + +#define IPA_v4_5_MHI_GROUP_PCIE (0) +#define IPA_v4_5_ETHERNET (0) +#define IPA_v4_5_GROUP_UL_DL (1) +#define IPA_v4_5_MHI_GROUP_DDR (1) +#define IPA_v4_5_MHI_GROUP_DMA (2) +#define IPA_v4_5_MHI_GROUP_QDSS (3) +#define IPA_v4_5_GROUP_UC_RX_Q (4) +#define IPA_v4_5_SRC_GROUP_MAX (5) +#define IPA_v4_5_DST_GROUP_MAX (5) + #define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX enum ipa_rsrc_grp_type_src { @@ -334,7 +352,34 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { {5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, }, - + [IPA_4_5] = { + /* not used UL_DL not used not used UC_RX_Q + * other are invalid + */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {0, 0}, {18, 18}, {0, 0}, {0, 0}, {8, 8}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 0}, {0, 63}, {0, 0}, {0, 0}, {0, 63}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} }, + }, + [IPA_4_5_MHI] = { + /* PCIE DDR DMA QDSS not used other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = { + {3, 8}, {4, 11}, {1, 1}, {1, 1}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = { + {9, 9}, {12, 12}, {2, 2}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = { + {9, 9}, {14, 14}, {4, 4}, {4, 4}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = { + {0, 63}, {0, 63}, {0, 63}, {0, 63}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = { + {22, 22}, {16, 16}, {6, 6}, {2, 2}, {0, 0}, {0, 0} }, + }, }; static const struct rsrc_min_max ipa3_rsrc_dst_grp_config @@ -397,6 +442,20 @@ static const struct rsrc_min_max ipa3_rsrc_dst_grp_config [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { {1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, }, + [IPA_4_5] = { + /* ETH UL/DL/DPL not used not used uC other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {16, 16}, {5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 63}, {1, 63}, {0, 0}, {0, 0}, {0, 2}, {0, 0} }, + }, + [IPA_4_5_MHI] = { + /* PCIE/DPL DDR DMA QDSS uC other are invalid */ + [IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = { + {16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} }, + [IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = { + {2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} }, + }, }; static const struct rsrc_min_max ipa3_rsrc_rx_grp_config @@ -441,6 +500,18 @@ static const struct rsrc_min_max ipa3_rsrc_rx_grp_config [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { {4, 4}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, }, + [IPA_4_5] = { + /* not used UL_DL not used not used UC_RX_Q + * other are invalid + */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + {0, 0}, {3, 3}, {0, 0}, {0, 0}, {0, 0}, {0, 0} }, + }, + [IPA_4_5_MHI] = { + /* PCIE DDR DMA QDSS not used other are invalid */ + [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = { + { 3, 3 }, {3, 3}, {3, 3}, {3, 3}, {0, 0}, { 0, 0 } }, + }, }; @@ -474,10 +545,6 @@ static 
const u32 ipa3_rsrc_rx_grp_hps_weight_config /* LWA_DL UL_DL not used UC_RX_Q, other are invalid */ [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 1, 1, 1, 0, 0 }, }, - [IPA_4_2] = { - /* UL_DL, other are invalid */ - [IPA_RSRC_GRP_TYPE_RX_HPS_WEIGHT_CONFIG] = { 1, 0, 0, 0, 0, 0 }, - }, }; enum ipa_ees { @@ -2585,6 +2652,29 @@ static void ipa_comp_cfg(void) IPADBG("ipa_qmb_select_by_address_cons_en = %d\n", comp_cfg.ipa_qmb_select_by_address_cons_en); } + + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("Before comp config\n"); + IPADBG("gsi_multi_inorder_rd_dis = %d\n", + comp_cfg.gsi_multi_inorder_rd_dis); + + IPADBG("gsi_multi_inorder_wr_dis = %d\n", + comp_cfg.gsi_multi_inorder_wr_dis); + + comp_cfg.gsi_multi_inorder_rd_dis = true; + comp_cfg.gsi_multi_inorder_wr_dis = true; + + ipahal_write_reg_fields(IPA_COMP_CFG, &comp_cfg); + + ipahal_read_reg_fields(IPA_COMP_CFG, &comp_cfg); + IPADBG("After comp config\n"); + IPADBG("gsi_multi_inorder_rd_dis = %d\n", + comp_cfg.gsi_multi_inorder_rd_dis); + + IPADBG("gsi_multi_inorder_wr_dis = %d\n", + comp_cfg.gsi_multi_inorder_wr_dis); + } } /** @@ -2961,6 +3051,32 @@ enum ipa_client_type ipa3_get_client_mapping(int pipe_idx) return ipa3_ctx->ep[pipe_idx].client; } +/** + * ipa3_get_client_by_pipe() - return client type relative to pipe + * index + * @pipe_idx: IPA end-point number + * + * Return value: client type + */ +static enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx) +{ + int j = 0; + + for (j = 0; j < IPA_CLIENT_MAX; j++) { + const struct ipa_ep_configuration *iec_ptr = + &(ipa3_ep_mapping[ipa3_get_hw_type_index()][j]); + if (iec_ptr->valid && + iec_ptr->ipa_gsi_ep_info.ipa_ep_num == pipe_idx) + break; + } + + if (j == IPA_CLIENT_MAX) + IPADBG("Got to IPA_CLIENT_MAX (%d) while searching for (%d)\n", + j, pipe_idx); + + return j; +} + /** * ipa_init_ep_flt_bitmap() - Initialize the bitmap * that represents the End-points that supports filtering @@ -5061,6 +5177,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client; api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl; api_ctrl->ipa_add_hdr = ipa3_add_hdr; + api_ctrl->ipa_add_hdr_usr = ipa3_add_hdr_usr; api_ctrl->ipa_del_hdr = ipa3_del_hdr; api_ctrl->ipa_commit_hdr = ipa3_commit_hdr; api_ctrl->ipa_reset_hdr = ipa3_reset_hdr; @@ -5070,6 +5187,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx; api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx; api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule; + api_ctrl->ipa_add_rt_rule_usr = ipa3_add_rt_rule_usr; api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule; api_ctrl->ipa_commit_rt = ipa3_commit_rt; api_ctrl->ipa_reset_rt = ipa3_reset_rt; @@ -5078,6 +5196,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_query_rt_index = ipa3_query_rt_index; api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule; api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule; + api_ctrl->ipa_add_flt_rule_usr = ipa3_add_flt_rule_usr; api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule; api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule; api_ctrl->ipa_commit_flt = ipa3_commit_flt; @@ -5432,11 +5551,23 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index, case IPA_4_5_MHI: if (src) { switch (group_index) { - case IPA_v4_2_GROUP_UL_DL: + case IPA_v4_5_MHI_GROUP_PCIE: + case IPA_v4_5_GROUP_UL_DL: ipahal_write_reg_n_fields( IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n, n, val); break; + 
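/* v4.5 groups are programmed in register pairs: groups 0/1 via the _01_ register, 2/3 via _23_, and group 4 via _45_ */ +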
case IPA_v4_5_MHI_GROUP_DMA: + case IPA_v4_5_MHI_GROUP_QDSS: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_5_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; default: IPAERR( " Invalid source resource group,index #%d\n", @@ -5445,11 +5576,23 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index, } } else { switch (group_index) { - case IPA_v4_2_GROUP_UL_DL: + case IPA_v4_5_MHI_GROUP_PCIE: + case IPA_v4_5_GROUP_UL_DL: ipahal_write_reg_n_fields( IPA_DST_RSRC_GRP_01_RSRC_TYPE_n, n, val); break; + case IPA_v4_5_MHI_GROUP_DMA: + case IPA_v4_5_MHI_GROUP_QDSS: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_23_RSRC_TYPE_n, + n, val); + break; + case IPA_v4_5_GROUP_UC_RX_Q: + ipahal_write_reg_n_fields( + IPA_DST_RSRC_GRP_45_RSRC_TYPE_n, + n, val); + break; default: IPAERR( " Invalid destination resource group,index #%d\n", @@ -5466,7 +5609,8 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index, } } -static void ipa3_configure_rx_hps_clients(int depth, bool min) +static void ipa3_configure_rx_hps_clients(int depth, + int max_clnt_in_depth, int base_index, bool min) { int i; struct ipahal_reg_rx_hps_clients val; @@ -5474,23 +5618,19 @@ static void ipa3_configure_rx_hps_clients(int depth, bool min) hw_type_idx = ipa3_get_hw_type_index(); - /* - * depth 0 contains 4 first clients out of 6 - * depth 1 contains 2 last clients out of 6 - */ - for (i = 0 ; i < (depth ? 2 : 4) ; i++) { + for (i = 0 ; i < max_clnt_in_depth ; i++) { if (min) val.client_minmax[i] = ipa3_rsrc_rx_grp_config [hw_type_idx] [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] - [!depth ? i : 4 + i].min; + [i + base_index].min; else val.client_minmax[i] = ipa3_rsrc_rx_grp_config [hw_type_idx] [IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] - [!depth ? i : 4 + i].max; + [i + base_index].max; } if (depth) { ipahal_write_reg_fields(min ? 
IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 : @@ -5540,9 +5680,9 @@ void ipa3_set_resorce_groups_min_max_limits(void) int dst_grp_idx_max; struct ipahal_reg_rsrc_grp_cfg val; u8 hw_type_idx; + int rx_hps_max_clnt_in_depth0; IPADBG("ENTER\n"); - IPADBG("Assign source rsrc groups min-max limits\n"); hw_type_idx = ipa3_get_hw_type_index(); switch (hw_type_idx) { @@ -5578,8 +5718,8 @@ void ipa3_set_resorce_groups_min_max_limits(void) case IPA_4_5_MHI: src_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_SRC_MAX; dst_rsrc_type_max = IPA_v4_0_RSRC_GRP_TYPE_DST_MAX; - src_grp_idx_max = IPA_v4_2_SRC_GROUP_MAX; - dst_grp_idx_max = IPA_v4_2_DST_GROUP_MAX; + src_grp_idx_max = IPA_v4_5_SRC_GROUP_MAX; + dst_grp_idx_max = IPA_v4_5_DST_GROUP_MAX; break; default: IPAERR("invalid hw type index\n"); @@ -5587,6 +5727,7 @@ void ipa3_set_resorce_groups_min_max_limits(void) return; } + IPADBG("Assign source rsrc groups min-max limits\n"); for (i = 0; i < src_rsrc_type_max; i++) { for (j = 0; j < src_grp_idx_max; j = j + 2) { val.x_min = @@ -5602,7 +5743,6 @@ void ipa3_set_resorce_groups_min_max_limits(void) } IPADBG("Assign destination rsrc groups min-max limits\n"); - for (i = 0; i < dst_rsrc_type_max; i++) { for (j = 0; j < dst_grp_idx_max; j = j + 2) { val.x_min = @@ -5617,26 +5757,38 @@ void ipa3_set_resorce_groups_min_max_limits(void) } } - /* move resource group configuration from HLOS to TZ */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) { + /* move rx_hps resource group configuration from HLOS to TZ */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1 && + ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) { IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n"); return; } IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n"); - ipa3_configure_rx_hps_clients(0, true); - ipa3_configure_rx_hps_clients(0, false); + /* Starting with IPA 4.5 there are 5 RX_HPS_CMDQ clients in depth 0 */ + if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) + rx_hps_max_clnt_in_depth0 = 4; + else + rx_hps_max_clnt_in_depth0 = 5; + + ipa3_configure_rx_hps_clients(0, rx_hps_max_clnt_in_depth0, 0, true); + ipa3_configure_rx_hps_clients(0, rx_hps_max_clnt_in_depth0, 0, false); - /* only hw_type v3_0\3_1 have 6 RX_HPS_CMDQ and needs depth 1*/ + /* + * IPA 3.0/3.1 uses 6 RX_HPS_CMDQ clients and therefore also needs + * depth 1, which holds the remaining two clients + */ if (ipa3_ctx->ipa_hw_type <= IPA_HW_v3_1) { - ipa3_configure_rx_hps_clients(1, true); - ipa3_configure_rx_hps_clients(1, false); + ipa3_configure_rx_hps_clients(1, 2, rx_hps_max_clnt_in_depth0, + true); + ipa3_configure_rx_hps_clients(1, 2, rx_hps_max_clnt_in_depth0, + false); } - /* In IPA4.2 no support to HPS weight config*/ + /* Starting with IPA 4.2 there is no support for HPS weight config */ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 && - (ipa3_ctx->ipa_hw_type != IPA_HW_v4_2)) + (ipa3_ctx->ipa_hw_type < IPA_HW_v4_2)) ipa3_configure_rx_hps_weight(); IPADBG("EXIT\n"); @@ -5928,16 +6080,89 @@ static int ipa3_load_single_fw(const struct firmware *firmware, return 0; } +/** + * emulator_load_single_fw() - load firmware into emulator's memory + * + * @firmware: Structure which contains the FW data from the user space.
+ * @phdr: ELF program header + * @fw_base: memory location to which firmware should get loaded + * @offset_from_base: offset to start relative to fw_base + * + * Return value: 0 on success, negative otherwise + */ +static int emulator_load_single_fw( + const struct firmware *firmware, + const struct elf32_phdr *phdr, + void __iomem *fw_base, + uint32_t offset_from_base) +{ + int index; + uint32_t ofb; + const uint32_t *elf_data_ptr; + + IPADBG("firmware(%pK) phdr(%pK) fw_base(%pK) offset_from_base(0x%x)\n", + firmware, phdr, fw_base, offset_from_base); + + if (phdr->p_offset > firmware->size) { + IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n", + phdr->p_offset, firmware->size); + return -EINVAL; + } + if ((firmware->size - phdr->p_offset) < phdr->p_filesz) { + IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n", + phdr->p_offset, phdr->p_filesz, firmware->size); + return -EINVAL; + } + + if (phdr->p_memsz % sizeof(uint32_t)) { + IPAERR("FW mem size %u doesn't align to 32bit\n", + phdr->p_memsz); + return -EFAULT; + } + + if (phdr->p_filesz > phdr->p_memsz) { + IPAERR("FW image too big src_size=%u dst_size=%u\n", + phdr->p_filesz, phdr->p_memsz); + return -EFAULT; + } + + IPADBG("ELF: p_memsz(0x%x) p_filesz(0x%x) p_filesz/4(0x%x)\n", + (uint32_t) phdr->p_memsz, + (uint32_t) phdr->p_filesz, + (uint32_t) (phdr->p_filesz/sizeof(uint32_t))); + + /* Set the entire region to 0s */ + ofb = offset_from_base; + for (index = 0; index < phdr->p_memsz/sizeof(uint32_t); index++) { + writel_relaxed(0, fw_base + ofb); + ofb += sizeof(uint32_t); + } + + elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset); + + /* Write the FW */ + ofb = offset_from_base; + for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) { + writel_relaxed(*elf_data_ptr, fw_base + ofb); + elf_data_ptr++; + ofb += sizeof(uint32_t); + } + + return 0; +} + /** * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM. * * @firmware: Structure which contains the FW data from the user space. * @gsi_mem_base: GSI base address + * @gsi_ver: GSI Version * * Return value: 0 on success, negative otherwise * */ -int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base) +int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base, + enum gsi_ver gsi_ver) { const struct elf32_hdr *ehdr; const struct elf32_phdr *phdr; @@ -5947,6 +6172,11 @@ int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base) u32 ipa_reg_ofst; int rc; + if (gsi_ver == GSI_VER_ERR) { + IPAERR("Invalid GSI Version\n"); + return -EINVAL; + } + if (!gsi_mem_base) { IPAERR("Invalid GSI base address\n"); return -EINVAL; @@ -5977,7 +6207,8 @@ int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base) */ /* Load GSI FW image */ - gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size); + gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size, + gsi_ver); if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) { IPAERR( "Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n" @@ -6036,6 +6267,243 @@ int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base) return 0; } +/* + * The following is needed for the EMULATION system. On a non-emulation + * system (i.e. the real UE), this functionality is done in the + * TZ...
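+ * + * ipa_gsi_setup_reg() below programs the per-pipe GSI interface + * registers (TLV/AOS FIFO sizes, GSI channel number and EE) that + * TZ would otherwise configure.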
+ */ +#define IPA_SPARE_REG_1_VAL (0xC0000805) + +static void ipa_gsi_setup_reg(void) +{ + u32 reg_val, start; + int i; + const struct ipa_gsi_ep_config *gsi_ep_info_cfg; + enum ipa_client_type type; + + IPADBG("Setting up registers in preparation for firmware download\n"); + + /* enable GSI interface */ + ipahal_write_reg(IPA_GSI_CONF, 1); + + /* + * Before configuring the FIFOs need to unset bit 30 in the + * spare register + */ + ipahal_write_reg(IPA_SPARE_REG_1, + (IPA_SPARE_REG_1_VAL & (~(1 << 30)))); + + /* setup IPA_ENDP_GSI_CFG_TLV_n reg */ + start = 0; + ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes(); + IPADBG("ipa_num_pipes=%u\n", ipa3_ctx->ipa_num_pipes); + + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + IPADBG("for ep %d client is %d\n", i, type); + if (!gsi_ep_info_cfg) + continue; + IPADBG("Config is true"); + reg_val = (gsi_ep_info_cfg->ipa_if_tlv << 16) + start; + start += gsi_ep_info_cfg->ipa_if_tlv; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG_TLV_n, i, reg_val); + } + + /* setup IPA_ENDP_GSI_CFG_AOS_n reg */ + start = 0; + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + if (!gsi_ep_info_cfg) + continue; + reg_val = (gsi_ep_info_cfg->ipa_if_aos << 16) + start; + start += gsi_ep_info_cfg->ipa_if_aos; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG_AOS_n, i, reg_val); + } + + /* setup IPA_ENDP_GSI_CFG1_n reg */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + if (!gsi_ep_info_cfg) + continue; + reg_val = (1 << 16) + + ((u32)gsi_ep_info_cfg->ipa_gsi_chan_num << 8) + + gsi_ep_info_cfg->ee; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG1_n, i, reg_val); + } + + /* + * Setup IPA_ENDP_GSI_CFG2_n reg: this register must be setup + * as last one + */ + for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) { + type = ipa3_get_client_by_pipe(i); + gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type); + if (!gsi_ep_info_cfg) + continue; + reg_val = 1 << 31; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_n, i, reg_val); + reg_val = 0; + ipahal_write_reg_n(IPA_ENDP_GSI_CFG2_n, i, reg_val); + } + + /* + * After configuring the FIFOs need to set bit 30 in the spare + * register + */ + ipahal_write_reg(IPA_SPARE_REG_1, + (IPA_SPARE_REG_1_VAL | (1 << 30))); +} + +/** + * emulator_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM. + * + * @firmware: Structure which contains the FW data from the user space. 
+ * @transport_mem_base: Where to load + * @transport_mem_size: Space available to load into + * @gsi_ver: Version of the gsi + * + * Return value: 0 on success, negative otherwise + */ +int emulator_load_fws( + const struct firmware *firmware, + u32 transport_mem_base, + u32 transport_mem_size, + enum gsi_ver gsi_ver) +{ + const struct elf32_hdr *ehdr; + const struct elf32_phdr *phdr; + void __iomem *gsi_base; + uint32_t hps_seq_offs, dps_seq_offs; + unsigned long gsi_offset; + int rc; + + IPADBG("Loading firmware(%pK)\n", firmware); + + if (!firmware) { + IPAERR("firmware pointer passed to function is NULL\n"); + return -EINVAL; + } + + /* One program header per FW image: GSI, DPS and HPS */ + if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) { + IPAERR( + "Missing ELF and Program headers firmware size=%zu\n", + firmware->size); + return -EINVAL; + } + + ehdr = (struct elf32_hdr *) firmware->data; + + ipa_assert_on(!ehdr); + + if (ehdr->e_phnum != 3) { + IPAERR("Unexpected number of ELF program headers\n"); + return -EINVAL; + } + + hps_seq_offs = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST); + dps_seq_offs = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST); + + /* + * Each ELF program header represents a FW image and contains: + * p_vaddr : The starting address to which the FW needs to loaded. + * p_memsz : The size of the IRAM (where the image loaded) + * p_filesz: The size of the FW image embedded inside the ELF + * p_offset: Absolute offset to the image from the head of the ELF + * + * NOTE WELL: On the emulation platform, the p_vaddr address + * is not relevant and is unused. This is because + * on the emulation platform, the registers' + * address location is mutable, since it's mapped + * in via a PCIe probe. Given this, it is the + * mapped address info that's used while p_vaddr is + * ignored. + */ + phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr)); + + phdr += 2; + + /* + * Attempt to load IPA HPS FW image + */ + if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) { + IPAERR("Invalid IPA HPS img size memsz=%d dps_mem_size=%u\n", + phdr->p_memsz, ipahal_get_hps_img_mem_size()); + return -EINVAL; + } + IPADBG("Loading HPS FW\n"); + rc = emulator_load_single_fw( + firmware, phdr, ipa3_ctx->mmio, hps_seq_offs); + if (rc) + return rc; + IPADBG("Loading HPS FW complete\n"); + + --phdr; + + /* + * Attempt to load IPA DPS FW image + */ + if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) { + IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n", + phdr->p_memsz, ipahal_get_dps_img_mem_size()); + return -EINVAL; + } + IPADBG("Loading DPS FW\n"); + rc = emulator_load_single_fw( + firmware, phdr, ipa3_ctx->mmio, dps_seq_offs); + if (rc) + return rc; + IPADBG("Loading DPS FW complete\n"); + + /* + * Run gsi register setup which is normally done in TZ on + * non-EMULATION systems... + */ + ipa_gsi_setup_reg(); + + /* + * Map to the GSI base... 
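+ * The GSI firmware image itself is then written at the instruction + * RAM offset reported by gsi_get_inst_ram_offset_and_size() for the + * given gsi_ver.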
+ */ + gsi_base = ioremap_nocache(transport_mem_base, transport_mem_size); + + IPADBG("GSI base(0x%x) mapped to (%pK) with len (0x%x)\n", + transport_mem_base, + gsi_base, + transport_mem_size); + + if (!gsi_base) { + IPAERR("ioremap_nocache failed\n"); + return -EFAULT; + } + + --phdr; + + /* + * Attempt to load GSI FW image + */ + if (phdr->p_memsz > transport_mem_size) { + IPAERR( + "Invalid GSI FW img size memsz=%d transport_mem_size=%u\n", + phdr->p_memsz, transport_mem_size); + return -EINVAL; + } + IPADBG("Loading GSI FW\n"); + gsi_get_inst_ram_offset_and_size(&gsi_offset, NULL, gsi_ver); + rc = emulator_load_single_fw( + firmware, phdr, gsi_base, (uint32_t) gsi_offset); + iounmap(gsi_base); + if (rc) + return rc; + IPADBG("Loading GSI FW complete\n"); + + IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n"); + + return 0; +} + /** * ipa3_is_msm_device() - Is the running device a MSM or MDM? * Determine according to IPA version @@ -6132,4 +6600,3 @@ void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc, desc->len = cmd_pyld->len; desc->type = IPA_IMM_CMD_DESC; } - diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c index c4c6785f942f1b7ac67425595a78e84b074e9eab..7f02a6b1079e78f64708bdd0d2229b1df4a63301 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c @@ -1564,6 +1564,12 @@ int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base, goto bail_free_fltrt; } + /* create an IPC buffer for the registers dump */ + ipahal_ctx->regdumpbuf = ipc_log_context_create(IPAHAL_IPC_LOG_PAGES, + "ipa_regs", 0); + if (ipahal_ctx->regdumpbuf == NULL) + IPAHAL_ERR("failed to create IPA regdump log, continue...\n"); + ipahal_debugfs_init(); return 0; @@ -1571,6 +1577,8 @@ int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base, bail_free_fltrt: ipahal_fltrt_destroy(); bail_free_ctx: + if (ipahal_ctx->regdumpbuf) + ipc_log_context_destroy(ipahal_ctx->regdumpbuf); kfree(ipahal_ctx); ipahal_ctx = NULL; bail_err_exit: diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c index 1c21ed21cc9472acc26e066ce36398a65bae62e1..4237e1f7a726aacba80a1b0d43a668c9fe4a7676 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include "ipahal.h" @@ -579,6 +580,49 @@ static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = { }, }, + /* IPAv4.5 */ + [IPA_HW_v4_5] = { + true, + IPA3_0_HW_TBL_WIDTH, + IPA3_0_HW_TBL_SYSADDR_ALIGNMENT, + IPA3_0_HW_TBL_LCLADDR_ALIGNMENT, + IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT, + IPA3_0_HW_RULE_START_ALIGNMENT, + IPA3_0_HW_TBL_HDR_WIDTH, + IPA3_0_HW_TBL_ADDR_MASK, + IPA3_0_RULE_MAX_PRIORITY, + IPA3_0_RULE_MIN_PRIORITY, + IPA3_0_LOW_RULE_ID, + IPA3_0_RULE_ID_BIT_LEN, + IPA3_0_HW_RULE_BUF_SIZE, + ipa_write_64, + ipa_fltrt_create_flt_bitmap, + ipa_fltrt_create_tbl_addr, + ipa_fltrt_parse_tbl_addr, + ipa_rt_gen_hw_rule, + ipa_flt_gen_hw_rule_ipav4, + ipa_flt_generate_eq, + ipa_rt_parse_hw_rule, + ipa_flt_parse_hw_rule_ipav4, + { + [IPA_TOS_EQ] = 0, + [IPA_PROTOCOL_EQ] = 1, + [IPA_TC_EQ] = 2, + [IPA_OFFSET_MEQ128_0] = 3, + [IPA_OFFSET_MEQ128_1] = 4, + [IPA_OFFSET_MEQ32_0] = 5, + [IPA_OFFSET_MEQ32_1] = 6, + [IPA_IHL_OFFSET_MEQ32_0] = 7, + [IPA_IHL_OFFSET_MEQ32_1] = 8, + [IPA_METADATA_COMPARE] = 9, + [IPA_IHL_OFFSET_RANGE16_0] = 10, + [IPA_IHL_OFFSET_RANGE16_1] 
= 11, + [IPA_IHL_OFFSET_EQ_32] = 12, + [IPA_IHL_OFFSET_EQ_16] = 13, + [IPA_FL_EQ] = 14, + [IPA_IS_FRAG] = 15, + }, + }, }; static int ipa_flt_generate_eq(enum ipa_ip_type ipt, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h index 26b7f0fc005e7d42edb4786dfe39ac1e3190337f..816bc584199df38cb72ada1150056c81328ffbd4 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h @@ -46,15 +46,6 @@ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ } while (0) -#define IPAHAL_DBG_REG(fmt, args...) \ - do { \ - pr_err(fmt, ## args); \ - IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ - " %s:%d " fmt, ## args); \ - IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ - " %s:%d " fmt, ## args); \ - } while (0) - #define IPAHAL_ERR_RL(fmt, args...) \ do { \ pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \ @@ -65,9 +56,24 @@ IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \ } while (0) +#define IPAHAL_DBG_REG(fmt, args...) \ + do { \ + pr_err(fmt, ## args); \ + IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, \ + " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAHAL_DBG_REG_IPC_ONLY(fmt, args...) \ + do { \ + IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, \ + " %s:%d " fmt, ## args); \ + } while (0) + #define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \ (kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL))) +#define IPAHAL_IPC_LOG_PAGES 50 + /* * struct ipahal_context - HAL global context data * @hw_type: IPA H/W type/version. @@ -84,6 +90,7 @@ struct ipahal_context { struct dentry *dent; struct device *ipa_pdev; struct ipa_mem_buffer empty_fltrt_tbl; + void *regdumpbuf; }; extern struct ipahal_context *ipahal_ctx; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c index 44a2ce4c486f63262e8d407066cabeb5c315d1e8..79b43c3763946645b8f140005bb2938817bf701f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -134,6 +134,11 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = { __stringify(IPA_FEC_ATTR_EE_n), __stringify(IPA_MBIM_DEAGGR_FEC_ATTR_EE_n), __stringify(IPA_GEN_DEAGGR_FEC_ATTR_EE_n), + __stringify(IPA_GSI_CONF), + __stringify(IPA_ENDP_GSI_CFG1_n), + __stringify(IPA_ENDP_GSI_CFG2_n), + __stringify(IPA_ENDP_GSI_CFG_AOS_n), + __stringify(IPA_ENDP_GSI_CFG_TLV_n), }; static void ipareg_construct_dummy(enum ipahal_reg_name reg, @@ -213,6 +218,33 @@ static void ipareg_construct_rx_hps_clients_depth0_v3_5( IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3)); } +static void ipareg_construct_rx_hps_clients_depth0_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rx_hps_clients *clients = + (struct ipahal_reg_rx_hps_clients *)fields; + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_BMSK_v4_5); + + IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_BMSK_v4_5); + + 
IPA_SETFIELD_IN_REG(*val, clients->client_minmax[4], + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_SHFT_v4_5, + IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_BMSK_v4_5); +} + static void ipareg_construct_rsrg_grp_xy( enum ipahal_reg_name reg, const void *fields, u32 *val) { @@ -258,6 +290,32 @@ static void ipareg_construct_rsrg_grp_xy_v3_5( IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5); } +static void ipareg_construct_rsrg_grp_xy_v4_5( + enum ipahal_reg_name reg, const void *fields, u32 *val) +{ + struct ipahal_reg_rsrc_grp_cfg *grp = + (struct ipahal_reg_rsrc_grp_cfg *)fields; + + IPA_SETFIELD_IN_REG(*val, grp->x_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->x_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5); + + /* SRC_45 and DST_45 register has only X fields at ipa V4_5 */ + if (reg == IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n || + reg == IPA_DST_RSRC_GRP_45_RSRC_TYPE_n) + return; + + IPA_SETFIELD_IN_REG(*val, grp->y_min, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5); + IPA_SETFIELD_IN_REG(*val, grp->y_max, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5, + IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5); +} + static void ipareg_construct_hash_cfg_n( enum ipahal_reg_name reg, const void *fields, u32 *val) { @@ -2027,6 +2085,21 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v3_5][IPA_COUNTER_CFG] = { ipareg_construct_counter_cfg, ipareg_parse_counter_cfg, 0x000001F0, 0, 0, 0, 0}, + [IPA_HW_v3_5][IPA_GSI_CONF] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002790, 0x0, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG1_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002794, 0x4, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG2_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002A2C, 0x4, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_AOS_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x000029A8, 0x4, 0, 0, 0 }, + [IPA_HW_v3_5][IPA_ENDP_GSI_CFG_TLV_n] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00002924, 0x4, 0, 0, 0 }, /* IPAv4.0 */ [IPA_HW_v4_0][IPA_IRQ_SUSPEND_INFO_EE_n] = { @@ -2271,6 +2344,8 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v4_0][IPA_ENDP_YELLOW_RED_MARKER] = { ipareg_construct_dummy, ipareg_parse_dummy, 0x00000CC0, 0x70, 10, 23, 1}, + + /* IPA4.2 */ [IPA_HW_v4_2][IPA_IDLE_INDICATION_CFG] = { ipareg_construct_idle_indication_cfg, ipareg_parse_dummy, 0x00000240, 0, 0, 0, 0}, @@ -2278,9 +2353,37 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { ipareg_construct_endp_init_hol_block_timer_n_v4_2, ipareg_parse_dummy, 0x00000830, 0x70, 8, 17, 1}, + + /* IPA4.5 */ + [IPA_HW_v4_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000400, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000404, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000408, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000500, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000504, 0x20, 0, 
0, 0}, + [IPA_HW_v4_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = { + ipareg_construct_rsrg_grp_xy_v4_5, ipareg_parse_dummy, + 0x00000508, 0x20, 0, 0, 0}, + [IPA_HW_v4_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v4_5, + ipareg_parse_dummy, + 0x000023c4, 0, 0, 0, 0}, + [IPA_HW_v4_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = { + ipareg_construct_rx_hps_clients_depth0_v4_5, + ipareg_parse_dummy, + 0x000023cc, 0, 0, 0, 0}, }; -int ipahal_print_all_regs(void) +void ipahal_print_all_regs(bool print_to_dmesg) { int i, j; @@ -2290,7 +2393,7 @@ int ipahal_print_all_regs(void) if ((ipahal_ctx->hw_type < IPA_HW_v4_0) || (ipahal_ctx->hw_type >= IPA_HW_MAX)) { IPAHAL_ERR("invalid IPA HW type (%d)\n", ipahal_ctx->hw_type); - return -EINVAL; + return; } for (i = 0; i < IPA_REG_MAX ; i++) { @@ -2299,15 +2402,28 @@ int ipahal_print_all_regs(void) j = ipahal_reg_objs[ipahal_ctx->hw_type][i].n_start; - if (j == ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end) - IPAHAL_DBG_REG("%s=0x%x\n", ipahal_reg_name_str(i), - ipahal_read_reg_n(i, j)); + if (j == ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end) { + if (print_to_dmesg) + IPAHAL_DBG_REG("%s=0x%x\n", + ipahal_reg_name_str(i), + ipahal_read_reg_n(i, j)); + else + IPAHAL_DBG_REG_IPC_ONLY("%s=0x%x\n", + ipahal_reg_name_str(i), + ipahal_read_reg_n(i, j)); + } - for (; j < ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end; j++) - IPAHAL_DBG_REG("%s_%u=0x%x\n", ipahal_reg_name_str(i), - j, ipahal_read_reg_n(i, j)); + for (; j < ipahal_reg_objs[ipahal_ctx->hw_type][i].n_end; j++) { + if (print_to_dmesg) + IPAHAL_DBG_REG("%s_%u=0x%x\n", + ipahal_reg_name_str(i), + j, ipahal_read_reg_n(i, j)); + else + IPAHAL_DBG_REG_IPC_ONLY("%s_%u=0x%x\n", + ipahal_reg_name_str(i), + j, ipahal_read_reg_n(i, j)); + } } - return 0; } /* diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h index 0bece888a50c3d7cfcab16a0633bf1dd7f8a8e37..825c2f3a57acc0ed5fb94443087a4a946953054c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h @@ -135,6 +135,11 @@ enum ipahal_reg_name { IPA_FEC_ATTR_EE_n, IPA_MBIM_DEAGGR_FEC_ATTR_EE_n, IPA_GEN_DEAGGR_FEC_ATTR_EE_n, + IPA_GSI_CONF, + IPA_ENDP_GSI_CFG1_n, + IPA_ENDP_GSI_CFG2_n, + IPA_ENDP_GSI_CFG_AOS_n, + IPA_ENDP_GSI_CFG_TLV_n, IPA_REG_MAX, }; @@ -368,7 +373,7 @@ struct ipahal_reg_debug_cnt_ctrl { }; /* - * struct ipahal_reg_rsrc_grp_cfg - Mix/Max values for two rsrc groups + * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups * @x_min - first group min value * @x_max - first group max value * @y_min - second group min value @@ -383,11 +388,11 @@ struct ipahal_reg_rsrc_grp_cfg { /* * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients - * @client_minmax - Min or Max values. In case of depth 0 the 4 values + * @client_minmax - Min or Max values. In case of depth 0 the 4 or 5 values * are used. 
In case of depth 1, only the first 2 values are used */ struct ipahal_reg_rx_hps_clients { - u32 client_minmax[4]; + u32 client_minmax[5]; }; /* @@ -535,7 +540,7 @@ struct ipahal_ep_cfg_ctrl_scnd { }; -int ipahal_print_all_regs(void); +void ipahal_print_all_regs(bool print_to_dmesg); /* * ipahal_reg_name_str() - returns string that represent the register @@ -656,4 +661,3 @@ void ipahal_get_fltrt_hash_flush_valmask( struct ipahal_reg_valmask *valmask); #endif /* _IPAHAL_REG_H_ */ - diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h index 2f7803ad5d6ef5b053dc53f8d2319aecf901c105..b4166435a11de089b2740a07f523337cf21ed776 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h @@ -365,11 +365,21 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type); #define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F #define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0 -/* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */ +/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */ #define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n))) #define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \ (0xF << (8 * (n))) #define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n)) +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_BMSK_v4_5 0xF0000000 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_4_SHFT_v4_5 28 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_BMSK_v4_5 0xF000000 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_3_SHFT_v4_5 24 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_BMSK_v4_5 0xF0000 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_2_SHFT_v4_5 16 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_BMSK_v4_5 0xF00 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_1_SHFT_v4_5 8 +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_BMSK_v4_5 0xF +#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_0_CLIENT_0_SHFT_v4_5 0 /* IPA_QSB_MAX_WRITES register */ #define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf) diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index e18d023b38eff5ff07596a22954d5de8f5d08632..bbdbfdb5104e2cd80d621b5d9ae61aa27ac0d167 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -40,6 +40,22 @@ #include "ipa_trace.h" +#define OUTSTANDING_HIGH_DEFAULT 256 +#define OUTSTANDING_HIGH_CTL_DEFAULT (OUTSTANDING_HIGH_DEFAULT + 32) +#define OUTSTANDING_LOW_DEFAULT 128 + +static unsigned int outstanding_high = OUTSTANDING_HIGH_DEFAULT; +module_param(outstanding_high, uint, 0644); +MODULE_PARM_DESC(outstanding_high, "Outstanding high"); + +static unsigned int outstanding_high_ctl = OUTSTANDING_HIGH_CTL_DEFAULT; +module_param(outstanding_high_ctl, uint, 0644); +MODULE_PARM_DESC(outstanding_high_ctl, "Outstanding high control"); + +static unsigned int outstanding_low = OUTSTANDING_LOW_DEFAULT; +module_param(outstanding_low, uint, 0644); +MODULE_PARM_DESC(outstanding_low, "Outstanding low"); + #define WWAN_METADATA_SHFT 24 #define WWAN_METADATA_MASK 0xFF000000 #define WWAN_DATA_LEN 2000 @@ -48,9 +64,6 @@ #define TAILROOM 0 /* for padding by mux layer */ #define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */ #define UL_FILTER_RULE_HANDLE_START 69 -#define DEFAULT_OUTSTANDING_HIGH 128 -#define DEFAULT_OUTSTANDING_HIGH_CTL (DEFAULT_OUTSTANDING_HIGH+32) -#define DEFAULT_OUTSTANDING_LOW 64 
#define IPA_WWAN_DEV_NAME "rmnet_ipa%d" #define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0" @@ -102,8 +115,6 @@ struct ipa3_rmnet_plat_drv_res { * @net: network interface struct implemented by this driver * @stats: iface statistics * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed - * @outstanding_high: number of outstanding packets allowed - * @outstanding_low: number of outstanding packets which shall cause * @ch_id: channel id * @lock: spinlock for mutual exclusion * @device_status: holds device status @@ -114,9 +125,6 @@ struct ipa3_wwan_private { struct net_device *net; struct net_device_stats stats; atomic_t outstanding_pkts; - int outstanding_high_ctl; - int outstanding_high; - int outstanding_low; uint32_t ch_id; spinlock_t lock; struct completion resource_granted_completion; @@ -1085,7 +1093,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) if (netif_queue_stopped(dev)) { if (qmap_check && atomic_read(&wwan_ptr->outstanding_pkts) < - wwan_ptr->outstanding_high_ctl) { + outstanding_high_ctl) { pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name); goto send; } else { @@ -1096,11 +1104,11 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) /* checking High WM hit */ if (atomic_read(&wwan_ptr->outstanding_pkts) >= - wwan_ptr->outstanding_high) { + outstanding_high) { if (!qmap_check) { IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n", atomic_read(&wwan_ptr->outstanding_pkts), - wwan_ptr->outstanding_high, + outstanding_high, netif_queue_stopped(dev)); IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check); netif_stop_queue(dev); @@ -1203,10 +1211,9 @@ static void apps_ipa_tx_complete_notify(void *priv, __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0)); if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) && netif_queue_stopped(wwan_ptr->net) && - atomic_read(&wwan_ptr->outstanding_pkts) < - (wwan_ptr->outstanding_low)) { + atomic_read(&wwan_ptr->outstanding_pkts) < outstanding_low) { IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n", - wwan_ptr->outstanding_low); + outstanding_low); netif_wake_queue(wwan_ptr->net); } @@ -2375,8 +2382,6 @@ static int ipa3_wwan_probe(struct platform_device *pdev) sizeof(*(rmnet_ipa3_ctx->wwan_priv))); IPAWANDBG("wwan_ptr (private) = %pK", rmnet_ipa3_ctx->wwan_priv); rmnet_ipa3_ctx->wwan_priv->net = dev; - rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH; - rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW; atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0); spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock); init_completion( @@ -3239,15 +3244,6 @@ static int rmnet_ipa3_query_tethering_stats_hw( int rc = 0; struct ipa_quota_stats_all *con_stats; - if (reset) { - IPAWANERR("only reset the pipe stats without returning stats"); - rc = ipa_get_teth_stats(); - if (rc) { - IPAWANERR("ipa_get_teth_stats failed %d,\n", rc); - return rc; - } - return 0; - } /* qet HW-stats */ rc = ipa_get_teth_stats(); if (rc) { diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c index 755ed583894d51aa4da6336f5ae1b1c1fc511e95..f0109e39aa570c9482528e29e15b38f0006eb36c 100644 --- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c +++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c @@ -327,6 +327,8 @@ struct ipa_test_mhi_context { u32 prod_hdl; u32 cons_hdl; u32 test_prod_hdl; + phys_addr_t transport_phys_addr; + unsigned long transport_size; }; static struct ipa_test_mhi_context *test_mhi_ctx; @@ -780,11 +782,6 @@ static int 
ipa_test_mhi_suite_setup(void **ppriv) IPA_UT_DBG("Start Setup\n"); - if (!gsi_ctx) { - IPA_UT_ERR("No GSI ctx\n"); - return -EINVAL; - } - if (!ipa3_ctx) { IPA_UT_ERR("No IPA ctx\n"); return -EINVAL; @@ -797,11 +794,20 @@ static int ipa_test_mhi_suite_setup(void **ppriv) return -ENOMEM; } - test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr, - gsi_ctx->per.size); - if (!test_mhi_ctx) { + rc = ipa3_get_transport_info(&test_mhi_ctx->transport_phys_addr, + &test_mhi_ctx->transport_size); + if (rc != 0) { + IPA_UT_ERR("ipa3_get_transport_info() failed\n"); + rc = -EFAULT; + goto fail_free_ctx; + } + + test_mhi_ctx->gsi_mmio = + ioremap_nocache(test_mhi_ctx->transport_phys_addr, + test_mhi_ctx->transport_size); + if (!test_mhi_ctx->gsi_mmio) { IPA_UT_ERR("failed to remap GSI HW size=%lu\n", - gsi_ctx->per.size); + test_mhi_ctx->transport_size); rc = -EFAULT; goto fail_free_ctx; } @@ -1385,7 +1391,7 @@ static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio, /* write value to event ring doorbell */ IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n", p_events[event_ring_index].wp, - &(gsi_ctx->per.phys_addr), + &(test_mhi_ctx->transport_phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS( event_ring_index + ipa3_ctx->mhi_evid_limits[0], 0)); iowrite32(p_events[event_ring_index].wp, @@ -1432,7 +1438,7 @@ static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio, IPA_UT_LOG( "DB to channel 0x%llx: base %pa ofst 0x%x\n" , p_channels[channel_idx].wp - , &(gsi_ctx->per.phys_addr) + , &(test_mhi_ctx->transport_phys_addr) , GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS( channel_idx, 0)); iowrite32(p_channels[channel_idx].wp, @@ -3324,4 +3330,3 @@ IPA_UT_DEFINE_SUITE_START(mhi, "MHI for GSI", ipa_mhi_test_in_loop_channel_reset_ipa_holb, true, IPA_HW_v3_0, IPA_HW_MAX), } IPA_UT_DEFINE_SUITE_END(mhi); - diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c index ed8d82b5a1fec20c5e99b90fe10df397b2fcdea9..647b140723a1e2d93d3497e663b13dd2749837d8 100644 --- a/drivers/platform/msm/ipa/test/ipa_ut_framework.c +++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c @@ -1035,9 +1035,10 @@ static void ipa_ut_ipa_ready_cb(void *user_data) * If IPA driver already ready, continue initialization immediately. * if not, wait for IPA ready notification by IPA driver context */ -static int __init ipa_ut_module_init(void) +int __init ipa_ut_module_init(void) { - int ret; + int ret = 0; + bool init_framewok = true; IPA_UT_INFO("Loading IPA test module...\n"); @@ -1049,14 +1050,34 @@ static int __init ipa_ut_module_init(void) mutex_init(&ipa_ut_ctx->lock); if (!ipa_is_ready()) { + init_framewok = false; + IPA_UT_DBG("IPA driver not ready, registering callback\n"); + ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL); /* - * If we received -EEXIST, IPA has initialized. So we need - * to continue the initing process. + * If the call to ipa_register_ipa_ready_cb() above + * returns 0, this means that we've succeeded in + * queuing up a future call to ipa_ut_framework_init() + * and that the call to it will be made once the IPA + * becomes ready. If this is the case, the call to + * ipa_ut_framework_init() below need not be made. + * + * If the call to ipa_register_ipa_ready_cb() above + * returns -EEXIST, it means that during the call to + * ipa_register_ipa_ready_cb(), the IPA has become + * ready, and hence, no indirect call to + * ipa_ut_framework_init() will be made, so we need to + * call it ourselves below. 
+ * + * If the call to ipa_register_ipa_ready_cb() above + * return something other than 0 or -EEXIST, that's a + * hard error. */ - if (ret != -EEXIST) { + if (ret == -EEXIST) { + init_framewok = true; + } else { if (ret) { IPA_UT_ERR("IPA CB reg failed - %d\n", ret); kfree(ipa_ut_ctx); @@ -1066,12 +1087,15 @@ static int __init ipa_ut_module_init(void) } } - ret = ipa_ut_framework_init(); - if (ret) { - IPA_UT_ERR("framework init failed\n"); - kfree(ipa_ut_ctx); - ipa_ut_ctx = NULL; + if (init_framewok) { + ret = ipa_ut_framework_init(); + if (ret) { + IPA_UT_ERR("framework init failed\n"); + kfree(ipa_ut_ctx); + ipa_ut_ctx = NULL; + } } + return ret; } @@ -1080,7 +1104,7 @@ static int __init ipa_ut_module_init(void) * * Destroys the Framework and removes its context */ -static void ipa_ut_module_exit(void) +void ipa_ut_module_exit(void) { IPA_UT_DBG("Entry\n"); @@ -1092,8 +1116,9 @@ static void ipa_ut_module_exit(void) ipa_ut_ctx = NULL; } +#if !defined(CONFIG_IPA_EMULATION) /* On real UE, we have a module */ module_init(ipa_ut_module_init); module_exit(ipa_ut_module_exit); - MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("IPA Unit Test module"); +#endif diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index 4f09e465f23b30a2b627d96a110a714eaedb1ae4..89d0a0d246827c37e705b823040b9a78539ce69d 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -157,6 +157,38 @@ int get_se_proto(void __iomem *base) } EXPORT_SYMBOL(get_se_proto); +/** + * get_se_m_fw() - Read the Firmware ver for the Main seqeuncer engine + * @base: Base address of the serial engine's register block. + * + * Return: Firmware version for the Main seqeuncer engine + */ +int get_se_m_fw(void __iomem *base) +{ + int fw_ver_m; + + fw_ver_m = ((geni_read_reg(base, GENI_FW_REVISION_RO) + & FW_REV_VERSION_MSK)); + return fw_ver_m; +} +EXPORT_SYMBOL(get_se_m_fw); + +/** + * get_se_s_fw() - Read the Firmware ver for the Secondry seqeuncer engine + * @base: Base address of the serial engine's register block. + * + * Return: Firmware version for the Secondry seqeuncer engine + */ +int get_se_s_fw(void __iomem *base) +{ + int fw_ver_s; + + fw_ver_s = ((geni_read_reg(base, GENI_FW_S_REVISION_RO) + & FW_REV_VERSION_MSK)); + return fw_ver_s; +} +EXPORT_SYMBOL(get_se_s_fw); + static int se_geni_irq_en(void __iomem *base) { unsigned int common_geni_m_irq_en; diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c index 90586e5b54274af6f6f2f4e1960ad92a8411e179..34231a943af40bc37246835cdedad58d6d2906b9 100644 --- a/drivers/platform/msm/qpnp-revid.c +++ b/drivers/platform/msm/qpnp-revid.c @@ -64,6 +64,7 @@ static const char *const pmic_names[] = { [PM8150_SUBTYPE] = "PM8150", [PM8150B_SUBTYPE] = "PM8150B", [PM8150L_SUBTYPE] = "PM8150L", + [PM6150_SUBTYPE] = "PM6150", }; struct revid_chip { diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c index ca7f5e9332dfc69954d7a1bdcc8eb3d3cddb85c7..1a962bb9a013ddf49b0316d31023cde9cafa7947 100644 --- a/drivers/platform/msm/seemp_core/seemp_logk.c +++ b/drivers/platform/msm/seemp_core/seemp_logk.c @@ -472,20 +472,6 @@ static long seemp_logk_set_mapping(unsigned long arg) (UINT_MAX / sizeof(struct seemp_source_mask)))) return -EFAULT; - write_lock(&filter_lock); - if (pmask != NULL) { - /* - * Mask is getting set again. - * seemp_core was probably restarted. 
- */ - struct seemp_source_mask *ptempmask; - - num_sources = 0; - ptempmask = pmask; - pmask = NULL; - kfree(ptempmask); - } - write_unlock(&filter_lock); pbuffer = kmalloc_array(num_elements, sizeof(struct seemp_source_mask), GFP_KERNEL); if (pbuffer == NULL) @@ -511,6 +497,18 @@ static long seemp_logk_set_mapping(unsigned long arg) pnewmask[i].isOn = 0; } write_lock(&filter_lock); + if (pmask != NULL) { + /* + * Mask is getting set again. + * seemp_core was probably restarted. + */ + struct seemp_source_mask *ptempmask; + + num_sources = 0; + ptempmask = pmask; + pmask = NULL; + kfree(ptempmask); + } pmask = pnewmask; num_sources = num_elements; write_unlock(&filter_lock); diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index ab877f2965202b9304072e814718633d4fc2b145..6cba3d89f0d24dd599ca24f9e504444efeb6bbed 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -632,7 +632,7 @@ int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx, &ctx->usb_bam_connections[idx]; unsigned long peer_bam_handle; - ret = sps_phy2h(pipe_connect->dst_phy_addr, &peer_bam_handle); + ret = sps_phy2h(pipe_connect->src_phy_addr, &peer_bam_handle); if (ret) { log_event_err("%s: sps_phy2h failed (src BAM) %d\n", __func__, ret); @@ -962,7 +962,6 @@ int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx) static void usb_bam_sps_events(enum sps_callback_case sps_cb_case, void *user) { int i; - int bam; struct usb_bam_ctx_type *ctx = user; struct usb_bam_pipe_connect *pipe_connect; struct usb_bam_event_info *event_info; @@ -976,8 +975,6 @@ static void usb_bam_sps_events(enum sps_callback_case sps_cb_case, void *user) spin_lock(&ctx->usb_bam_lock); ctx->is_bam_inactivity = true; - log_event_dbg("%s: Inactivity happened on bam=%s,%d\n", - __func__, (char *)user, bam); for (i = 0; i < ctx->max_connections; i++) { pipe_connect = &ctx->usb_bam_connections[i]; diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 616b450119ebf602e8297013c75e0bef27e4e576..4885c669b93783f66a98e8e39aa653651968653e 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -369,6 +369,9 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(recharge_soc), POWER_SUPPLY_ATTR(hvdcp_opti_allowed), POWER_SUPPLY_ATTR(smb_en_mode), + POWER_SUPPLY_ATTR(esr_actual), + POWER_SUPPLY_ATTR(esr_nominal), + POWER_SUPPLY_ATTR(soh), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig index db0f56e3c6aa68657a647b3c6fdd1a8857462f46..a116e49dca820e70258686376c59814e5b87e6fa 100644 --- a/drivers/power/supply/qcom/Kconfig +++ b/drivers/power/supply/qcom/Kconfig @@ -94,6 +94,15 @@ config QPNP_QNOVO module. It also allows userspace code to read diagnostics of voltage and current measured during certain phases of the pulses. +config QPNP_QNOVO5 + bool "QPNP QNOVO5 driver" + depends on MFD_SPMI_PMIC + help + Say Y here to enable the Qnovo5 PBS-based pulse charging engine. + Qnovo5 driver accepts pulse parameters via sysfs entries and programs + the hardware module. It also allows userspace code to read diagnostics + of voltage and current measured during certain phases of the pulses. 
+
 config SMB1390_CHARGE_PUMP
 tristate "SMB1390 Charge Pump"
 depends on MFD_I2C_PMIC
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 22c326e5ebff3f167bdf3be5c254fc79061eda87..026409243ed559474363138ca57f91708c6b132a 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_SMB1355_SLAVE_CHARGER) += smb1355-charger.o pmic-voter.o
 obj-$(CONFIG_QPNP_SMB2) += step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
 obj-$(CONFIG_SMB138X_CHARGER) += step-chg-jeita.o smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
 obj-$(CONFIG_QPNP_QNOVO) += qpnp-qnovo.o battery.o
+obj-$(CONFIG_QPNP_QNOVO5) += qpnp-qnovo5.o battery.o
 obj-$(CONFIG_QPNP_SMB5) += step-chg-jeita.o battery.o qpnp-smb5.o smb5-lib.o pmic-voter.o storm-watch.o schgm-flash.o
 obj-$(CONFIG_SMB1390_CHARGE_PUMP) += smb1390-charger.o pmic-voter.o
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index 726623a3de6c6a33beb8b7d51b81a22e39037170..9500b664801110b6ea5b668bb4fe030217a53ffa 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -16,11 +16,37 @@
 #include
 #include
 #include
+#include
+#include
 #include "fg-alg.h"
 #define FULL_SOC_RAW 255
 #define CAPACITY_DELTA_DECIPCT 500
+#define CENTI_ICORRECT_C0 105
+#define CENTI_ICORRECT_C1 20
+
+#define HOURS_TO_SECONDS 3600
+#define OCV_SLOPE_UV 10869
+#define MILLI_UNIT 1000
+#define MICRO_UNIT 1000000
+#define NANO_UNIT 1000000000
+
+#define DEFAULT_TTF_RUN_PERIOD_MS 10000
+#define DEFAULT_TTF_ITERM_DELTA_MA 200
+
+static const struct ttf_pt ttf_ln_table[] = {
+ { 1000, 0 },
+ { 2000, 693 },
+ { 4000, 1386 },
+ { 6000, 1792 },
+ { 8000, 2079 },
+ { 16000, 2773 },
+ { 32000, 3466 },
+ { 64000, 4159 },
+ { 128000, 4852 },
+};
+
 /* Cycle counter APIs */
 /**
@@ -670,3 +696,601 @@ int cap_learning_init(struct cap_learning *cl)
 mutex_init(&cl->lock);
 return 0;
}
+
+/* Time to full/empty algorithm helper functions */
+
+static void ttf_circ_buf_add(struct ttf_circ_buf *buf, int val)
+{
+ buf->arr[buf->head] = val;
+ buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+ buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+static void ttf_circ_buf_clr(struct ttf_circ_buf *buf)
+{
+ buf->size = 0;
+ buf->head = 0;
+ memset(buf->arr, 0, sizeof(buf->arr));
+}
+
+static int cmp_int(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int ttf_circ_buf_median(struct ttf_circ_buf *buf, int *median)
+{
+ int *temp;
+
+ if (buf->size == 0)
+ return -ENODATA;
+
+ if (buf->size == 1) {
+ *median = buf->arr[0];
+ return 0;
+ }
+
+ temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+ sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+ if (buf->size % 2)
+ *median = temp[buf->size / 2];
+ else
+ *median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+ kfree(temp);
+ return 0;
+}
+
+static int ttf_lerp(const struct ttf_pt *pts, size_t tablesize,
+ s32 input, s32 *output)
+{
+ int i;
+ s64 temp;
+
+ if (pts == NULL) {
+ pr_err("Table is NULL\n");
+ return -EINVAL;
+ }
+
+ if (tablesize < 1) {
+ pr_err("Table has no entries\n");
+ return -ENOENT;
+ }
+
+ if (tablesize == 1) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (pts[0].x > pts[1].x) {
+ pr_err("Table is not in ascending order\n");
+ return -EINVAL;
+ }
+
+ if (input <= pts[0].x) {
+ *output = pts[0].y;
+ return 0;
+ } + + if (input >= pts[tablesize - 1].x) { + *output = pts[tablesize - 1].y; + return 0; + } + + for (i = 1; i < tablesize; i++) { + if (input >= pts[i].x) + continue; + + temp = ((s64)pts[i].y - pts[i - 1].y) * + ((s64)input - pts[i - 1].x); + temp = div_s64(temp, pts[i].x - pts[i - 1].x); + *output = temp + pts[i - 1].y; + return 0; + } + + return -EINVAL; +} + +static int get_time_to_full_locked(struct ttf *ttf, int *val) +{ + struct step_chg_data *step_chg_data = ttf->step_chg_data; + struct range_data *step_chg_cfg = ttf->step_chg_cfg; + int rc, ibatt_avg, vbatt_avg, rbatt = 0, msoc = 0, act_cap_mah = 0, + i_cc2cv = 0, soc_cc2cv, tau, divisor, iterm = 0, ttf_mode = 0, + i, soc_per_step, msoc_this_step, msoc_next_step, + ibatt_this_step, t_predicted_this_step, ttf_slope, + t_predicted_cv, t_predicted = 0, charge_type = 0, i_step, + float_volt_uv = 0; + int vbatt_now, multiplier, curr_window = 0, pbatt_avg; + bool power_approx = false; + s64 delta_ms; + + rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc); + if (rc < 0) { + pr_err("failed to get msoc rc=%d\n", rc); + return rc; + } + pr_debug("TTF: msoc=%d\n", msoc); + + /* the battery is considered full if the SOC is 100% */ + if (msoc >= 100) { + *val = 0; + return 0; + } + + rc = ttf->get_ttf_param(ttf->data, TTF_MODE, &ttf_mode); + + /* when switching TTF algorithms the TTF needs to be reset */ + if (ttf->mode != ttf_mode) { + ttf_circ_buf_clr(&ttf->ibatt); + ttf_circ_buf_clr(&ttf->vbatt); + ttf->last_ttf = 0; + ttf->last_ms = 0; + ttf->mode = ttf_mode; + } + + /* at least 10 samples are required to produce a stable IBATT */ + if (ttf->ibatt.size < MAX_TTF_SAMPLES) { + *val = -1; + return 0; + } + + rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg); + if (rc < 0) { + pr_err("failed to get IBATT AVG rc=%d\n", rc); + return rc; + } + + rc = ttf_circ_buf_median(&ttf->vbatt, &vbatt_avg); + if (rc < 0) { + pr_err("failed to get VBATT AVG rc=%d\n", rc); + return rc; + } + + ibatt_avg = -ibatt_avg / MILLI_UNIT; + vbatt_avg /= MILLI_UNIT; + + rc = ttf->get_ttf_param(ttf->data, TTF_ITERM, &iterm); + if (rc < 0) { + pr_err("failed to get iterm rc=%d\n", rc); + return rc; + } + /* clamp ibatt_avg to iterm */ + if (ibatt_avg < abs(iterm)) + ibatt_avg = abs(iterm); + + rc = ttf->get_ttf_param(ttf->data, TTF_RBATT, &rbatt); + if (rc < 0) { + pr_err("failed to get battery resistance rc=%d\n", rc); + return rc; + } + rbatt /= MILLI_UNIT; + + rc = ttf->get_ttf_param(ttf->data, TTF_FCC, &act_cap_mah); + if (rc < 0) { + pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc); + return rc; + } + + pr_debug("TTF: ibatt_avg=%d vbatt_avg=%d rbatt=%d act_cap_mah=%d\n", + ibatt_avg, vbatt_avg, rbatt, act_cap_mah); + + rc = ttf->get_ttf_param(ttf->data, TTF_VFLOAT, &float_volt_uv); + if (rc < 0) { + pr_err("failed to get float_volt_uv rc=%d\n", rc); + return rc; + } + + rc = ttf->get_ttf_param(ttf->data, TTF_CHG_TYPE, &charge_type); + if (rc < 0) { + pr_err("failed to get charge_type rc=%d\n", rc); + return rc; + } + + pr_debug("TTF: mode: %d\n", ttf->mode); + + /* estimated battery current at the CC to CV transition */ + switch (ttf->mode) { + case TTF_MODE_NORMAL: + case TTF_MODE_V_STEP_CHG: + i_cc2cv = ibatt_avg * vbatt_avg / + max(MILLI_UNIT, float_volt_uv / MILLI_UNIT); + break; + case TTF_MODE_QNOVO: + i_cc2cv = min( + ttf->cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT, + ibatt_avg * vbatt_avg / + max(MILLI_UNIT, float_volt_uv / MILLI_UNIT)); + break; + default: + pr_err("TTF mode %d is not supported\n", ttf->mode); + break; + } + pr_debug("TTF: 
i_cc2cv=%d\n", i_cc2cv); + + /* if we are already in CV state then we can skip estimating CC */ + if (charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) + goto cv_estimate; + + /* estimated SOC at the CC to CV transition */ + soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV); + soc_cc2cv = 100 - soc_cc2cv; + pr_debug("TTF: soc_cc2cv=%d\n", soc_cc2cv); + + switch (ttf->mode) { + case TTF_MODE_NORMAL: + if (soc_cc2cv - msoc <= 0) + goto cv_estimate; + + divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100); + t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) * + HOURS_TO_SECONDS, divisor); + break; + case TTF_MODE_QNOVO: + soc_per_step = 100 / MAX_CC_STEPS; + for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) { + msoc_next_step = (i + 1) * soc_per_step; + if (i == msoc / soc_per_step) + msoc_this_step = msoc; + else + msoc_this_step = i * soc_per_step; + + /* scale ibatt by 85% to account for discharge pulses */ + ibatt_this_step = min( + ttf->cc_step.arr[i] / MILLI_UNIT, + ibatt_avg) * 85 / 100; + divisor = max(100, ibatt_this_step * 100); + t_predicted_this_step = div_s64((s64)act_cap_mah * + (msoc_next_step - msoc_this_step) * + HOURS_TO_SECONDS, divisor); + t_predicted += t_predicted_this_step; + pr_debug("TTF: [%d, %d] ma=%d t=%d\n", + msoc_this_step, msoc_next_step, + ibatt_this_step, t_predicted_this_step); + } + break; + case TTF_MODE_V_STEP_CHG: + if (!step_chg_data || !step_chg_cfg) + break; + + pbatt_avg = vbatt_avg * ibatt_avg; + + rc = ttf->get_ttf_param(ttf->data, TTF_VBAT, &vbatt_now); + if (rc < 0) { + pr_err("failed to get battery voltage, rc=%d\n", rc); + return rc; + } + + curr_window = ttf->step_chg_num_params - 1; + for (i = 0; i < ttf->step_chg_num_params; i++) { + if (is_between(step_chg_cfg[i].low_threshold, + step_chg_cfg[i].high_threshold, + vbatt_now)) + curr_window = i; + } + + pr_debug("TTF: curr_window: %d pbatt_avg: %d\n", curr_window, + pbatt_avg); + + t_predicted_this_step = 0; + for (i = 0; i < ttf->step_chg_num_params; i++) { + /* + * If Ibatt_avg differs by step charging threshold by + * more than 100 mA, then use power approximation to + * get charging current step. 
+ */ + + if (step_chg_cfg[i].value - ibatt_avg > 100) + power_approx = true; + + /* Calculate OCV for each window */ + if (power_approx) { + i_step = pbatt_avg / max((u32)MILLI_UNIT, + (step_chg_cfg[i].high_threshold / + MILLI_UNIT)); + } else { + if (i == curr_window) + i_step = ((step_chg_cfg[i].value / + MILLI_UNIT) + + ibatt_avg) / 2; + else + i_step = (step_chg_cfg[i].value / + MILLI_UNIT); + } + + step_chg_data[i].ocv = step_chg_cfg[i].high_threshold - + (rbatt * i_step); + + /* Calculate SOC for each window */ + step_chg_data[i].soc = (float_volt_uv - + step_chg_data[i].ocv) / OCV_SLOPE_UV; + step_chg_data[i].soc = 100 - step_chg_data[i].soc; + + /* Calculate CC time for each window */ + multiplier = act_cap_mah * HOURS_TO_SECONDS; + if (curr_window > 0 && i < curr_window) + t_predicted_this_step = 0; + else if (i == curr_window) + t_predicted_this_step = + div_s64((s64)multiplier * + (step_chg_data[i].soc - msoc), + i_step); + else if (i > 0) + t_predicted_this_step = + div_s64((s64)multiplier * + (step_chg_data[i].soc - + step_chg_data[i - 1].soc), + i_step); + + if (t_predicted_this_step < 0) + t_predicted_this_step = 0; + + t_predicted_this_step = + DIV_ROUND_CLOSEST(t_predicted_this_step, 100); + pr_debug("TTF: step: %d i_step: %d OCV: %d SOC: %d t_pred: %d\n", + i, i_step, step_chg_data[i].ocv, + step_chg_data[i].soc, t_predicted_this_step); + t_predicted += t_predicted_this_step; + } + + break; + default: + pr_err("TTF mode %d is not supported\n", ttf->mode); + break; + } + +cv_estimate: + pr_debug("TTF: t_predicted_cc=%d\n", t_predicted); + + iterm = max(100, abs(iterm) + ttf->iterm_delta); + pr_debug("TTF: iterm=%d\n", iterm); + + if (charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) + tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm); + else + tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm); + + rc = ttf_lerp(ttf_ln_table, ARRAY_SIZE(ttf_ln_table), tau, &tau); + if (rc < 0) { + pr_err("failed to interpolate tau rc=%d\n", rc); + return rc; + } + + /* tau is scaled linearly from 95% to 100% SOC */ + if (msoc >= 95) + tau = tau * 2 * (100 - msoc) / 10; + + pr_debug("TTF: tau=%d\n", tau); + t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau * + HOURS_TO_SECONDS, NANO_UNIT); + pr_debug("TTF: t_predicted_cv=%d\n", t_predicted_cv); + t_predicted += t_predicted_cv; + + pr_debug("TTF: t_predicted_prefilter=%d\n", t_predicted); + if (ttf->last_ms != 0) { + delta_ms = ktime_ms_delta(ktime_get_boottime(), + ms_to_ktime(ttf->last_ms)); + if (delta_ms > 10000) { + ttf_slope = div64_s64( + ((s64)t_predicted - ttf->last_ttf) * + MICRO_UNIT, delta_ms); + if (ttf_slope > -100) + ttf_slope = -100; + else if (ttf_slope < -2000) + ttf_slope = -2000; + + t_predicted = div_s64( + (s64)ttf_slope * delta_ms, MICRO_UNIT) + + ttf->last_ttf; + pr_debug("TTF: ttf_slope=%d\n", ttf_slope); + } else { + t_predicted = ttf->last_ttf; + } + } + + /* clamp the ttf to 0 */ + if (t_predicted < 0) + t_predicted = 0; + + pr_debug("TTF: t_predicted_postfilter=%d\n", t_predicted); + *val = t_predicted; + return 0; +} + +/** + * ttf_get_time_to_full - + * @ttf: ttf object + * @val: Average time to full returned to the caller + * + * Get Average time to full the battery based on current soc, rbatt + * battery voltage and charge current etc. 
+ */ +int ttf_get_time_to_full(struct ttf *ttf, int *val) +{ + int rc; + + mutex_lock(&ttf->lock); + rc = get_time_to_full_locked(ttf, val); + mutex_unlock(&ttf->lock); + + return rc; +} + +static void ttf_work(struct work_struct *work) +{ + struct ttf *ttf = container_of(work, + struct ttf, ttf_work.work); + int rc, ibatt_now, vbatt_now, ttf_now, charge_status; + ktime_t ktime_now; + + mutex_lock(&ttf->lock); + rc = ttf->get_ttf_param(ttf->data, TTF_CHG_STATUS, &charge_status); + if (rc < 0) { + pr_err("failed to get charge_status rc=%d\n", rc); + goto end_work; + } + if (charge_status != POWER_SUPPLY_STATUS_CHARGING && + charge_status != POWER_SUPPLY_STATUS_DISCHARGING) + goto end_work; + + rc = ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_now); + if (rc < 0) { + pr_err("failed to get battery current, rc=%d\n", rc); + goto end_work; + } + + rc = ttf->get_ttf_param(ttf->data, TTF_VBAT, &vbatt_now); + if (rc < 0) { + pr_err("failed to get battery voltage, rc=%d\n", rc); + goto end_work; + } + + ttf_circ_buf_add(&ttf->ibatt, ibatt_now); + ttf_circ_buf_add(&ttf->vbatt, vbatt_now); + + if (charge_status == POWER_SUPPLY_STATUS_CHARGING) { + rc = get_time_to_full_locked(ttf, &ttf_now); + if (rc < 0) { + pr_err("failed to get ttf, rc=%d\n", rc); + goto end_work; + } + + /* keep the wake lock and prime the IBATT and VBATT buffers */ + if (ttf_now < 0) { + /* delay for one FG cycle */ + schedule_delayed_work(&ttf->ttf_work, + msecs_to_jiffies(1000)); + mutex_unlock(&ttf->lock); + return; + } + + /* update the TTF reference point every minute */ + ktime_now = ktime_get_boottime(); + if (ktime_ms_delta(ktime_now, + ms_to_ktime(ttf->last_ms)) > 60000 || + ttf->last_ms == 0) { + ttf->last_ttf = ttf_now; + ttf->last_ms = ktime_to_ms(ktime_now); + } + } + + /* recurse every 10 seconds */ + schedule_delayed_work(&ttf->ttf_work, msecs_to_jiffies(ttf->period_ms)); +end_work: + ttf->awake_voter(ttf->data, false); + mutex_unlock(&ttf->lock); +} + +/** + * ttf_get_time_to_empty - + * @ttf: ttf object + * @val: Average time to empty returned to the caller + * + * Get Average time to empty the battery based on current soc + * and average battery current. 
+ */ +int ttf_get_time_to_empty(struct ttf *ttf, int *val) +{ + int rc, ibatt_avg, msoc, act_cap_mah, divisor; + + rc = ttf_circ_buf_median(&ttf->ibatt, &ibatt_avg); + if (rc < 0) { + /* try to get instantaneous current */ + rc = ttf->get_ttf_param(ttf->data, TTF_IBAT, &ibatt_avg); + if (rc < 0) { + pr_err("failed to get battery current, rc=%d\n", rc); + return rc; + } + } + + ibatt_avg /= MILLI_UNIT; + /* clamp ibatt_avg to 100mA */ + if (ibatt_avg < 100) + ibatt_avg = 100; + + rc = ttf->get_ttf_param(ttf->data, TTF_MSOC, &msoc); + if (rc < 0) { + pr_err("Error in getting capacity, rc=%d\n", rc); + return rc; + } + + rc = ttf->get_ttf_param(ttf->data, TTF_FCC, &act_cap_mah); + if (rc < 0) { + pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc); + return rc; + } + + divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc; + divisor = ibatt_avg * divisor / 100; + divisor = max(100, divisor); + *val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor; + return 0; +} + +/** + * ttf_update - + * @ttf: ttf object + * @input_present: Indicator for input presence + * + * Called by FG/QG driver when there is a state change (Charging status, SOC) + * + */ +void ttf_update(struct ttf *ttf, bool input_present) +{ + int delay_ms; + + if (ttf->input_present == input_present) + return; + + ttf->input_present = input_present; + if (input_present) + /* wait 35 seconds for the input to settle */ + delay_ms = 35000; + else + /* wait 5 seconds for current to settle during discharge */ + delay_ms = 5000; + + ttf->awake_voter(ttf->data, true); + cancel_delayed_work_sync(&ttf->ttf_work); + mutex_lock(&ttf->lock); + ttf_circ_buf_clr(&ttf->ibatt); + ttf_circ_buf_clr(&ttf->vbatt); + ttf->last_ttf = 0; + ttf->last_ms = 0; + mutex_unlock(&ttf->lock); + schedule_delayed_work(&ttf->ttf_work, msecs_to_jiffies(delay_ms)); +} + +/** + * ttf_tte_init - + * @ttf: Time to full object + * + * FG/QG have to call this during driver probe to validate the required + * parameters after allocating ttf object. 
+ * + */ +int ttf_tte_init(struct ttf *ttf) +{ + if (!ttf) + return -ENODEV; + + if (!ttf->awake_voter || !ttf->get_ttf_param) { + pr_err("Insufficient functions for supporting ttf\n"); + return -EINVAL; + } + + if (!ttf->iterm_delta) + ttf->iterm_delta = DEFAULT_TTF_ITERM_DELTA_MA; + if (!ttf->period_ms) + ttf->period_ms = DEFAULT_TTF_RUN_PERIOD_MS; + + mutex_init(&ttf->lock); + INIT_DELAYED_WORK(&ttf->ttf_work, ttf_work); + + return 0; +} diff --git a/drivers/power/supply/qcom/fg-alg.h b/drivers/power/supply/qcom/fg-alg.h index 0eba2bdc5a7c679e2be060a5b3f94e6618383bb9..89670d50cfaa3d68bd8e9fe51e4e3661333b5cb3 100644 --- a/drivers/power/supply/qcom/fg-alg.h +++ b/drivers/power/supply/qcom/fg-alg.h @@ -13,9 +13,18 @@ #ifndef __FG_ALG_H__ #define __FG_ALG_H__ +#include "step-chg-jeita.h" + #define BUCKET_COUNT 8 #define BUCKET_SOC_PCT (256 / BUCKET_COUNT) +#define MAX_CC_STEPS 20 +#define MAX_TTF_SAMPLES 10 +#define is_between(left, right, value) \ + (((left) >= (right) && (left) >= (value) \ + && (value) >= (right)) \ + || ((left) <= (right) && (left) <= (value) \ + && (value) <= (right))) struct cycle_counter { void *data; char str_buf[BUCKET_COUNT * 8]; @@ -58,6 +67,67 @@ struct cap_learning { int (*prime_cc_soc)(void *data, u32 cc_soc_sw); }; +enum ttf_mode { + TTF_MODE_NORMAL = 0, + TTF_MODE_QNOVO, + TTF_MODE_V_STEP_CHG, +}; + +enum ttf_param { + TTF_MSOC = 0, + TTF_VBAT, + TTF_IBAT, + TTF_FCC, + TTF_MODE, + TTF_ITERM, + TTF_RBATT, + TTF_VFLOAT, + TTF_CHG_TYPE, + TTF_CHG_STATUS, +}; + +struct ttf_circ_buf { + int arr[MAX_TTF_SAMPLES]; + int size; + int head; +}; + +struct ttf_cc_step_data { + int arr[MAX_CC_STEPS]; + int sel; +}; + +struct ttf_pt { + s32 x; + s32 y; +}; + +struct step_chg_data { + int ocv; + int soc; +}; + +struct ttf { + void *data; + struct ttf_circ_buf ibatt; + struct ttf_circ_buf vbatt; + struct ttf_cc_step_data cc_step; + struct mutex lock; + struct step_chg_data *step_chg_data; + struct range_data *step_chg_cfg; + bool step_chg_cfg_valid; + int step_chg_num_params; + int mode; + int last_ttf; + int input_present; + int iterm_delta; + int period_ms; + s64 last_ms; + struct delayed_work ttf_work; + int (*get_ttf_param)(void *data, enum ttf_param, int *val); + int (*awake_voter)(void *data, bool vote); +}; + int restore_cycle_count(struct cycle_counter *counter); void clear_cycle_count(struct cycle_counter *counter); void cycle_count_update(struct cycle_counter *counter, int batt_soc, @@ -72,5 +142,9 @@ void cap_learning_update(struct cap_learning *cl, int batt_temp, int cap_learning_init(struct cap_learning *cl); int cap_learning_post_profile_init(struct cap_learning *cl, int64_t nom_cap_uah); +void ttf_update(struct ttf *ttf, bool input_present); +int ttf_get_time_to_empty(struct ttf *ttf, int *val); +int ttf_get_time_to_full(struct ttf *ttf, int *val); +int ttf_tte_init(struct ttf *ttf); #endif diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 06df23611eb23fc65d394f5dfad6f484f6697e46..571a995a31d01bc8e4d62424de90dd0d6fd7503b 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -63,10 +63,14 @@ #define SRAM_WRITE "fg_sram_write" #define PROFILE_LOAD "fg_profile_load" #define TTF_PRIMING "fg_ttf_priming" +#define ESR_CALIB "fg_esr_calib" /* Delta BSOC irq votable reasons */ #define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq" +/* Delta ESR irq votable reasons */ +#define DELTA_ESR_IRQ_VOTER "fg_delta_esr_irq" + /* Battery missing irq votable reasons */ #define BATT_MISS_IRQ_VOTER 
"fg_batt_miss_irq" @@ -82,6 +86,7 @@ #define DEBUG_BATT_SOC 67 #define BATT_MISS_SOC 50 +#define ESR_SOH_SOC 50 #define EMPTY_SOC 0 enum prof_load_status { @@ -162,9 +167,12 @@ enum fg_irq_index { enum fg_sram_param_id { FG_SRAM_BATT_SOC = 0, FG_SRAM_FULL_SOC, + FG_SRAM_MONOTONIC_SOC, FG_SRAM_VOLTAGE_PRED, FG_SRAM_OCV, FG_SRAM_ESR, + FG_SRAM_ESR_MDL, + FG_SRAM_ESR_ACT, FG_SRAM_RSLOW, FG_SRAM_ALG_FLAGS, FG_SRAM_CC_SOC, @@ -190,14 +198,23 @@ enum fg_sram_param_id { FG_SRAM_DELTA_BSOC_THR, FG_SRAM_RECHARGE_SOC_THR, FG_SRAM_RECHARGE_VBATT_THR, + FG_SRAM_KI_COEFF_LOW_DISCHG, FG_SRAM_KI_COEFF_MED_DISCHG, FG_SRAM_KI_COEFF_HI_DISCHG, + FG_SRAM_KI_COEFF_LOW_CHG, + FG_SRAM_KI_COEFF_MED_CHG, + FG_SRAM_KI_COEFF_HI_CHG, FG_SRAM_KI_COEFF_FULL_SOC, FG_SRAM_ESR_TIGHT_FILTER, FG_SRAM_ESR_BROAD_FILTER, FG_SRAM_SLOPE_LIMIT, FG_SRAM_BATT_TEMP_COLD, FG_SRAM_BATT_TEMP_HOT, + FG_SRAM_ESR_CAL_SOC_MIN, + FG_SRAM_ESR_CAL_SOC_MAX, + FG_SRAM_ESR_CAL_TEMP_MIN, + FG_SRAM_ESR_CAL_TEMP_MAX, + FG_SRAM_DELTA_ESR_THR, FG_SRAM_MAX, }; @@ -253,6 +270,7 @@ enum wa_flags { PMI8998_V1_REV_WA = BIT(0), PM660_TSMC_OSC_WA = BIT(1), PM8150B_V1_DMA_WA = BIT(2), + PM8150B_V1_RSLOW_COMP_WA = BIT(3), }; enum slope_limit_status { @@ -269,9 +287,9 @@ enum esr_timer_config { NUM_ESR_TIMERS, }; -enum ttf_mode { - TTF_MODE_NORMAL = 0, - TTF_MODE_QNOVO, +enum fg_ttf_mode { + FG_TTF_MODE_NORMAL = 0, + FG_TTF_MODE_QNOVO, }; /* parameters from battery profile */ @@ -283,6 +301,9 @@ struct fg_batt_props { int fastchg_curr_ma; int *therm_coeffs; int therm_ctr_offset; + int therm_pull_up_kohms; + int *rslow_normal_coeffs; + int *rslow_low_coeffs; }; struct fg_cyc_ctr_data { @@ -327,7 +348,7 @@ struct fg_pt { s32 y; }; -struct ttf { +struct fg_ttf { struct fg_circ_buf ibatt; struct fg_circ_buf vbatt; struct fg_cc_step_data cc_step; diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c index 049639e0548a7c181e7e6b9ae064591967a6d9a4..08e803818daec826ca495c56641ff1e3d06b4553 100644 --- a/drivers/power/supply/qcom/fg-memif.c +++ b/drivers/power/supply/qcom/fg-memif.c @@ -793,36 +793,91 @@ int fg_interleaved_mem_write(struct fg_dev *fg, u16 address, u8 offset, return rc; } -#define MEM_GNT_WAIT_TIME_US 10000 -#define MEM_GNT_RETRIES 50 -static int fg_direct_mem_request(struct fg_dev *fg, bool request) +static int fg_poll_alg_active(struct fg_dev *fg) { - int rc, ret, i = 0; - u8 val, mask, poll_bit; + u32 retries = 35, poll_time_us = 10000; + int rc; + u8 val; + + /* + * ALG active should be asserted low within ~164 ms mostly however + * during ESR pulsing, a worst case delay of ~320 ms is needed. + */ + while (retries--) { + rc = fg_read(fg, BATT_INFO_PEEK_RD(fg), &val, 1); + if (rc < 0) { + pr_err("failed to read PEEK_MUX rc=%d\n", rc); + return rc; + } + + if (!(val & ALG_ACTIVE_BIT)) + break; + + usleep_range(poll_time_us, poll_time_us + 1); + } + + if (val & ALG_ACTIVE_BIT) + return -ETIMEDOUT; + + /* Wait for 1 ms after ALG active is asserted low */ + usleep_range(1000, 1001); + return rc; +} + +static int fg_direct_mem_release(struct fg_dev *fg) +{ + int rc; + u8 val = 0, mask; mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT; - val = request ? MEM_ACCESS_REQ_BIT : 0; rc = fg_masked_write(fg, MEM_IF_MEM_INTF_CFG(fg), mask, val); if (rc < 0) { pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc); return rc; } - mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT; - val = request ? 
mask : 0; + mask = MEM_ARB_REQ_BIT; rc = fg_masked_write(fg, MEM_IF_MEM_ARB_CFG(fg), mask, val); if (rc < 0) { pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc); - goto release; + return rc; } - if (request) - pr_debug("requesting access\n"); - else - pr_debug("releasing access\n"); + pr_debug("released access\n"); + return rc; +} - if (!request) - return 0; +#define MEM_GNT_WAIT_TIME_US 10000 +#define MEM_GNT_RETRIES 50 +static int fg_direct_mem_request(struct fg_dev *fg) +{ + int rc, ret, i = 0; + u8 val, mask, poll_bit; + + if (fg->wa_flags & PM8150B_V1_DMA_WA) { + rc = fg_poll_alg_active(fg); + if (rc < 0) { + pr_err("Failed to assert ALG active rc=%d\n", rc); + return rc; + } + } + + val = mask = MEM_ARB_REQ_BIT; + rc = fg_masked_write(fg, MEM_IF_MEM_ARB_CFG(fg), mask, val); + if (rc < 0) { + pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc); + return rc; + } + + mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT; + val = MEM_ACCESS_REQ_BIT; + rc = fg_masked_write(fg, MEM_IF_MEM_INTF_CFG(fg), mask, val); + if (rc < 0) { + pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc); + goto release; + } + + pr_debug("requesting access\n"); /* * HW takes 5 cycles (200 KHz clock) to grant access after requesting @@ -858,20 +913,9 @@ static int fg_direct_mem_request(struct fg_dev *fg, bool request) fg_dump_regs(fg); release: - val = 0; - mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT; - ret = fg_masked_write(fg, MEM_IF_MEM_INTF_CFG(fg), mask, val); - if (ret < 0) { - pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc); + ret = fg_direct_mem_release(fg); + if (ret < 0) return ret; - } - - mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT; - ret = fg_masked_write(fg, MEM_IF_MEM_ARB_CFG(fg), mask, val); - if (ret < 0) { - pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc); - return ret; - } return rc; } @@ -987,7 +1031,7 @@ static int __fg_direct_mem_rw(struct fg_dev *fg, u16 sram_addr, u8 offset, pr_debug("number of partitions: %d\n", num_partitions); - rc = fg_direct_mem_request(fg, true); + rc = fg_direct_mem_request(fg); if (rc < 0) { pr_err("Error in requesting direct_mem access rc=%d\n", rc); return rc; @@ -1032,7 +1076,7 @@ static int __fg_direct_mem_rw(struct fg_dev *fg, u16 sram_addr, u8 offset, offset = 0; } - ret = fg_direct_mem_request(fg, false); + ret = fg_direct_mem_release(fg); if (ret < 0) { pr_err("Error in releasing direct_mem access rc=%d\n", rc); return ret; @@ -1156,7 +1200,7 @@ static struct fg_dma_address fg_gen4_addr_map[6] = { /* wk/scratch pad partition continued */ { .partition_start = 406, - .partition_end = 480, + .partition_end = 486, .spmi_addr_base = GEN4_FG_DMA5_BASE + SRAM_ADDR_OFFSET, }, }; @@ -1164,6 +1208,7 @@ static struct fg_dma_address fg_gen4_addr_map[6] = { static int fg_dma_init(struct fg_dev *fg) { int rc; + u8 val; if (fg->version == GEN3_FG) { fg->sram.addr_map = fg_gen3_addr_map; @@ -1174,7 +1219,7 @@ static int fg_dma_init(struct fg_dev *fg) fg->sram.addr_map = fg_gen4_addr_map; fg->sram.num_partitions = 6; fg->sram.num_bytes_per_word = 2; - fg->sram.address_max = 479; + fg->sram.address_max = 485; } else { pr_err("Unknown FG version %d\n", fg->version); return -ENXIO; @@ -1196,13 +1241,33 @@ static int fg_dma_init(struct fg_dev *fg) } /* Release the DMA initially so that request can happen */ - rc = fg_direct_mem_request(fg, false); + rc = fg_direct_mem_release(fg); if (rc < 0) { pr_err("Error in releasing direct_mem access rc=%d\n", rc); return rc; } + /* Set low latency always and clear log bit */ + rc = 
fg_masked_write(fg, MEM_IF_MEM_ARB_CFG(fg), + MEM_ARB_LO_LATENCY_EN_BIT | MEM_CLR_LOG_BIT, + MEM_ARB_LO_LATENCY_EN_BIT); + if (rc < 0) { + pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc); + return rc; + } + + /* Configure PEEK_MUX only for PM8150B v1.0 */ + if (fg->wa_flags & PM8150B_V1_DMA_WA) { + val = ALG_ACTIVE_PEEK_CFG; + rc = fg_write(fg, BATT_INFO_PEEK_MUX4(fg), &val, 1); + if (rc < 0) { + pr_err("failed to configure batt_info_peek_mux4 rc:%d\n", + rc); + return rc; + } + } + return 0; } diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h index aa30626d9427c92dd733c3ff13d9e9e0281d9527..931af0b2ed8fe64507e988a10b748ad79b169030 100644 --- a/drivers/power/supply/qcom/fg-reg.h +++ b/drivers/power/supply/qcom/fg-reg.h @@ -35,6 +35,12 @@ #define ADC_RR_BATT_ID_LO_BIAS_LSB(chip) (chip->rradc_base + 0x76) #define ADC_RR_BATT_ID_LO_BIAS_MSB(chip) (chip->rradc_base + 0x77) +#define ADC_RR_BATT_THERM_BASE_CFG1(chip) (chip->rradc_base + 0x81) +#define BATT_THERM_PULL_UP_30K 1 +#define BATT_THERM_PULL_UP_100K 2 +#define BATT_THERM_PULL_UP_400K 3 +#define BATT_THERM_PULL_UP_MASK GENMASK(1, 0) + #define ADC_RR_BATT_TEMP_LSB(chip) (chip->rradc_base + 0x88) #define ADC_RR_BATT_TEMP_MSB(chip) (chip->rradc_base + 0x89) #define GEN4_BATT_TEMP_MSB_MASK GENMASK(1, 0) @@ -275,6 +281,12 @@ #define ESR_REQ_CTL_BIT BIT(1) #define ESR_REQ_CTL_EN_BIT BIT(0) +#define BATT_INFO_PEEK_MUX4(chip) (chip->batt_info_base + 0xEE) +#define ALG_ACTIVE_PEEK_CFG 0xAC + +#define BATT_INFO_PEEK_RD(chip) (chip->batt_info_base + 0xEF) +#define ALG_ACTIVE_BIT BIT(3) + /* FG_MEM_IF register and bit definitions */ #define MEM_IF_INT_RT_STS(chip) ((chip->mem_if_base) + 0x10) #define MEM_XCP_BIT BIT(1) @@ -284,6 +296,7 @@ #define GEN4_MEM_ATTN_BIT BIT(4) #define MEM_IF_MEM_ARB_CFG(chip) ((chip->mem_if_base) + 0x40) +#define MEM_CLR_LOG_BIT BIT(2) #define MEM_ARB_LO_LATENCY_EN_BIT BIT(1) #define MEM_ARB_REQ_BIT BIT(0) diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index 7a0e4686ef5175d01759bc601b7eeafe6c44c6d3..9b34ab20de730d18beed6334e12883f126aaf4ba 100644 --- a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -26,6 +26,7 @@ CHARS_PER_ITEM) + 1) \ #define VOLTAGE_15BIT_MASK GENMASK(14, 0) +#define MAX_READ_TRIES 5 int fg_decode_voltage_15b(struct fg_sram_param *sp, enum fg_sram_param_id id, int value) @@ -826,13 +827,12 @@ int fg_restart(struct fg_dev *fg, int wait_time_ms) /* All fg_get_* , fg_set_* functions here */ -#define MAX_TRIES_SOC 5 int fg_get_msoc_raw(struct fg_dev *fg, int *val) { u8 cap[2]; int rc, tries = 0; - while (tries < MAX_TRIES_SOC) { + while (tries < MAX_READ_TRIES) { rc = fg_read(fg, BATT_SOC_FG_MONOTONIC_SOC(fg), cap, 2); if (rc < 0) { pr_err("failed to read addr=0x%04x, rc=%d\n", @@ -846,8 +846,8 @@ int fg_get_msoc_raw(struct fg_dev *fg, int *val) tries++; } - if (tries == MAX_TRIES_SOC) { - pr_err("shadow registers do not match\n"); + if (tries == MAX_READ_TRIES) { + pr_err("MSOC: shadow registers do not match\n"); return -EINVAL; } @@ -934,15 +934,32 @@ int fg_get_battery_resistance(struct fg_dev *fg, int *val) #define BATT_CURRENT_DENR 1000 int fg_get_battery_current(struct fg_dev *fg, int *val) { - int rc = 0; + int rc = 0, tries = 0; int64_t temp = 0; - u8 buf[2]; + u8 buf[2], buf_cp[2]; - rc = fg_read(fg, BATT_INFO_IBATT_LSB(fg), buf, 2); - if (rc < 0) { - pr_err("failed to read addr=0x%04x, rc=%d\n", - BATT_INFO_IBATT_LSB(fg), rc); - return rc; + while (tries++ < MAX_READ_TRIES) { + 
rc = fg_read(fg, BATT_INFO_IBATT_LSB(fg), buf, 2); + if (rc < 0) { + pr_err("failed to read addr=0x%04x, rc=%d\n", + BATT_INFO_IBATT_LSB(fg), rc); + return rc; + } + + rc = fg_read(fg, BATT_INFO_IBATT_LSB_CP(fg), buf_cp, 2); + if (rc < 0) { + pr_err("failed to read addr=0x%04x, rc=%d\n", + BATT_INFO_IBATT_LSB_CP(fg), rc); + return rc; + } + + if (buf[0] == buf_cp[0] && buf[1] == buf_cp[1]) + break; + } + + if (tries == MAX_READ_TRIES) { + pr_err("IBATT: shadow registers do not match\n"); + return -EINVAL; } if (fg->wa_flags & PMI8998_V1_REV_WA) @@ -961,15 +978,32 @@ int fg_get_battery_current(struct fg_dev *fg, int *val) #define BATT_VOLTAGE_DENR 1000 int fg_get_battery_voltage(struct fg_dev *fg, int *val) { - int rc = 0; + int rc = 0, tries = 0; u16 temp = 0; - u8 buf[2]; + u8 buf[2], buf_cp[2]; - rc = fg_read(fg, BATT_INFO_VBATT_LSB(fg), buf, 2); - if (rc < 0) { - pr_err("failed to read addr=0x%04x, rc=%d\n", - BATT_INFO_VBATT_LSB(fg), rc); - return rc; + while (tries++ < MAX_READ_TRIES) { + rc = fg_read(fg, BATT_INFO_VBATT_LSB(fg), buf, 2); + if (rc < 0) { + pr_err("failed to read addr=0x%04x, rc=%d\n", + BATT_INFO_VBATT_LSB(fg), rc); + return rc; + } + + rc = fg_read(fg, BATT_INFO_VBATT_LSB_CP(fg), buf_cp, 2); + if (rc < 0) { + pr_err("failed to read addr=0x%04x, rc=%d\n", + BATT_INFO_VBATT_LSB_CP(fg), rc); + return rc; + } + + if (buf[0] == buf_cp[0] && buf[1] == buf_cp[1]) + break; + } + + if (tries == MAX_READ_TRIES) { + pr_err("VBATT: shadow registers do not match\n"); + return -EINVAL; } if (fg->wa_flags & PMI8998_V1_REV_WA) diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c index c1f10977a508b5ecf5ebd4220823f4397e27a904..beb6f519539e142bc78bb120035f0329e3c7eced 100644 --- a/drivers/power/supply/qcom/qg-battery-profile.c +++ b/drivers/power/supply/qcom/qg-battery-profile.c @@ -398,10 +398,8 @@ int lookup_soc_ocv(u32 *soc, u32 ocv_uv, int batt_temp, bool charging) { u8 table_index = charging ? TABLE_SOC_OCV1 : TABLE_SOC_OCV2; - if (!the_battery || !the_battery->profile_node) { - pr_err("Battery profile not loaded\n"); + if (!the_battery || !the_battery->profile_node) return -ENODEV; - } *soc = interpolate_soc(&the_battery->profile[table_index], batt_temp, UV_TO_DECIUV(ocv_uv)); @@ -416,10 +414,8 @@ int qg_get_nominal_capacity(u32 *nom_cap_uah, int batt_temp, bool charging) u8 table_index = charging ? 
TABLE_FCC1 : TABLE_FCC2; u32 fcc_mah; - if (!the_battery || !the_battery->profile_node) { - pr_err("Battery profile not loaded\n"); + if (!the_battery || !the_battery->profile_node) return -ENODEV; - } fcc_mah = interpolate_single_row_lut( &the_battery->profile[table_index], diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h index 1591aa15a79ab552ec9d52afafbedc475e1eab00..54ef074f5fd982ff77f177dbaa505f049e7aac8e 100644 --- a/drivers/power/supply/qcom/qg-core.h +++ b/drivers/power/supply/qcom/qg-core.h @@ -14,6 +14,7 @@ #include #include "fg-alg.h" +#include "qg-defs.h" struct qg_batt_props { const char *batt_type_str; @@ -50,10 +51,25 @@ struct qg_dt { int rbat_conn_mohm; int ignore_shutdown_soc_secs; int cold_temp_threshold; + int esr_qual_i_ua; + int esr_qual_v_uv; + int esr_disable_soc; bool hold_soc_while_full; bool linearize_soc; bool cl_disable; bool cl_feedback_on; + bool esr_disable; + bool esr_discharge_enable; + bool qg_ext_sense; +}; + +struct qg_esr_data { + u32 pre_esr_v; + u32 pre_esr_i; + u32 post_esr_v; + u32 post_esr_i; + u32 esr; + bool valid; }; struct qpnp_qg { @@ -87,6 +103,7 @@ struct qpnp_qg { struct power_supply *batt_psy; struct power_supply *usb_psy; struct power_supply *parallel_psy; + struct qg_esr_data esr_data[QG_MAX_ESR_COUNT]; /* status variable */ u32 *debug_mask; @@ -102,10 +119,18 @@ struct qpnp_qg { bool charge_full; int charge_status; int charge_type; + int chg_iterm_ma; int next_wakeup_ms; + int esr_actual; + int esr_nominal; + int soh; + int soc_reporting_ready; + u32 fifo_done_count; u32 wa_flags; u32 seq_no; u32 charge_counter_uah; + u32 esr_avg; + u32 esr_last; ktime_t last_user_update_time; ktime_t last_fifo_update_time; struct iio_channel *batt_therm_chan; @@ -118,6 +143,7 @@ struct qpnp_qg { int pon_soc; int batt_soc; int cc_soc; + int full_soc; struct alarm alarm_timer; u32 sdam_data[SDAM_MAX]; @@ -128,6 +154,14 @@ struct qpnp_qg { struct cap_learning *cl; /* charge counter */ struct cycle_counter *counter; + /* ttf */ + struct ttf *ttf; +}; + +struct ocv_all { + u32 ocv_uv; + u32 ocv_raw; + char ocv_type[20]; }; enum ocv_type { @@ -135,6 +169,7 @@ enum ocv_type { S3_GOOD_OCV, S3_LAST_OCV, SDAM_PON_OCV, + PON_OCV_MAX, }; enum debug_mask { @@ -149,6 +184,7 @@ enum debug_mask { QG_DEBUG_BUS_READ = BIT(8), QG_DEBUG_BUS_WRITE = BIT(9), QG_DEBUG_ALG_CL = BIT(10), + QG_DEBUG_ESR = BIT(11), }; enum qg_irq { diff --git a/drivers/power/supply/qcom/qg-defs.h b/drivers/power/supply/qcom/qg-defs.h index 2061208ad55ccf49d8a8754a528c94ce5f58a943..997ff701c77d671760f9e11b83c8b2f2c8953335 100644 --- a/drivers/power/supply/qcom/qg-defs.h +++ b/drivers/power/supply/qcom/qg-defs.h @@ -34,6 +34,7 @@ #define GOOD_OCV_VOTER "GOOD_OCV_VOTER" #define PROFILE_IRQ_DISABLE "NO_PROFILE_IRQ_DISABLE" #define QG_INIT_STATE_IRQ_DISABLE "QG_INIT_STATE_IRQ_DISABLE" +#define TTF_AWAKE_VOTER "TTF_AWAKE_VOTER" #define V_RAW_TO_UV(V_RAW) div_u64(194637ULL * (u64)V_RAW, 1000) #define I_RAW_TO_UA(I_RAW) div_s64(152588LL * (s64)I_RAW, 1000) @@ -44,6 +45,9 @@ #define UV_TO_DECIUV(a) (a / 100) #define DECIUV_TO_UV(a) (a * 100) +#define QG_MAX_ESR_COUNT 10 +#define QG_MIN_ESR_COUNT 2 + #define CAP(min, max, value) \ ((min > value) ? min : ((value > max) ? 
max : value)) diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h index 66f9be11a7df9dd1063e312f725eb2cf1050106c..e0f400d05cd31a4c94404a3e92fe7a7e23793aec 100644 --- a/drivers/power/supply/qcom/qg-reg.h +++ b/drivers/power/supply/qcom/qg-reg.h @@ -17,7 +17,9 @@ #define QG_TYPE 0x0D #define QG_STATUS1_REG 0x08 +#define QG_OK_BIT BIT(7) #define BATTERY_PRESENT_BIT BIT(0) +#define ESR_MEAS_DONE_BIT BIT(4) #define QG_STATUS2_REG 0x09 #define GOOD_OCV_BIT BIT(1) @@ -25,9 +27,13 @@ #define QG_STATUS3_REG 0x0A #define COUNT_FIFO_RT_MASK GENMASK(3, 0) +#define QG_STATUS4_REG 0x0B +#define ESR_MEAS_IN_PROGRESS_BIT BIT(4) + #define QG_INT_RT_STS_REG 0x10 #define FIFO_UPDATE_DONE_RT_STS_BIT BIT(3) #define VBAT_LOW_INT_RT_STS_BIT BIT(1) +#define BATTERY_MISSING_INT_RT_STS_BIT BIT(0) #define QG_INT_LATCHED_STS_REG 0x18 #define FIFO_UPDATE_DONE_INT_LAT_STS_BIT BIT(3) @@ -60,11 +66,25 @@ #define QG_S3_ENTRY_IBAT_THRESHOLD_REG 0x5E #define QG_S3_EXIT_IBAT_THRESHOLD_REG 0x5F +#define QG_S5_OCV_VALIDATE_MEAS_CTL1_REG 0x60 +#define ALLOW_S5_BIT BIT(7) + +#define QG_S7_PON_OCV_MEAS_CTL1_REG 0x64 +#define ADC_CONV_DLY_MASK GENMASK(3, 0) + +#define QG_ESR_MEAS_TRIG_REG 0x68 +#define HW_ESR_MEAS_START_BIT BIT(0) + #define QG_S7_PON_OCV_V_DATA0_REG 0x70 #define QG_S7_PON_OCV_I_DATA0_REG 0x72 #define QG_S3_GOOD_OCV_V_DATA0_REG 0x74 #define QG_S3_GOOD_OCV_I_DATA0_REG 0x76 +#define QG_PRE_ESR_V_DATA0_REG 0x78 +#define QG_PRE_ESR_I_DATA0_REG 0x7A +#define QG_POST_ESR_V_DATA0_REG 0x7C +#define QG_POST_ESR_I_DATA0_REG 0x7E + #define QG_V_ACCUM_DATA0_RT_REG 0x88 #define QG_I_ACCUM_DATA0_RT_REG 0x8B #define QG_ACCUM_CNT_RT_REG 0x8E @@ -80,15 +100,22 @@ #define QG_LAST_S3_SLEEP_V_DATA0_REG 0xCC /* SDAM offsets */ -#define QG_SDAM_VALID_OFFSET 0x46 -#define QG_SDAM_SOC_OFFSET 0x47 -#define QG_SDAM_TEMP_OFFSET 0x48 -#define QG_SDAM_RBAT_OFFSET 0x4A -#define QG_SDAM_OCV_OFFSET 0x4C -#define QG_SDAM_IBAT_OFFSET 0x50 -#define QG_SDAM_TIME_OFFSET 0x54 -#define QG_SDAM_CYCLE_COUNT_OFFSET 0x58 -#define QG_SDAM_LEARNED_CAPACITY_OFFSET 0x68 -#define QG_SDAM_PON_OCV_OFFSET 0x7C +#define QG_SDAM_VALID_OFFSET 0x46 /* 1-byte 0x46 */ +#define QG_SDAM_SOC_OFFSET 0x47 /* 1-byte 0x47 */ +#define QG_SDAM_TEMP_OFFSET 0x48 /* 2-byte 0x48-0x49 */ +#define QG_SDAM_RBAT_OFFSET 0x4A /* 2-byte 0x4A-0x4B */ +#define QG_SDAM_OCV_OFFSET 0x4C /* 4-byte 0x4C-0x4F */ +#define QG_SDAM_IBAT_OFFSET 0x50 /* 4-byte 0x50-0x53 */ +#define QG_SDAM_TIME_OFFSET 0x54 /* 4-byte 0x54-0x57 */ +#define QG_SDAM_CYCLE_COUNT_OFFSET 0x58 /* 16-byte 0x58-0x67 */ +#define QG_SDAM_LEARNED_CAPACITY_OFFSET 0x68 /* 2-byte 0x68-0x69 */ +#define QG_SDAM_ESR_CHARGE_DELTA_OFFSET 0x6A /* 4-byte 0x6A-0x6D */ +#define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET 0x6E /* 4-byte 0x6E-0x71 */ +#define QG_SDAM_ESR_CHARGE_SF_OFFSET 0x72 /* 2-byte 0x72-0x73 */ +#define QG_SDAM_ESR_DISCHARGE_SF_OFFSET 0x74 /* 2-byte 0x74-0x75 */ +#define QG_SDAM_MAX_OFFSET 0xA4 + +/* Below offset is used by PBS */ +#define QG_SDAM_PON_OCV_OFFSET 0xBC /* 2-byte 0xBC-0xBD */ #endif diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c index 7bc4afac1b447d600df17254fec6ce34f4d45f3e..a7cb97e0e53d925e1aa656ef42650b23e2fe707d 100644 --- a/drivers/power/supply/qcom/qg-sdam.c +++ b/drivers/power/supply/qcom/qg-sdam.c @@ -68,6 +68,26 @@ static struct qg_sdam_info sdam_info[] = { .offset = QG_SDAM_PON_OCV_OFFSET, .length = 2, }, + [SDAM_ESR_CHARGE_DELTA] = { + .name = "SDAM_ESR_CHARGE_DELTA", + .offset = QG_SDAM_ESR_CHARGE_DELTA_OFFSET, + .length = 4, + }, + 
[SDAM_ESR_DISCHARGE_DELTA] = { + .name = "SDAM_ESR_DISCHARGE_DELTA", + .offset = QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET, + .length = 4, + }, + [SDAM_ESR_CHARGE_SF] = { + .name = "SDAM_ESR_CHARGE_SF_OFFSET", + .offset = QG_SDAM_ESR_CHARGE_SF_OFFSET, + .length = 2, + }, + [SDAM_ESR_DISCHARGE_SF] = { + .name = "SDAM_ESR_DISCHARGE_SF_OFFSET", + .offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET, + .length = 2, + }, }; int qg_sdam_write(u8 param, u32 data) @@ -91,7 +111,7 @@ int qg_sdam_write(u8 param, u32 data) length = sdam_info[param].length; rc = regmap_bulk_write(chip->regmap, offset, (u8 *)&data, length); if (rc < 0) - pr_err("Failed to write offset=%0x4x param=%d value=%d\n", + pr_err("Failed to write offset=%0x4 param=%d value=%d\n", offset, param, data); else pr_debug("QG SDAM write param=%s value=%d\n", @@ -117,11 +137,12 @@ int qg_sdam_read(u8 param, u32 *data) return -EINVAL; } + *data = 0; offset = chip->sdam_base + sdam_info[param].offset; length = sdam_info[param].length; rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, length); if (rc < 0) - pr_err("Failed to read offset=%0x4x param=%d\n", + pr_err("Failed to read offset=%0x4 param=%d\n", offset, param); else pr_debug("QG SDAM read param=%s value=%d\n", @@ -143,11 +164,11 @@ int qg_sdam_multibyte_write(u32 offset, u8 *data, u32 length) offset = chip->sdam_base + offset; rc = regmap_bulk_write(chip->regmap, offset, data, (size_t)length); if (rc < 0) { - pr_err("Failed to write offset=%0x4x value=%d\n", + pr_err("Failed to write offset=%0x4 value=%d\n", offset, *data); } else { for (i = 0; i < length; i++) - pr_debug("QG SDAM write offset=%0x4x value=%d\n", + pr_debug("QG SDAM write offset=%0x4 value=%d\n", offset++, data[i]); } @@ -167,10 +188,10 @@ int qg_sdam_multibyte_read(u32 offset, u8 *data, u32 length) offset = chip->sdam_base + offset; rc = regmap_raw_read(chip->regmap, offset, (u8 *)data, (size_t)length); if (rc < 0) { - pr_err("Failed to read offset=%0x4x\n", offset); + pr_err("Failed to read offset=%0x4\n", offset); } else { for (i = 0; i < length; i++) - pr_debug("QG SDAM read offset=%0x4x value=%d\n", + pr_debug("QG SDAM read offset=%0x4 value=%d\n", offset++, data[i]); } diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h index 10e684f8ec403d70e23c1c75caa8541af438e24d..45218a83776e1aaab17d42d79d68e2963d0a6df5 100644 --- a/drivers/power/supply/qcom/qg-sdam.h +++ b/drivers/power/supply/qcom/qg-sdam.h @@ -24,6 +24,10 @@ enum qg_sdam_param { SDAM_IBAT_UA, SDAM_TIME_SEC, SDAM_PON_OCV_UV, + SDAM_ESR_CHARGE_DELTA, + SDAM_ESR_DISCHARGE_DELTA, + SDAM_ESR_CHARGE_SF, + SDAM_ESR_DISCHARGE_SF, SDAM_MAX, }; diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c index 9daa20479d6b92c9454f0c0b51baef6a32d6d494..fc4396e7e95a745371ed3836760f66aded5515e5 100644 --- a/drivers/power/supply/qcom/qg-util.c +++ b/drivers/power/supply/qcom/qg-util.c @@ -111,6 +111,22 @@ int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val) return rc; } +int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data) +{ + int rc; + u8 reg[2] = {0}; + + rc = qg_read(chip, chip->qg_base + addr, ®[0], 2); + if (rc < 0) { + pr_err("Failed to read QG addr %d rc=%d\n", addr, rc); + return rc; + } + + *data = reg[0] | (reg[1] << 8); + + return rc; +} + int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt) { int rc; diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h index 
385c9e07256257734db3fbe06d2a65af3461dcb9..bb17afb4f5beb8419397197cf76c14e2a8cfaf91 100644 --- a/drivers/power/supply/qcom/qg-util.h +++ b/drivers/power/supply/qcom/qg-util.h @@ -15,6 +15,7 @@ int qg_read(struct qpnp_qg *chip, u32 addr, u8 *val, int len); int qg_write(struct qpnp_qg *chip, u32 addr, u8 *val, int len); int qg_masked_write(struct qpnp_qg *chip, int addr, u32 mask, u32 val); +int qg_read_raw_data(struct qpnp_qg *chip, int addr, u32 *data); int get_fifo_length(struct qpnp_qg *chip, u32 *fifo_length, bool rt); int get_sample_count(struct qpnp_qg *chip, u32 *sample_count); int get_sample_interval(struct qpnp_qg *chip, u32 *sample_interval); diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index a0f5226c6f973321a9235d388a3da08f2eed73c9..3e4447c3de943da378708d18b1e561e3a00eb9bf 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -214,7 +214,7 @@ struct fg_gen3_chip { struct mutex qnovo_esr_ctrl_lock; struct fg_cyc_ctr_data cyc_ctr; struct fg_cap_learning cl; - struct ttf ttf; + struct fg_ttf ttf; struct delayed_work ttf_work; struct delayed_work pl_enable_work; enum slope_limit_status slope_limit_sts; @@ -1693,13 +1693,13 @@ static int fg_adjust_recharge_soc(struct fg_dev *fg) if (!chip->dt.auto_recharge_soc) return 0; - rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH, + rc = power_supply_get_property(fg->batt_psy, POWER_SUPPLY_PROP_HEALTH, &prop); if (rc < 0) { pr_err("Error in getting battery health, rc=%d\n", rc); return rc; } - chip->health = prop.intval; + fg->health = prop.intval; recharge_soc = chip->dt.recharge_soc_thr; recharge_soc_status = fg->recharge_soc_adjusted; @@ -1731,7 +1731,7 @@ static int fg_adjust_recharge_soc(struct fg_dev *fg) if (!fg->recharge_soc_adjusted) return 0; - if (chip->health != POWER_SUPPLY_HEALTH_GOOD) + if (fg->health != POWER_SUPPLY_HEALTH_GOOD) return 0; /* Restore the default value */ @@ -2727,9 +2727,9 @@ static int fg_get_time_to_full_locked(struct fg_dev *fg, int *val) } if (is_qnovo_en(fg)) - ttf_mode = TTF_MODE_QNOVO; + ttf_mode = FG_TTF_MODE_QNOVO; else - ttf_mode = TTF_MODE_NORMAL; + ttf_mode = FG_TTF_MODE_NORMAL; /* when switching TTF algorithms the TTF needs to be reset */ if (chip->ttf.mode != ttf_mode) { @@ -2795,11 +2795,11 @@ static int fg_get_time_to_full_locked(struct fg_dev *fg, int *val) /* estimated battery current at the CC to CV transition */ switch (chip->ttf.mode) { - case TTF_MODE_NORMAL: + case FG_TTF_MODE_NORMAL: i_cc2cv = ibatt_avg * vbatt_avg / max(MILLI_UNIT, fg->bp.float_volt_uv / MILLI_UNIT); break; - case TTF_MODE_QNOVO: + case FG_TTF_MODE_QNOVO: i_cc2cv = min( chip->ttf.cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT, ibatt_avg * vbatt_avg / @@ -2821,7 +2821,7 @@ static int fg_get_time_to_full_locked(struct fg_dev *fg, int *val) fg_dbg(fg, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv); switch (chip->ttf.mode) { - case TTF_MODE_NORMAL: + case FG_TTF_MODE_NORMAL: if (soc_cc2cv - msoc <= 0) goto cv_estimate; @@ -2829,7 +2829,7 @@ static int fg_get_time_to_full_locked(struct fg_dev *fg, int *val) t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) * HOURS_TO_SECONDS, divisor); break; - case TTF_MODE_QNOVO: + case FG_TTF_MODE_QNOVO: soc_per_step = 100 / MAX_CC_STEPS; for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) { msoc_next_step = (i + 1) * soc_per_step; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c index 
b15f5960d68ed5f4096756e6290d80c6c25fe60a..d34b8470d216c21eb5973cb523f138f238db2148 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen4.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) "FG: %s: " fmt, __func__ +#include #include #include #include @@ -23,6 +24,7 @@ #include "fg-alg.h" #define FG_GEN4_DEV_NAME "qcom,fg-gen4" +#define TTF_AWAKE_VOTER "fg_ttf_awake" #define PERPH_SUBTYPE_REG 0x05 #define FG_BATT_SOC_PM8150B 0x10 @@ -32,12 +34,14 @@ #define FG_SRAM_LEN 960 #define PROFILE_LEN 416 -#define PROFILE_COMP_LEN 208 +#define PROFILE_COMP_LEN 24 #define KI_COEFF_SOC_LEVELS 3 +#define ESR_CAL_LEVELS 2 #define KI_COEFF_MAX 15564 #define SLOPE_LIMIT_NUM_COEFFS 4 #define SLOPE_LIMIT_COEFF_MAX 31128 #define BATT_THERM_NUM_COEFFS 5 +#define RSLOW_NUM_COEFFS 4 /* SRAM address/offset definitions in ascending order */ #define BATT_THERM_CONFIG_WORD 3 @@ -49,8 +53,15 @@ #define BATT_TEMP_COLD_OFFSET 1 #define BATT_TEMP_CONFIG2_WORD 10 #define BATT_TEMP_HYST_DELTA_OFFSET 0 +#define ESR_CAL_SOC_MIN_OFFSET 1 +#define ESR_CAL_THRESH_WORD 11 +#define ESR_CAL_SOC_MAX_OFFSET 0 +#define ESR_CAL_TEMP_MIN_OFFSET 1 #define ESR_PULSE_THRESH_WORD 12 +#define ESR_CAL_TEMP_MAX_OFFSET 0 #define ESR_PULSE_THRESH_OFFSET 1 +#define DELTA_ESR_THR_WORD 14 +#define DELTA_ESR_THR_OFFSET 0 #define ESR_TIMER_DISCHG_MAX_WORD 17 #define ESR_TIMER_DISCHG_MAX_OFFSET 0 #define ESR_TIMER_DISCHG_INIT_WORD 17 @@ -67,10 +78,18 @@ #define SYS_TERM_CURR_OFFSET 0 #define VBATT_FULL_WORD 23 #define VBATT_FULL_OFFSET 0 +#define KI_COEFF_LOW_DISCHG_WORD 25 +#define KI_COEFF_LOW_DISCHG_OFFSET 1 #define KI_COEFF_MED_DISCHG_WORD 26 #define KI_COEFF_MED_DISCHG_OFFSET 0 #define KI_COEFF_HI_DISCHG_WORD 26 #define KI_COEFF_HI_DISCHG_OFFSET 1 +#define KI_COEFF_LOW_CHG_WORD 28 +#define KI_COEFF_LOW_CHG_OFFSET 0 +#define KI_COEFF_MED_CHG_WORD 28 +#define KI_COEFF_MED_CHG_OFFSET 1 +#define KI_COEFF_HI_CHG_WORD 29 +#define KI_COEFF_HI_CHG_OFFSET 0 #define DELTA_BSOC_THR_WORD 30 #define DELTA_BSOC_THR_OFFSET 1 #define SLOPE_LIMIT_WORD 32 @@ -79,8 +98,14 @@ #define DELTA_MSOC_THR_OFFSET 1 #define VBATT_LOW_WORD 35 #define VBATT_LOW_OFFSET 1 +#define SYS_CONFIG_WORD 60 +#define SYS_CONFIG_OFFSET 0 #define PROFILE_LOAD_WORD 65 #define PROFILE_LOAD_OFFSET 0 +#define RSLOW_COEFF_DISCHG_WORD 78 +#define RSLOW_COEFF_LOW_OFFSET 0 +#define RSLOW_CONFIG_WORD 241 +#define RSLOW_CONFIG_OFFSET 0 #define NOM_CAP_WORD 271 #define NOM_CAP_OFFSET 0 #define RCONN_WORD 275 @@ -93,6 +118,16 @@ #define PROFILE_INTEGRITY_OFFSET 0 #define ESR_WORD 331 #define ESR_OFFSET 0 +#define ESR_MDL_WORD 335 +#define ESR_MDL_OFFSET 0 +#define ESR_CHAR_WORD 336 +#define ESR_CHAR_OFFSET 0 +#define ESR_DELTA_DISCHG_WORD 340 +#define ESR_DELTA_DISCHG_OFFSET 0 +#define ESR_DELTA_CHG_WORD 341 +#define ESR_DELTA_CHG_OFFSET 0 +#define ESR_ACT_WORD 342 +#define ESR_ACT_OFFSET 0 #define RSLOW_WORD 368 #define RSLOW_OFFSET 0 #define OCV_WORD 417 @@ -107,9 +142,29 @@ #define CC_SOC_SW_OFFSET 0 #define CC_SOC_WORD 460 #define CC_SOC_OFFSET 0 -#define MONOTONIC_SOC_WORD 455 +#define MONOTONIC_SOC_WORD 463 #define MONOTONIC_SOC_OFFSET 0 +/* v2 SRAM address and offset in ascending order */ +#define ACT_BATT_CAP_v2_WORD 287 +#define ACT_BATT_CAP_v2_OFFSET 0 +#define RSLOW_v2_WORD 371 +#define RSLOW_v2_OFFSET 0 +#define OCV_v2_WORD 425 +#define OCV_v2_OFFSET 0 +#define VOLTAGE_PRED_v2_WORD 440 +#define VOLTAGE_PRED_v2_OFFSET 0 +#define BATT_SOC_v2_WORD 455 +#define BATT_SOC_v2_OFFSET 0 +#define FULL_SOC_v2_WORD 461 +#define FULL_SOC_v2_OFFSET 0 +#define 
CC_SOC_SW_v2_WORD 464 +#define CC_SOC_SW_v2_OFFSET 0 +#define CC_SOC_v2_WORD 466 +#define CC_SOC_v2_OFFSET 0 +#define MONOTONIC_SOC_v2_WORD 469 +#define MONOTONIC_SOC_v2_OFFSET 0 + static struct fg_irq_info fg_irqs[FG_GEN4_IRQ_MAX]; /* DT parameters for FG device */ @@ -126,6 +181,11 @@ struct fg_dt_props { int esr_timer_chg_slow[NUM_ESR_TIMERS]; int esr_timer_dischg_fast[NUM_ESR_TIMERS]; int esr_timer_dischg_slow[NUM_ESR_TIMERS]; + u32 esr_cal_soc_thresh[ESR_CAL_LEVELS]; + int esr_cal_temp_thresh[ESR_CAL_LEVELS]; + int esr_filter_factor; + int delta_esr_disable_count; + int delta_esr_thr_uohms; int rconn_uohms; int batt_temp_cold_thresh; int batt_temp_hot_thresh; @@ -134,7 +194,11 @@ struct fg_dt_props { int esr_pulse_thresh_ma; int esr_meas_curr_ma; int slope_limit_temp; + int ki_coeff_low_chg; + int ki_coeff_med_chg; + int ki_coeff_hi_chg; int ki_coeff_soc[KI_COEFF_SOC_LEVELS]; + int ki_coeff_low_dischg[KI_COEFF_SOC_LEVELS]; int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS]; int ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS]; int slope_limit_coeffs[SLOPE_LIMIT_NUM_COEFFS]; @@ -145,12 +209,22 @@ struct fg_gen4_chip { struct fg_dt_props dt; struct cycle_counter *counter; struct cap_learning *cl; - struct ttf ttf; - struct delayed_work ttf_work; + struct ttf *ttf; + struct votable *delta_esr_irq_en_votable; + struct work_struct esr_calib_work; + struct alarm esr_fast_cal_timer; char batt_profile[PROFILE_LEN]; + int delta_esr_count; int recharge_soc_thr; + int esr_actual; + int esr_nominal; + int soh; bool ki_coeff_dischg_en; bool slope_limit_en; + bool esr_fast_calib; + bool esr_fast_calib_done; + bool esr_fast_cal_timer_expired; + bool rslow_low; }; struct bias_config { @@ -176,18 +250,25 @@ module_param_named( static int fg_restart_mp; static bool fg_sram_dump; +static bool fg_esr_fast_cal_en; -static struct fg_sram_param pm8150_sram_params[] = { +static struct fg_sram_param pm8150b_v1_sram_params[] = { PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL, fg_decode_default), - PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL, - fg_decode_default), + PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, + fg_encode_default, fg_decode_default), + PARAM(MONOTONIC_SOC, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET, 2, 1, 1, + 0, NULL, fg_decode_default), PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000, 244141, 0, NULL, fg_decode_voltage_15b), PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL, fg_decode_voltage_15b), PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default, fg_decode_value_16b), + PARAM(ESR_MDL, ESR_MDL_WORD, ESR_MDL_OFFSET, 2, 1000, 244141, 0, + fg_encode_default, fg_decode_value_16b), + PARAM(ESR_ACT, ESR_ACT_WORD, ESR_ACT_OFFSET, 2, 1000, 244141, 0, + fg_encode_default, fg_decode_value_16b), PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL, fg_decode_value_16b), PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL, @@ -223,18 +304,122 @@ static struct fg_sram_param pm8150_sram_params[] = { ESR_TIMER_CHG_INIT_OFFSET, 1, 1, 1, 0, fg_encode_default, NULL), PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET, 1, 1000, 15625, 0, fg_encode_default, NULL), + PARAM(DELTA_ESR_THR, DELTA_ESR_THR_WORD, DELTA_ESR_THR_OFFSET, 2, 1000, + 61036, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_LOW_DISCHG, KI_COEFF_LOW_DISCHG_WORD, + KI_COEFF_LOW_DISCHG_OFFSET, 1, 1000, 61035, 0, + fg_encode_default, NULL), PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD, KI_COEFF_MED_DISCHG_OFFSET, 1, 
1000, 61035, 0, fg_encode_default, NULL), PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD, KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 61035, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_LOW_CHG, KI_COEFF_LOW_CHG_WORD, KI_COEFF_LOW_CHG_OFFSET, + 1, 1000, 61035, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_MED_CHG, KI_COEFF_MED_CHG_WORD, KI_COEFF_MED_CHG_OFFSET, + 1, 1000, 61035, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_HI_CHG, KI_COEFF_HI_CHG_WORD, KI_COEFF_HI_CHG_OFFSET, 1, + 1000, 61035, 0, fg_encode_default, NULL), PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000000, 0, fg_encode_default, NULL), PARAM(BATT_TEMP_COLD, BATT_TEMP_CONFIG_WORD, BATT_TEMP_COLD_OFFSET, 1, 1, 1, 0, fg_encode_default, NULL), PARAM(BATT_TEMP_HOT, BATT_TEMP_CONFIG_WORD, BATT_TEMP_HOT_OFFSET, 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_SOC_MIN, BATT_TEMP_CONFIG2_WORD, ESR_CAL_SOC_MIN_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_SOC_MAX, ESR_CAL_THRESH_WORD, ESR_CAL_SOC_MAX_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_TEMP_MIN, ESR_CAL_THRESH_WORD, ESR_CAL_TEMP_MIN_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_TEMP_MAX, ESR_PULSE_THRESH_WORD, ESR_CAL_TEMP_MAX_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), +}; + +static struct fg_sram_param pm8150b_v2_sram_params[] = { + PARAM(BATT_SOC, BATT_SOC_v2_WORD, BATT_SOC_v2_OFFSET, 4, 1, 1, 0, NULL, + fg_decode_default), + PARAM(FULL_SOC, FULL_SOC_v2_WORD, FULL_SOC_v2_OFFSET, 2, 1, 1, 0, NULL, + fg_decode_default), + PARAM(MONOTONIC_SOC, MONOTONIC_SOC_v2_WORD, MONOTONIC_SOC_v2_OFFSET, 2, + 1, 1, 0, NULL, fg_decode_default), + PARAM(VOLTAGE_PRED, VOLTAGE_PRED_v2_WORD, VOLTAGE_PRED_v2_OFFSET, 2, + 1000, 244141, 0, NULL, fg_decode_voltage_15b), + PARAM(OCV, OCV_v2_WORD, OCV_v2_OFFSET, 2, 1000, 244141, 0, NULL, + fg_decode_voltage_15b), + PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default, + fg_decode_value_16b), + PARAM(ESR_MDL, ESR_MDL_WORD, ESR_MDL_OFFSET, 2, 1000, 244141, 0, + fg_encode_default, fg_decode_value_16b), + PARAM(ESR_ACT, ESR_ACT_WORD, ESR_ACT_OFFSET, 2, 1000, 244141, 0, + fg_encode_default, fg_decode_value_16b), + PARAM(RSLOW, RSLOW_v2_WORD, RSLOW_v2_OFFSET, 2, 1000, 244141, 0, NULL, + fg_decode_value_16b), + PARAM(CC_SOC, CC_SOC_v2_WORD, CC_SOC_v2_OFFSET, 4, 1, 1, 0, NULL, + fg_decode_cc_soc), + PARAM(CC_SOC_SW, CC_SOC_SW_v2_WORD, CC_SOC_SW_v2_OFFSET, 4, 1, 1, 0, + NULL, fg_decode_cc_soc), + PARAM(ACT_BATT_CAP, ACT_BATT_CAP_v2_WORD, ACT_BATT_CAP_v2_OFFSET, 2, + 1, 1, 0, NULL, fg_decode_default), + /* Entries below here are configurable during initialization */ + PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000, + 244141, 0, fg_encode_voltage, NULL), + PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 1000, + 15625, -2000, fg_encode_voltage, NULL), + PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000, + 244141, 0, fg_encode_voltage, fg_decode_voltage_15b), + PARAM(CUTOFF_CURR, CUTOFF_CURR_WORD, CUTOFF_CURR_OFFSET, 2, + 100000, 48828, 0, fg_encode_current, NULL), + PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 2, + 100000, 48828, 0, fg_encode_current, NULL), + PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, + 1, 2048, 100, 0, fg_encode_default, NULL), + PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, + 1, 2048, 100, 0, fg_encode_default, NULL), + PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD, + ESR_TIMER_DISCHG_MAX_OFFSET, 1, 1, 1, 0, fg_encode_default, + NULL), 
+ PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD, + ESR_TIMER_DISCHG_INIT_OFFSET, 1, 1, 1, 0, fg_encode_default, + NULL), + PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD, + ESR_TIMER_CHG_MAX_OFFSET, 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD, + ESR_TIMER_CHG_INIT_OFFSET, 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET, + 1, 1000, 15625, 0, fg_encode_default, NULL), + PARAM(DELTA_ESR_THR, DELTA_ESR_THR_WORD, DELTA_ESR_THR_OFFSET, 2, 1000, + 61036, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_LOW_DISCHG, KI_COEFF_LOW_DISCHG_WORD, + KI_COEFF_LOW_DISCHG_OFFSET, 1, 1000, 61035, 0, + fg_encode_default, NULL), + PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD, + KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 61035, 0, + fg_encode_default, NULL), + PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD, + KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 61035, 0, + fg_encode_default, NULL), + PARAM(KI_COEFF_LOW_CHG, KI_COEFF_LOW_CHG_WORD, KI_COEFF_LOW_CHG_OFFSET, + 1, 1000, 61035, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_MED_CHG, KI_COEFF_MED_CHG_WORD, KI_COEFF_MED_CHG_OFFSET, + 1, 1000, 61035, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_HI_CHG, KI_COEFF_HI_CHG_WORD, KI_COEFF_HI_CHG_OFFSET, 1, + 1000, 61035, 0, fg_encode_default, NULL), + PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, + 1000000, 0, fg_encode_default, NULL), + PARAM(BATT_TEMP_COLD, BATT_TEMP_CONFIG_WORD, BATT_TEMP_COLD_OFFSET, 1, + 1, 1, 0, fg_encode_default, NULL), + PARAM(BATT_TEMP_HOT, BATT_TEMP_CONFIG_WORD, BATT_TEMP_HOT_OFFSET, 1, + 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_SOC_MIN, BATT_TEMP_CONFIG2_WORD, ESR_CAL_SOC_MIN_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_SOC_MAX, ESR_CAL_THRESH_WORD, ESR_CAL_SOC_MAX_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_TEMP_MIN, ESR_CAL_THRESH_WORD, ESR_CAL_TEMP_MIN_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_CAL_TEMP_MAX, ESR_PULSE_THRESH_WORD, ESR_CAL_TEMP_MAX_OFFSET, + 1, 1, 1, 0, fg_encode_default, NULL), }; static bool is_batt_empty(struct fg_dev *fg); @@ -548,6 +733,78 @@ static inline void get_esr_meas_current(int curr_ma, u8 *val) /* ALG callback functions below */ +static int fg_gen4_get_ttf_param(void *data, enum ttf_param param, int *val) +{ + struct fg_gen4_chip *chip = data; + struct fg_dev *fg; + int rc = 0, act_cap_mah, full_soc; + + if (!chip) + return -ENODEV; + + fg = &chip->fg; + if (fg->battery_missing) + return -EPERM; + + switch (param) { + case TTF_MSOC: + rc = fg_gen4_get_prop_capacity(fg, val); + break; + case TTF_VBAT: + rc = fg_get_battery_voltage(fg, val); + break; + case TTF_IBAT: + rc = fg_get_battery_current(fg, val); + break; + case TTF_FCC: + rc = fg_get_sram_prop(fg, FG_SRAM_ACT_BATT_CAP, &act_cap_mah); + if (rc < 0) { + pr_err("Failed to get ACT_BATT_CAP rc=%d\n", rc); + break; + } + + rc = fg_get_sram_prop(fg, FG_SRAM_FULL_SOC, &full_soc); + if (rc < 0) { + pr_err("Failed to get FULL_SOC rc=%d\n", rc); + break; + } + + full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * + FULL_CAPACITY, FULL_SOC_RAW); + *val = full_soc * act_cap_mah / FULL_CAPACITY; + break; + case TTF_MODE: + if (is_qnovo_en(fg)) + *val = TTF_MODE_QNOVO; + else if (chip->ttf->step_chg_cfg_valid) + *val = TTF_MODE_V_STEP_CHG; + else + *val = TTF_MODE_NORMAL; + break; + case TTF_ITERM: + *val = chip->dt.sys_term_curr_ma; + break; + case TTF_RBATT: + rc = fg_get_battery_resistance(fg, val); + break; + case 
TTF_VFLOAT: + *val = fg->bp.float_volt_uv; + break; + case TTF_CHG_TYPE: + *val = fg->charge_type; + break; + case TTF_CHG_STATUS: + *val = fg->charge_status; + break; + default: + pr_err_ratelimited("Unsupported parameter %d\n", param); + rc = -EINVAL; + break; + } + + return rc; +} + static int fg_gen4_store_learned_capacity(void *data, int64_t learned_cap_uah) { struct fg_gen4_chip *chip = data; @@ -670,12 +927,75 @@ static int fg_gen4_store_count(void *data, u16 *buf, int id, int length) /* All worker and helper functions below */ +static int fg_parse_dt_property_u32_array(struct device_node *node, + const char *prop_name, int *buf, int len) +{ + int rc; + + rc = of_property_count_elems_of_size(node, prop_name, sizeof(u32)); + if (rc < 0) { + if (rc == -EINVAL) + return 0; + else + return rc; + } else if (rc != len) { + pr_err("Incorrect length %d for %s, rc=%d\n", len, prop_name, + rc); + return -EINVAL; + } + + rc = of_property_read_u32_array(node, prop_name, buf, len); + if (rc < 0) { + pr_err("Error in reading %s, rc=%d\n", prop_name, rc); + return rc; + } + + return 0; +} + +static void fg_gen4_update_rslow_coeff(struct fg_dev *fg, int batt_temp) +{ + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); + int rc, i; + bool rslow_low = false; + u8 buf[RSLOW_NUM_COEFFS]; + + if (!fg->bp.rslow_normal_coeffs || !fg->bp.rslow_low_coeffs) + return; + + /* Update Rslow low coefficients when Tbatt is < 0 C */ + if (batt_temp < 0) + rslow_low = true; + + if (chip->rslow_low == rslow_low) + return; + + for (i = 0; i < RSLOW_NUM_COEFFS; i++) { + if (rslow_low) + buf[i] = fg->bp.rslow_low_coeffs[i] & 0xFF; + else + buf[i] = fg->bp.rslow_normal_coeffs[i] & 0xFF; + } + + rc = fg_sram_write(fg, RSLOW_COEFF_DISCHG_WORD, RSLOW_COEFF_LOW_OFFSET, + buf, RSLOW_NUM_COEFFS, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Failed to write RLOW_COEFF_DISCHG_WORD rc=%d\n", rc); + } else { + chip->rslow_low = rslow_low; + fg_dbg(fg, FG_STATUS, "Updated Rslow %s coefficients\n", + rslow_low ? 
"low" : "normal"); + } +} + +#define KI_COEFF_LOW_DISCHG_DEFAULT 428 #define KI_COEFF_MED_DISCHG_DEFAULT 245 #define KI_COEFF_HI_DISCHG_DEFAULT 123 static int fg_gen4_adjust_ki_coeff_dischg(struct fg_dev *fg) { struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); int rc, i, msoc; + int ki_coeff_low = KI_COEFF_LOW_DISCHG_DEFAULT; int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT; int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT; u8 val; @@ -692,36 +1012,58 @@ static int fg_gen4_adjust_ki_coeff_dischg(struct fg_dev *fg) if (fg->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) { for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) { if (msoc < chip->dt.ki_coeff_soc[i]) { + ki_coeff_low = chip->dt.ki_coeff_low_dischg[i]; ki_coeff_med = chip->dt.ki_coeff_med_dischg[i]; ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i]; } } } - fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val); - rc = fg_sram_write(fg, + if (ki_coeff_low > 0) { + fg_encode(fg->sp, FG_SRAM_KI_COEFF_LOW_DISCHG, ki_coeff_low, + &val); + rc = fg_sram_write(fg, + fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].addr_word, + fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].addr_byte, &val, + fg->sp[FG_SRAM_KI_COEFF_LOW_DISCHG].len, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_low, rc=%d\n", rc); + return rc; + } + fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_low %d\n", ki_coeff_low); + } + + if (ki_coeff_med > 0) { + fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, + &val); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word, fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val, fg->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing ki_coeff_med, rc=%d\n", rc); - return rc; + if (rc < 0) { + pr_err("Error in writing ki_coeff_med, rc=%d\n", rc); + return rc; + } + fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_med %d\n", ki_coeff_med); } - fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val); - rc = fg_sram_write(fg, + if (ki_coeff_hi > 0) { + fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, + &val); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word, fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val, fg->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc); - return rc; + if (rc < 0) { + pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc); + return rc; + } + fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_hi %d\n", ki_coeff_hi); } - fg_dbg(fg, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n", - ki_coeff_med, ki_coeff_hi); return 0; } @@ -731,7 +1073,7 @@ static int fg_gen4_get_batt_profile(struct fg_dev *fg) struct device_node *node = fg->dev->of_node; struct device_node *batt_node, *profile_node; const char *data; - int rc, len; + int rc, len, i, tuple_len; batt_node = of_find_node_by_name(node, "qcom,battery-data"); if (!batt_node) { @@ -806,6 +1148,117 @@ static int fg_gen4_get_batt_profile(struct fg_dev *fg) } } + /* + * Currently step charging thresholds should be read only for Vbatt + * based and not for SOC based. 
+ */ + if (!of_property_read_bool(profile_node, "qcom,soc-based-step-chg") && + of_find_property(profile_node, "qcom,step-chg-ranges", &len) && + fg->bp.float_volt_uv > 0 && fg->bp.fastchg_curr_ma > 0) { + len /= sizeof(u32); + tuple_len = len / (sizeof(struct range_data) / sizeof(u32)); + if (tuple_len <= 0 || tuple_len > MAX_STEP_CHG_ENTRIES) + return -EINVAL; + + mutex_lock(&chip->ttf->lock); + chip->ttf->step_chg_cfg = + kcalloc(len, sizeof(*chip->ttf->step_chg_cfg), + GFP_KERNEL); + if (!chip->ttf->step_chg_cfg) { + mutex_unlock(&chip->ttf->lock); + return -ENOMEM; + } + + chip->ttf->step_chg_data = + kcalloc(tuple_len, sizeof(*chip->ttf->step_chg_data), + GFP_KERNEL); + if (!chip->ttf->step_chg_data) { + kfree(chip->ttf->step_chg_cfg); + mutex_unlock(&chip->ttf->lock); + return -ENOMEM; + } + + rc = read_range_data_from_node(profile_node, + "qcom,step-chg-ranges", + chip->ttf->step_chg_cfg, + fg->bp.float_volt_uv, + fg->bp.fastchg_curr_ma * 1000); + if (rc < 0) { + pr_err("Error in reading qcom,step-chg-ranges from battery profile, rc=%d\n", + rc); + kfree(chip->ttf->step_chg_data); + kfree(chip->ttf->step_chg_cfg); + chip->ttf->step_chg_cfg = NULL; + mutex_unlock(&chip->ttf->lock); + return rc; + } + + chip->ttf->step_chg_num_params = tuple_len; + chip->ttf->step_chg_cfg_valid = true; + mutex_unlock(&chip->ttf->lock); + + if (chip->ttf->step_chg_cfg_valid) { + for (i = 0; i < tuple_len; i++) + pr_debug("Vbatt_low: %d Vbatt_high: %d FCC: %d\n", + chip->ttf->step_chg_cfg[i].low_threshold, + chip->ttf->step_chg_cfg[i].high_threshold, + chip->ttf->step_chg_cfg[i].value); + } + } + + if (of_find_property(profile_node, "qcom,therm-pull-up", NULL)) { + rc = of_property_read_u32(profile_node, "qcom,therm-pull-up", + &fg->bp.therm_pull_up_kohms); + if (rc < 0) { + pr_err("Couldn't read therm-pull-up, rc:%d\n", rc); + fg->bp.therm_pull_up_kohms = -EINVAL; + } + } + + if (of_find_property(profile_node, "qcom,rslow-normal-coeffs", NULL) && + of_find_property(profile_node, "qcom,rslow-low-coeffs", NULL)) { + if (!fg->bp.rslow_normal_coeffs) { + fg->bp.rslow_normal_coeffs = devm_kcalloc(fg->dev, + RSLOW_NUM_COEFFS, sizeof(u32), + GFP_KERNEL); + if (!fg->bp.rslow_normal_coeffs) + return -ENOMEM; + } + + if (!fg->bp.rslow_low_coeffs) { + fg->bp.rslow_low_coeffs = devm_kcalloc(fg->dev, + RSLOW_NUM_COEFFS, sizeof(u32), + GFP_KERNEL); + if (!fg->bp.rslow_low_coeffs) { + devm_kfree(fg->dev, fg->bp.rslow_normal_coeffs); + fg->bp.rslow_normal_coeffs = NULL; + return -ENOMEM; + } + } + + rc = fg_parse_dt_property_u32_array(profile_node, + "qcom,rslow-normal-coeffs", + fg->bp.rslow_normal_coeffs, RSLOW_NUM_COEFFS); + if (rc < 0) { + devm_kfree(fg->dev, fg->bp.rslow_normal_coeffs); + fg->bp.rslow_normal_coeffs = NULL; + devm_kfree(fg->dev, fg->bp.rslow_low_coeffs); + fg->bp.rslow_low_coeffs = NULL; + return rc; + } + + rc = fg_parse_dt_property_u32_array(profile_node, + "qcom,rslow-low-coeffs", + fg->bp.rslow_low_coeffs, RSLOW_NUM_COEFFS); + if (rc < 0) { + devm_kfree(fg->dev, fg->bp.rslow_normal_coeffs); + fg->bp.rslow_normal_coeffs = NULL; + devm_kfree(fg->dev, fg->bp.rslow_low_coeffs); + fg->bp.rslow_low_coeffs = NULL; + return rc; + } + } + data = of_get_property(profile_node, "qcom,fg-profile-data", &len); if (!data) { pr_err("No profile data available\n"); @@ -825,8 +1278,10 @@ static int fg_gen4_get_batt_profile(struct fg_dev *fg) static int fg_gen4_bp_params_config(struct fg_dev *fg) { + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); int rc, i; u8 buf, 
therm_coeffs[BATT_THERM_NUM_COEFFS * 2]; + u8 rslow_coeffs[RSLOW_NUM_COEFFS], val, mask; if (fg->bp.vbatt_full_mv > 0) { rc = fg_set_constant_chg_voltage(fg, @@ -859,6 +1314,70 @@ static int fg_gen4_bp_params_config(struct fg_dev *fg) } } + if (fg->bp.rslow_normal_coeffs && fg->bp.rslow_low_coeffs) { + rc = fg_sram_read(fg, RSLOW_COEFF_DISCHG_WORD, + RSLOW_COEFF_LOW_OFFSET, rslow_coeffs, + RSLOW_NUM_COEFFS, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Failed to read RLOW_COEFF_DISCHG_WORD rc=%d\n", + rc); + return rc; + } + + /* Read Rslow coefficients back and set the status */ + for (i = 0; i < RSLOW_NUM_COEFFS; i++) { + buf = fg->bp.rslow_low_coeffs[i] & 0xFF; + if (rslow_coeffs[i] == buf) { + chip->rslow_low = true; + } else { + chip->rslow_low = false; + break; + } + } + fg_dbg(fg, FG_STATUS, "Rslow_low: %d\n", chip->rslow_low); + } + + /* + * Since this SRAM word falls inside profile region, configure it after + * the profile is loaded. This parameter doesn't come from battery + * profile DT property. + */ + if (fg->wa_flags & PM8150B_V1_RSLOW_COMP_WA) { + val = 0; + mask = BIT(1); + rc = fg_sram_masked_write(fg, RSLOW_CONFIG_WORD, + RSLOW_CONFIG_OFFSET, mask, val, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing RSLOW_CONFIG_WORD, rc=%d\n", + rc); + return rc; + } + } + + if (fg->bp.therm_pull_up_kohms > 0) { + switch (fg->bp.therm_pull_up_kohms) { + case 30: + buf = BATT_THERM_PULL_UP_30K; + break; + case 100: + buf = BATT_THERM_PULL_UP_100K; + break; + case 400: + buf = BATT_THERM_PULL_UP_400K; + break; + default: + return -EINVAL; + } + + rc = fg_masked_write(fg, ADC_RR_BATT_THERM_BASE_CFG1(fg), + BATT_THERM_PULL_UP_MASK, buf); + if (rc < 0) { + pr_err("failed to write to 0x%04X, rc=%d\n", + ADC_RR_BATT_THERM_BASE_CFG1(fg), rc); + return rc; + } + } + return 0; } @@ -955,7 +1474,7 @@ static void profile_load_work(struct work_struct *work) struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); int64_t nom_cap_uah; - u8 val; + u8 val, buf[2]; int rc; vote(fg->awake_votable, PROFILE_LOAD, true, 0); @@ -1015,6 +1534,27 @@ static void profile_load_work(struct work_struct *work) fg_dbg(fg, FG_STATUS, "SOC is ready\n"); fg->profile_load_status = PROFILE_LOADED; + + if (fg->wa_flags & PM8150B_V1_DMA_WA) + msleep(1000); + + /* + * Whenever battery profile is loaded, read nominal capacity and write + * it to actual (or aged) capacity as it is outside the profile region + * and might contain OTP values. 
+ */ + rc = fg_sram_read(fg, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD, + NOM_CAP_OFFSET, rc); + } else { + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ACT_BATT_CAP].addr_word, + fg->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, buf, + fg->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT); + if (rc < 0) + pr_err("Error in writing to ACT_BATT_CAP rc=%d\n", rc); + } done: rc = fg_gen4_bp_params_config(fg); if (rc < 0) @@ -1032,6 +1572,7 @@ static void profile_load_work(struct work_struct *work) batt_psy_initialized(fg); fg_notify_charger(fg); + schedule_delayed_work(&chip->ttf->ttf_work, 10000); fg_dbg(fg, FG_STATUS, "profile loaded successfully"); out: fg->soc_reporting_ready = true; @@ -1100,6 +1641,53 @@ static void get_batt_psy_props(struct fg_dev *fg) } } +static int fg_gen4_esr_soh_update(struct fg_dev *fg) +{ + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); + int rc, msoc, esr_uohms; + + if (!fg->soc_reporting_ready || fg->battery_missing) { + chip->esr_actual = -EINVAL; + chip->esr_nominal = -EINVAL; + return 0; + } + + if (fg->charge_status == POWER_SUPPLY_STATUS_CHARGING) { + rc = fg_get_msoc(fg, &msoc); + if (rc < 0) { + pr_err("Error in getting msoc, rc=%d\n", rc); + return rc; + } + + if (msoc == ESR_SOH_SOC) { + rc = fg_get_sram_prop(fg, FG_SRAM_ESR_ACT, &esr_uohms); + if (rc < 0) { + pr_err("Error in getting esr_actual, rc=%d\n", + rc); + return rc; + } + chip->esr_actual = esr_uohms; + + rc = fg_get_sram_prop(fg, FG_SRAM_ESR_MDL, &esr_uohms); + if (rc < 0) { + pr_err("Error in getting esr_nominal, rc=%d\n", + rc); + chip->esr_actual = -EINVAL; + return rc; + } + chip->esr_nominal = esr_uohms; + + fg_dbg(fg, FG_STATUS, "esr_actual: %d esr_nominal: %d\n", + chip->esr_actual, chip->esr_nominal); + + if (fg->batt_psy) + power_supply_changed(fg->batt_psy); + } + } + + return 0; +} + static int fg_gen4_update_maint_soc(struct fg_dev *fg) { struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); @@ -1149,22 +1737,25 @@ static int fg_gen4_update_maint_soc(struct fg_dev *fg) static int fg_gen4_configure_full_soc(struct fg_dev *fg, int bsoc) { int rc; - u8 full_soc[2] = {0xFF, 0xFF}; + u8 full_soc[2] = {0xFF, 0xFF}, buf[2]; /* * Once SOC masking condition is cleared, FULL_SOC and MONOTONIC_SOC * needs to be updated to reflect the same. Write battery SOC to * FULL_SOC and write a full value to MONOTONIC_SOC. 
*/ - rc = fg_sram_write(fg, FULL_SOC_WORD, FULL_SOC_OFFSET, - (u8 *)&bsoc, 2, FG_IMA_ATOMIC); + fg_encode(fg->sp, FG_SRAM_FULL_SOC, bsoc, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_FULL_SOC].addr_word, + fg->sp[FG_SRAM_FULL_SOC].addr_byte, buf, + fg->sp[FG_SRAM_FULL_SOC].len, FG_IMA_ATOMIC); if (rc < 0) { pr_err("failed to write full_soc rc=%d\n", rc); return rc; } - rc = fg_sram_write(fg, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET, - full_soc, 2, FG_IMA_ATOMIC); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_MONOTONIC_SOC].addr_word, + fg->sp[FG_SRAM_MONOTONIC_SOC].addr_byte, full_soc, + fg->sp[FG_SRAM_MONOTONIC_SOC].len, FG_IMA_ATOMIC); if (rc < 0) { pr_err("failed to write monotonic_soc rc=%d\n", rc); return rc; @@ -1392,6 +1983,7 @@ static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data) static irqreturn_t fg_delta_esr_irq_handler(int irq, void *data) { struct fg_dev *fg = data; + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); int esr_mohms, rc; rc = fg_get_battery_resistance(fg, &esr_mohms); @@ -1400,6 +1992,11 @@ static irqreturn_t fg_delta_esr_irq_handler(int irq, void *data) fg_dbg(fg, FG_IRQ, "irq %d triggered esr_mohms: %d\n", irq, esr_mohms); + if (chip->esr_fast_calib) { + vote(fg->awake_votable, ESR_CALIB, true, 0); + schedule_work(&chip->esr_calib_work); + } + return IRQ_HANDLED; } @@ -1422,6 +2019,7 @@ static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data) static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data) { struct fg_dev *fg = data; + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); u8 status; int rc; @@ -1440,6 +2038,15 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data) fg->profile_load_status = PROFILE_NOT_LOADED; fg->soc_reporting_ready = false; fg->batt_id_ohms = -EINVAL; + + mutex_lock(&chip->ttf->lock); + chip->ttf->step_chg_cfg_valid = false; + chip->ttf->step_chg_num_params = 0; + kfree(chip->ttf->step_chg_cfg); + chip->ttf->step_chg_cfg = NULL; + kfree(chip->ttf->step_chg_data); + chip->ttf->step_chg_data = NULL; + mutex_unlock(&chip->ttf->lock); return IRQ_HANDLED; } @@ -1483,6 +2090,7 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data) if (batt_psy_initialized(fg)) power_supply_changed(fg->batt_psy); + fg_gen4_update_rslow_coeff(fg, batt_temp); return IRQ_HANDLED; } @@ -1548,6 +2156,10 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in charge_full_update, rc=%d\n", rc); + rc = fg_gen4_esr_soh_update(fg); + if (rc < 0) + pr_err("Error in updating ESR for SOH, rc=%d\n", rc); + rc = fg_gen4_update_maint_soc(fg); if (rc < 0) pr_err("Error in updating maint_soc, rc=%d\n", rc); @@ -1631,6 +2243,7 @@ static struct fg_irq_info fg_irqs[FG_GEN4_IRQ_MAX] = { [ESR_DELTA_IRQ] = { .name = "esr-delta", .handler = fg_delta_esr_irq_handler, + .wakeable = true, }, [VBATT_LOW_IRQ] = { .name = "vbatt-low", @@ -1718,484 +2331,355 @@ static bool is_batt_empty(struct fg_dev *fg) return ((vbatt_uv < chip->dt.cutoff_volt_mv * 1000) ? 
true : false); } -static void fg_ttf_update(struct fg_dev *fg) +static int fg_gen4_configure_esr_cal_soc(struct fg_dev *fg, int soc_min, + int soc_max) { - struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); int rc; - int delay_ms; - union power_supply_propval prop = {0, }; - int online = 0; - - if (usb_psy_initialized(fg)) { - rc = power_supply_get_property(fg->usb_psy, - POWER_SUPPLY_PROP_ONLINE, &prop); - if (rc < 0) { - pr_err("Couldn't read usb ONLINE prop rc=%d\n", rc); - return; - } - - online = online || prop.intval; - } - - if (pc_port_psy_initialized(fg)) { - rc = power_supply_get_property(fg->pc_port_psy, - POWER_SUPPLY_PROP_ONLINE, &prop); - if (rc < 0) { - pr_err("Couldn't read pc_port ONLINE prop rc=%d\n", rc); - return; - } + u8 buf[2]; - online = online || prop.intval; + fg_encode(fg->sp, FG_SRAM_ESR_CAL_SOC_MIN, soc_min, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].addr_word, + fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_SOC_MIN].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_CAL_SOC_MIN, rc=%d\n", rc); + return rc; } - if (dc_psy_initialized(fg)) { - rc = power_supply_get_property(fg->dc_psy, - POWER_SUPPLY_PROP_ONLINE, &prop); - if (rc < 0) { - pr_err("Couldn't read dc ONLINE prop rc=%d\n", rc); - return; - } - - online = online || prop.intval; + fg_encode(fg->sp, FG_SRAM_ESR_CAL_SOC_MAX, soc_max, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].addr_word, + fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_SOC_MAX].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_CAL_SOC_MAX, rc=%d\n", rc); + return rc; } - - if (fg->online_status == online) - return; - - fg->online_status = online; - if (online) - /* wait 35 seconds for the input to settle */ - delay_ms = 35000; - else - /* wait 5 seconds for current to settle during discharge */ - delay_ms = 5000; - - vote(fg->awake_votable, TTF_PRIMING, true, 0); - cancel_delayed_work_sync(&chip->ttf_work); - mutex_lock(&chip->ttf.lock); - fg_circ_buf_clr(&chip->ttf.ibatt); - fg_circ_buf_clr(&chip->ttf.vbatt); - chip->ttf.last_ttf = 0; - chip->ttf.last_ms = 0; - mutex_unlock(&chip->ttf.lock); - schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(delay_ms)); + return 0; } -static void status_change_work(struct work_struct *work) +static int fg_gen4_configure_esr_cal_temp(struct fg_dev *fg, int temp_min, + int temp_max) { - struct fg_dev *fg = container_of(work, - struct fg_dev, status_change_work); - struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); - int rc, batt_soc, batt_temp; - bool input_present, qnovo_en; - - if (!batt_psy_initialized(fg)) { - fg_dbg(fg, FG_STATUS, "Charger not available?!\n"); - goto out; - } - - if (!fg->soc_reporting_ready) { - fg_dbg(fg, FG_STATUS, "Profile load is not complete yet\n"); - goto out; - } - - get_batt_psy_props(fg); + int rc; + u8 buf[2]; - rc = fg_get_sram_prop(fg, FG_SRAM_BATT_SOC, &batt_soc); + fg_encode(fg->sp, FG_SRAM_ESR_CAL_TEMP_MIN, temp_min, buf); + rc = fg_sram_write(fg, fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].addr_word, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MIN].len, FG_IMA_DEFAULT); if (rc < 0) { - pr_err("Failed to read battery soc rc: %d\n", rc); - goto out; + pr_err("Error in writing ESR_CAL_TEMP_MIN, rc=%d\n", rc); + return rc; } - rc = fg_gen4_get_battery_temp(fg, &batt_temp); + fg_encode(fg->sp, FG_SRAM_ESR_CAL_TEMP_MAX, temp_max, buf); + rc = fg_sram_write(fg, 
fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].addr_word, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].addr_byte, buf, + fg->sp[FG_SRAM_ESR_CAL_TEMP_MAX].len, FG_IMA_DEFAULT); if (rc < 0) { - pr_err("Failed to read battery temp rc: %d\n", rc); - goto out; + pr_err("Error in writing ESR_CAL_TEMP_MAX, rc=%d\n", rc); + return rc; } - input_present = is_input_present(fg); - qnovo_en = is_qnovo_en(fg); - cycle_count_update(chip->counter, (u32)batt_soc >> 24, - fg->charge_status, fg->charge_done, input_present); - - if (fg->charge_status != fg->prev_charge_status) - cap_learning_update(chip->cl, batt_temp, batt_soc, - fg->charge_status, fg->charge_done, input_present, - qnovo_en); - - rc = fg_gen4_charge_full_update(fg); - if (rc < 0) - pr_err("Error in charge_full_update, rc=%d\n", rc); - - rc = fg_gen4_adjust_ki_coeff_dischg(fg); - if (rc < 0) - pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); - - rc = fg_gen4_adjust_recharge_soc(chip); - if (rc < 0) - pr_err("Error in adjusting recharge SOC, rc=%d\n", rc); - - fg_ttf_update(fg); - fg->prev_charge_status = fg->charge_status; -out: - fg_dbg(fg, FG_STATUS, "charge_status:%d charge_type:%d charge_done:%d\n", - fg->charge_status, fg->charge_type, fg->charge_done); - pm_relax(fg->dev); + return 0; } -#define HOURS_TO_SECONDS 3600 -#define OCV_SLOPE_UV 10869 -#define MILLI_UNIT 1000 -#define MICRO_UNIT 1000000 -#define NANO_UNIT 1000000000 -static int fg_get_time_to_full_locked(struct fg_dev *fg, int *val) +#define ESR_CAL_TEMP_MIN -127 +#define ESR_CAL_TEMP_MAX 127 +static int fg_gen4_esr_fast_calib_config(struct fg_gen4_chip *chip, bool en) { - struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); - int rc, ibatt_avg, vbatt_avg, rbatt, msoc, full_soc, act_cap_mah, - i_cc2cv = 0, soc_cc2cv, tau, divisor, iterm, ttf_mode, - i, soc_per_step, msoc_this_step, msoc_next_step, - ibatt_this_step, t_predicted_this_step, ttf_slope, - t_predicted_cv, t_predicted = 0; - s64 delta_ms; - - if (!fg->soc_reporting_ready) - return -ENODATA; - - if (fg->bp.float_volt_uv <= 0) { - pr_err("battery profile is not loaded\n"); - return -ENODATA; - } - - if (!batt_psy_initialized(fg)) { - fg_dbg(fg, FG_TTF, "charger is not available\n"); - return -ENODATA; - } - - rc = fg_gen4_get_prop_capacity(fg, &msoc); - if (rc < 0) { - pr_err("failed to get msoc rc=%d\n", rc); - return rc; - } - fg_dbg(fg, FG_TTF, "msoc=%d\n", msoc); - - /* the battery is considered full if the SOC is 100% */ - if (msoc >= 100) { - *val = 0; - return 0; - } + struct fg_dev *fg = &chip->fg; + int rc, esr_timer_chg_init, esr_timer_chg_max, esr_timer_dischg_init, + esr_timer_dischg_max, esr_fast_cal_ms, esr_cal_soc_min, + esr_cal_soc_max, esr_cal_temp_min, esr_cal_temp_max; + u8 val, mask; + + esr_timer_chg_init = esr_timer_chg_max = -EINVAL; + esr_timer_dischg_init = esr_timer_dischg_max = -EINVAL; + if (en) { + esr_timer_chg_init = chip->dt.esr_timer_chg_fast[TIMER_RETRY]; + esr_timer_chg_max = chip->dt.esr_timer_chg_fast[TIMER_MAX]; + esr_timer_dischg_init = + chip->dt.esr_timer_dischg_fast[TIMER_RETRY]; + esr_timer_dischg_max = + chip->dt.esr_timer_dischg_fast[TIMER_MAX]; + + esr_cal_soc_min = 0; + esr_cal_soc_max = FULL_SOC_RAW; + esr_cal_temp_min = ESR_CAL_TEMP_MIN; + esr_cal_temp_max = ESR_CAL_TEMP_MAX; + + vote(chip->delta_esr_irq_en_votable, DELTA_ESR_IRQ_VOTER, + true, 0); + chip->delta_esr_count = 0; + chip->esr_fast_calib_done = false; + } else { + chip->esr_fast_calib_done = true; - if (is_qnovo_en(fg)) - ttf_mode = TTF_MODE_QNOVO; - else - ttf_mode = TTF_MODE_NORMAL; + 
esr_timer_chg_init = chip->dt.esr_timer_chg_slow[TIMER_RETRY]; + esr_timer_chg_max = chip->dt.esr_timer_chg_slow[TIMER_MAX]; + esr_timer_dischg_init = + chip->dt.esr_timer_dischg_slow[TIMER_RETRY]; + esr_timer_dischg_max = + chip->dt.esr_timer_dischg_slow[TIMER_MAX]; - /* when switching TTF algorithms the TTF needs to be reset */ - if (chip->ttf.mode != ttf_mode) { - fg_circ_buf_clr(&chip->ttf.ibatt); - fg_circ_buf_clr(&chip->ttf.vbatt); - chip->ttf.last_ttf = 0; - chip->ttf.last_ms = 0; - chip->ttf.mode = ttf_mode; - } + esr_cal_soc_min = chip->dt.esr_cal_soc_thresh[0]; + esr_cal_soc_max = chip->dt.esr_cal_soc_thresh[1]; + esr_cal_temp_min = chip->dt.esr_cal_temp_thresh[0]; + esr_cal_temp_max = chip->dt.esr_cal_temp_thresh[1]; - /* at least 10 samples are required to produce a stable IBATT */ - if (chip->ttf.ibatt.size < 10) { - *val = -1; - return 0; + vote(chip->delta_esr_irq_en_votable, DELTA_ESR_IRQ_VOTER, + false, 0); } - rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg); + rc = fg_set_esr_timer(fg, esr_timer_chg_init, esr_timer_chg_max, true, + FG_IMA_DEFAULT); if (rc < 0) { - pr_err("failed to get IBATT AVG rc=%d\n", rc); + pr_err("Error in setting ESR charge timer, rc=%d\n", + rc); return rc; } - rc = fg_circ_buf_median(&chip->ttf.vbatt, &vbatt_avg); + rc = fg_set_esr_timer(fg, esr_timer_dischg_init, esr_timer_dischg_max, + false, FG_IMA_DEFAULT); if (rc < 0) { - pr_err("failed to get VBATT AVG rc=%d\n", rc); + pr_err("Error in setting ESR discharge timer, rc=%d\n", + rc); return rc; } - ibatt_avg = -ibatt_avg / MILLI_UNIT; - vbatt_avg /= MILLI_UNIT; - - /* clamp ibatt_avg to iterm */ - if (ibatt_avg < abs(chip->dt.sys_term_curr_ma)) - ibatt_avg = abs(chip->dt.sys_term_curr_ma); - - fg_dbg(fg, FG_TTF, "ibatt_avg=%d\n", ibatt_avg); - fg_dbg(fg, FG_TTF, "vbatt_avg=%d\n", vbatt_avg); - - rc = fg_get_battery_resistance(fg, &rbatt); + rc = fg_gen4_configure_esr_cal_soc(fg, esr_cal_soc_min, + esr_cal_soc_max); if (rc < 0) { - pr_err("failed to get battery resistance rc=%d\n", rc); + pr_err("Error in configuring SOC thresholds, rc=%d\n", + rc); return rc; } - rbatt /= MILLI_UNIT; - fg_dbg(fg, FG_TTF, "rbatt=%d\n", rbatt); - - rc = fg_get_sram_prop(fg, FG_SRAM_ACT_BATT_CAP, &act_cap_mah); + rc = fg_gen4_configure_esr_cal_temp(fg, esr_cal_temp_min, + esr_cal_temp_max); if (rc < 0) { - pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc); + pr_err("Error in configuring temperature thresholds, rc=%d\n", + rc); return rc; } - rc = fg_get_sram_prop(fg, FG_SRAM_FULL_SOC, &full_soc); + /* + * Disable ESR discharging timer and ESR pulsing during + * discharging when ESR fast calibration is disabled. Otherwise, keep + * it enabled so that ESR pulses can happen during discharging. + */ + val = en ? 
BIT(6) | BIT(7) : 0; + mask = BIT(6) | BIT(7); + rc = fg_sram_masked_write(fg, SYS_CONFIG_WORD, + SYS_CONFIG_OFFSET, mask, val, FG_IMA_DEFAULT); if (rc < 0) { - pr_err("failed to get full soc rc=%d\n", rc); + pr_err("Error in writing SYS_CONFIG_WORD, rc=%d\n", rc); return rc; } - full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY, - FULL_SOC_RAW); - act_cap_mah = full_soc * act_cap_mah / 100; - fg_dbg(fg, FG_TTF, "act_cap_mah=%d\n", act_cap_mah); - - /* estimated battery current at the CC to CV transition */ - switch (chip->ttf.mode) { - case TTF_MODE_NORMAL: - i_cc2cv = ibatt_avg * vbatt_avg / - max(MILLI_UNIT, fg->bp.float_volt_uv / MILLI_UNIT); - break; - case TTF_MODE_QNOVO: - i_cc2cv = min( - chip->ttf.cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT, - ibatt_avg * vbatt_avg / - max(MILLI_UNIT, fg->bp.float_volt_uv / MILLI_UNIT)); - break; - default: - pr_err("TTF mode %d is not supported\n", chip->ttf.mode); - break; - } - fg_dbg(fg, FG_TTF, "i_cc2cv=%d\n", i_cc2cv); - - /* if we are already in CV state then we can skip estimating CC */ - if (fg->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) - goto cv_estimate; - /* estimated SOC at the CC to CV transition */ - soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV); - soc_cc2cv = 100 - soc_cc2cv; - fg_dbg(fg, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv); + if (en) { + /* Set ESR fast calibration timer to 300 seconds as default */ + esr_fast_cal_ms = 300000; + if (chip->dt.esr_timer_chg_fast > 0 && + chip->dt.delta_esr_disable_count > 0) + esr_fast_cal_ms = 3 * chip->dt.delta_esr_disable_count * + chip->dt.esr_timer_chg_fast[TIMER_MAX] * 1000; - switch (chip->ttf.mode) { - case TTF_MODE_NORMAL: - if (soc_cc2cv - msoc <= 0) - goto cv_estimate; - - divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100); - t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) * - HOURS_TO_SECONDS, divisor); - break; - case TTF_MODE_QNOVO: - soc_per_step = 100 / MAX_CC_STEPS; - for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) { - msoc_next_step = (i + 1) * soc_per_step; - if (i == msoc / soc_per_step) - msoc_this_step = msoc; - else - msoc_this_step = i * soc_per_step; - - /* scale ibatt by 85% to account for discharge pulses */ - ibatt_this_step = min( - chip->ttf.cc_step.arr[i] / MILLI_UNIT, - ibatt_avg) * 85 / 100; - divisor = max(100, ibatt_this_step * 100); - t_predicted_this_step = div_s64((s64)act_cap_mah * - (msoc_next_step - msoc_this_step) * - HOURS_TO_SECONDS, divisor); - t_predicted += t_predicted_this_step; - fg_dbg(fg, FG_TTF, "[%d, %d] ma=%d t=%d\n", - msoc_this_step, msoc_next_step, - ibatt_this_step, t_predicted_this_step); - } - break; - default: - pr_err("TTF mode %d is not supported\n", chip->ttf.mode); - break; + alarm_start_relative(&chip->esr_fast_cal_timer, + ms_to_ktime(esr_fast_cal_ms)); + } else { + alarm_cancel(&chip->esr_fast_cal_timer); } -cv_estimate: - fg_dbg(fg, FG_TTF, "t_predicted_cc=%d\n", t_predicted); - - iterm = max(100, abs(chip->dt.sys_term_curr_ma) + 200); - fg_dbg(fg, FG_TTF, "iterm=%d\n", iterm); + fg_dbg(fg, FG_STATUS, "%sabling ESR fast calibration\n", + en ? 
"En" : "Dis"); + return 0; +} - if (fg->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) - tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm); - else - tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm); +static enum alarmtimer_restart fg_esr_fast_cal_timer(struct alarm *alarm, + ktime_t time) +{ + struct fg_gen4_chip *chip = container_of(alarm, struct fg_gen4_chip, + esr_fast_cal_timer); + struct fg_dev *fg = &chip->fg; - rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), tau, &tau); - if (rc < 0) { - pr_err("failed to interpolate tau rc=%d\n", rc); - return rc; - } + if (!chip->esr_fast_calib_done) { + fg_dbg(fg, FG_STATUS, "ESR fast calibration timer expired\n"); - /* tau is scaled linearly from 95% to 100% SOC */ - if (msoc >= 95) - tau = tau * 2 * (100 - msoc) / 10; - - fg_dbg(fg, FG_TTF, "tau=%d\n", tau); - t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau * - HOURS_TO_SECONDS, NANO_UNIT); - fg_dbg(fg, FG_TTF, "t_predicted_cv=%d\n", t_predicted_cv); - t_predicted += t_predicted_cv; - - fg_dbg(fg, FG_TTF, "t_predicted_prefilter=%d\n", t_predicted); - if (chip->ttf.last_ms != 0) { - delta_ms = ktime_ms_delta(ktime_get_boottime(), - ms_to_ktime(chip->ttf.last_ms)); - if (delta_ms > 10000) { - ttf_slope = div64_s64( - ((s64)t_predicted - chip->ttf.last_ttf) * - MICRO_UNIT, delta_ms); - if (ttf_slope > -100) - ttf_slope = -100; - else if (ttf_slope < -2000) - ttf_slope = -2000; - - t_predicted = div_s64( - (s64)ttf_slope * delta_ms, MICRO_UNIT) + - chip->ttf.last_ttf; - fg_dbg(fg, FG_TTF, "ttf_slope=%d\n", ttf_slope); - } else { - t_predicted = chip->ttf.last_ttf; - } + /* + * We cannot vote for awake votable here as that takes + * a mutex lock and this is executed in an atomic context. + */ + pm_stay_awake(fg->dev); + chip->esr_fast_cal_timer_expired = true; + schedule_work(&chip->esr_calib_work); } - /* clamp the ttf to 0 */ - if (t_predicted < 0) - t_predicted = 0; - - fg_dbg(fg, FG_TTF, "t_predicted_postfilter=%d\n", t_predicted); - *val = t_predicted; - return 0; + return ALARMTIMER_NORESTART; } -static int fg_get_time_to_full(struct fg_dev *fg, int *val) +static void esr_calib_work(struct work_struct *work) { - struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); - int rc; + struct fg_gen4_chip *chip = container_of(work, struct fg_gen4_chip, + esr_calib_work); + struct fg_dev *fg = &chip->fg; + int rc, fg_esr_meas_diff; + s16 esr_raw, esr_char_raw, esr_delta, esr_meas_diff, esr_filtered; + u8 buf[2]; - mutex_lock(&chip->ttf.lock); - rc = fg_get_time_to_full_locked(fg, val); - mutex_unlock(&chip->ttf.lock); - return rc; -} + if (chip->delta_esr_count > chip->dt.delta_esr_disable_count || + chip->esr_fast_calib_done) { + fg_dbg(fg, FG_STATUS, "delta_esr_count: %d esr_fast_calib_done:%d\n", + chip->delta_esr_count, chip->esr_fast_calib_done); + goto out; + } -static void ttf_work(struct work_struct *work) -{ - struct fg_gen4_chip *chip = container_of(work, - struct fg_gen4_chip, ttf_work.work); - struct fg_dev *fg = &chip->fg; - int rc, ibatt_now, vbatt_now, ttf; - ktime_t ktime_now; + /* + * If the number of delta ESR interrupts fired is more than the count + * to disable the interrupt OR ESR fast calibration timer is expired, + * disable ESR fast calibration. 
+ */ + if (chip->delta_esr_count >= chip->dt.delta_esr_disable_count || + chip->esr_fast_cal_timer_expired) { + rc = fg_gen4_esr_fast_calib_config(chip, false); + if (rc < 0) + pr_err("Error in configuring esr_fast_calib, rc=%d\n", + rc); + + if (chip->esr_fast_cal_timer_expired) { + pm_relax(fg->dev); + chip->esr_fast_cal_timer_expired = false; + } - mutex_lock(&chip->ttf.lock); - if (fg->charge_status != POWER_SUPPLY_STATUS_CHARGING && - fg->charge_status != POWER_SUPPLY_STATUS_DISCHARGING) - goto end_work; + goto out; + } + + rc = fg_sram_read(fg, ESR_WORD, ESR_OFFSET, buf, 2, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in reading ESR, rc=%d\n", rc); + goto out; + } + esr_raw = buf[1] << 8 | buf[0]; - rc = fg_get_battery_current(fg, &ibatt_now); + rc = fg_sram_read(fg, ESR_CHAR_WORD, ESR_CHAR_OFFSET, buf, 2, + FG_IMA_DEFAULT); if (rc < 0) { - pr_err("failed to get battery current, rc=%d\n", rc); - goto end_work; + pr_err("Error in reading ESR_CHAR, rc=%d\n", rc); + goto out; } + esr_char_raw = buf[1] << 8 | buf[0]; + + esr_meas_diff = esr_raw - esr_char_raw; - rc = fg_get_battery_voltage(fg, &vbatt_now); + rc = fg_sram_read(fg, ESR_DELTA_DISCHG_WORD, ESR_DELTA_DISCHG_OFFSET, + buf, 2, FG_IMA_DEFAULT); if (rc < 0) { - pr_err("failed to get battery voltage, rc=%d\n", rc); - goto end_work; + pr_err("Error in reading ESR_DELTA_DISCHG, rc=%d\n", rc); + goto out; } + esr_delta = buf[1] << 8 | buf[0]; + fg_dbg(fg, FG_STATUS, "esr_raw: 0x%x esr_char_raw: 0x%x esr_meas_diff: 0x%x esr_delta: 0x%x\n", + esr_raw, esr_char_raw, esr_meas_diff, esr_delta); - fg_circ_buf_add(&chip->ttf.ibatt, ibatt_now); - fg_circ_buf_add(&chip->ttf.vbatt, vbatt_now); + fg_esr_meas_diff = esr_delta - esr_meas_diff; + esr_filtered = fg_esr_meas_diff >> chip->dt.esr_filter_factor; + esr_delta = esr_delta - esr_filtered; - if (fg->charge_status == POWER_SUPPLY_STATUS_CHARGING) { - rc = fg_get_time_to_full_locked(fg, &ttf); - if (rc < 0) { - pr_err("failed to get ttf, rc=%d\n", rc); - goto end_work; - } + /* Bound the limits */ + if (esr_delta < SHRT_MAX) + esr_delta = SHRT_MAX; + else if (esr_delta > SHRT_MIN) + esr_delta = SHRT_MIN; - /* keep the wake lock and prime the IBATT and VBATT buffers */ - if (ttf < 0) { - /* delay for one FG cycle */ - schedule_delayed_work(&chip->ttf_work, - msecs_to_jiffies(1000)); - mutex_unlock(&chip->ttf.lock); - return; - } + fg_dbg(fg, FG_STATUS, "fg_esr_meas_diff: 0x%x esr_filt: 0x%x esr_delta_new: 0x%x\n", + fg_esr_meas_diff, esr_filtered, esr_delta); - /* update the TTF reference point every minute */ - ktime_now = ktime_get_boottime(); - if (ktime_ms_delta(ktime_now, - ms_to_ktime(chip->ttf.last_ms)) > 60000 || - chip->ttf.last_ms == 0) { - chip->ttf.last_ttf = ttf; - chip->ttf.last_ms = ktime_to_ms(ktime_now); - } + buf[0] = esr_delta & 0xff; + buf[1] = (esr_delta >> 8) & 0xff; + rc = fg_sram_write(fg, ESR_DELTA_DISCHG_WORD, ESR_DELTA_DISCHG_OFFSET, + buf, 2, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_DELTA_DISCHG, rc=%d\n", rc); + goto out; + } + + rc = fg_sram_write(fg, ESR_DELTA_CHG_WORD, ESR_DELTA_CHG_OFFSET, + buf, 2, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ESR_DELTA_CHG, rc=%d\n", rc); + goto out; } - /* recurse every 10 seconds */ - schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(10000)); -end_work: - vote(fg->awake_votable, TTF_PRIMING, false, 0); - mutex_unlock(&chip->ttf.lock); + chip->delta_esr_count++; + fg_dbg(fg, FG_STATUS, "Wrote ESR delta [0x%x 0x%x]\n", buf[0], buf[1]); +out: + vote(fg->awake_votable, 
ESR_CALIB, false, 0); } -#define CENTI_ICORRECT_C0 105 -#define CENTI_ICORRECT_C1 20 -static int fg_get_time_to_empty(struct fg_dev *fg, int *val) +static void status_change_work(struct work_struct *work) { + struct fg_dev *fg = container_of(work, + struct fg_dev, status_change_work); struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); - int rc, ibatt_avg, msoc, full_soc, act_cap_mah, divisor; + int rc, batt_soc, batt_temp; + bool input_present, qnovo_en; - rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg); - if (rc < 0) { - /* try to get instantaneous current */ - rc = fg_get_battery_current(fg, &ibatt_avg); - if (rc < 0) { - pr_err("failed to get battery current, rc=%d\n", rc); - return rc; - } + if (!batt_psy_initialized(fg)) { + fg_dbg(fg, FG_STATUS, "Charger not available?!\n"); + goto out; } - ibatt_avg /= MILLI_UNIT; - /* clamp ibatt_avg to 100mA */ - if (ibatt_avg < 100) - ibatt_avg = 100; - - rc = fg_gen4_get_prop_capacity(fg, &msoc); - if (rc < 0) { - pr_err("Error in getting capacity, rc=%d\n", rc); - return rc; + if (!fg->soc_reporting_ready) { + fg_dbg(fg, FG_STATUS, "Profile load is not complete yet\n"); + goto out; } - rc = fg_get_sram_prop(fg, FG_SRAM_ACT_BATT_CAP, &act_cap_mah); + get_batt_psy_props(fg); + + rc = fg_get_sram_prop(fg, FG_SRAM_BATT_SOC, &batt_soc); if (rc < 0) { - pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc); - return rc; + pr_err("Failed to read battery soc rc: %d\n", rc); + goto out; } - rc = fg_get_sram_prop(fg, FG_SRAM_FULL_SOC, &full_soc); + rc = fg_gen4_get_battery_temp(fg, &batt_temp); if (rc < 0) { - pr_err("failed to get full soc rc=%d\n", rc); - return rc; + pr_err("Failed to read battery temp rc: %d\n", rc); + goto out; } - full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY, - FULL_SOC_RAW); - act_cap_mah = full_soc * act_cap_mah / 100; - divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc; - divisor = ibatt_avg * divisor / 100; - divisor = max(100, divisor); - *val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor; - return 0; -} + input_present = is_input_present(fg); + qnovo_en = is_qnovo_en(fg); + cycle_count_update(chip->counter, (u32)batt_soc >> 24, + fg->charge_status, fg->charge_done, input_present); + + if (fg->charge_status != fg->prev_charge_status) + cap_learning_update(chip->cl, batt_temp, batt_soc, + fg->charge_status, fg->charge_done, input_present, + qnovo_en); + + rc = fg_gen4_charge_full_update(fg); + if (rc < 0) + pr_err("Error in charge_full_update, rc=%d\n", rc); + + rc = fg_gen4_adjust_ki_coeff_dischg(fg); + if (rc < 0) + pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); + + rc = fg_gen4_adjust_recharge_soc(chip); + if (rc < 0) + pr_err("Error in adjusting recharge SOC, rc=%d\n", rc); + ttf_update(chip->ttf, input_present); + fg->prev_charge_status = fg->charge_status; +out: + fg_dbg(fg, FG_STATUS, "charge_status:%d charge_type:%d charge_done:%d\n", + fg->charge_status, fg->charge_type, fg->charge_done); + pm_relax(fg->dev); +} static void sram_dump_work(struct work_struct *work) { @@ -2310,6 +2794,46 @@ static struct kernel_param_ops fg_restart_ops = { module_param_cb(restart, &fg_restart_ops, &fg_restart_mp, 0644); +static int fg_esr_fast_cal_sysfs(const char *val, const struct kernel_param *kp) +{ + int rc; + struct power_supply *bms_psy; + struct fg_gen4_chip *chip; + bool old_val = fg_esr_fast_cal_en; + + rc = param_set_bool(val, kp); + if (rc) { + pr_err("Unable to set fg_sram_dump: %d\n", rc); + return rc; + } + + if (fg_esr_fast_cal_en == old_val) + 
return 0; + + bms_psy = power_supply_get_by_name("bms"); + if (!bms_psy) { + pr_err("bms psy not found\n"); + return -ENODEV; + } + + chip = power_supply_get_drvdata(bms_psy); + if (!chip) + return -ENODEV; + + rc = fg_gen4_esr_fast_calib_config(chip, fg_esr_fast_cal_en); + if (rc < 0) + return rc; + + return 0; +} + +static struct kernel_param_ops fg_esr_cal_ops = { + .set = fg_esr_fast_cal_sysfs, + .get = param_get_bool, +}; + +module_param_cb(esr_fast_cal_en, &fg_esr_cal_ops, &fg_esr_fast_cal_en, 0644); + /* All power supply functions here */ static int fg_psy_get_property(struct power_supply *psy, @@ -2343,6 +2867,12 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_RESISTANCE: rc = fg_get_battery_resistance(fg, &pval->intval); break; + case POWER_SUPPLY_PROP_ESR_ACTUAL: + pval->intval = chip->esr_actual; + break; + case POWER_SUPPLY_PROP_ESR_NOMINAL: + pval->intval = chip->esr_nominal; + break; case POWER_SUPPLY_PROP_VOLTAGE_OCV: rc = fg_get_sram_prop(fg, FG_SRAM_OCV, &pval->intval); break; @@ -2385,6 +2915,9 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SOC_REPORTING_READY: pval->intval = fg->soc_reporting_ready; break; + case POWER_SUPPLY_PROP_SOH: + pval->intval = chip->soh; + break; case POWER_SUPPLY_PROP_DEBUG_BATTERY: pval->intval = is_debug_batt_id(fg); break; @@ -2392,10 +2925,24 @@ static int fg_psy_get_property(struct power_supply *psy, rc = fg_get_sram_prop(fg, FG_SRAM_VBATT_FULL, &pval->intval); break; case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: - rc = fg_get_time_to_full(fg, &pval->intval); + rc = ttf_get_time_to_full(chip->ttf, &pval->intval); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: - rc = fg_get_time_to_empty(fg, &pval->intval); + rc = ttf_get_time_to_empty(chip->ttf, &pval->intval); + break; + case POWER_SUPPLY_PROP_CC_STEP: + if ((chip->ttf->cc_step.sel >= 0) && + (chip->ttf->cc_step.sel < MAX_CC_STEPS)) { + pval->intval = + chip->ttf->cc_step.arr[chip->ttf->cc_step.sel]; + } else { + pr_err("cc_step_sel is out of bounds [0, %d]\n", + chip->ttf->cc_step.sel); + return -EINVAL; + } + break; + case POWER_SUPPLY_PROP_CC_STEP_SEL: + pval->intval = chip->ttf->cc_step.sel; break; default: pr_err("unsupported property %d\n", psp); @@ -2432,6 +2979,35 @@ static int fg_psy_set_property(struct power_supply *psy, chip->cl->learned_cap_uah = pval->intval; mutex_unlock(&chip->cl->lock); break; + case POWER_SUPPLY_PROP_CC_STEP: + if ((chip->ttf->cc_step.sel >= 0) && + (chip->ttf->cc_step.sel < MAX_CC_STEPS)) { + chip->ttf->cc_step.arr[chip->ttf->cc_step.sel] = + pval->intval; + } else { + pr_err("cc_step_sel is out of bounds [0, %d]\n", + chip->ttf->cc_step.sel); + return -EINVAL; + } + break; + case POWER_SUPPLY_PROP_CC_STEP_SEL: + if ((pval->intval >= 0) && (pval->intval < MAX_CC_STEPS)) { + chip->ttf->cc_step.sel = pval->intval; + } else { + pr_err("cc_step_sel is out of bounds [0, %d]\n", + pval->intval); + return -EINVAL; + } + break; + case POWER_SUPPLY_PROP_ESR_ACTUAL: + chip->esr_actual = pval->intval; + break; + case POWER_SUPPLY_PROP_ESR_NOMINAL: + chip->esr_nominal = pval->intval; + break; + case POWER_SUPPLY_PROP_SOH: + chip->soh = pval->intval; + break; default: break; } @@ -2444,6 +3020,11 @@ static int fg_property_is_writeable(struct power_supply *psy, { switch (psp) { case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_CC_STEP: + case POWER_SUPPLY_PROP_CC_STEP_SEL: + case POWER_SUPPLY_PROP_ESR_ACTUAL: + case POWER_SUPPLY_PROP_ESR_NOMINAL: + case POWER_SUPPLY_PROP_SOH: return 1; 
default: break; @@ -2461,6 +3042,8 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_RESISTANCE_ID, POWER_SUPPLY_PROP_RESISTANCE, + POWER_SUPPLY_PROP_ESR_ACTUAL, + POWER_SUPPLY_PROP_ESR_NOMINAL, POWER_SUPPLY_PROP_BATTERY_TYPE, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, @@ -2470,10 +3053,13 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW, POWER_SUPPLY_PROP_CYCLE_COUNTS, POWER_SUPPLY_PROP_SOC_REPORTING_READY, + POWER_SUPPLY_PROP_SOH, POWER_SUPPLY_PROP_DEBUG_BATTERY, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_CC_STEP, + POWER_SUPPLY_PROP_CC_STEP_SEL, }; static const struct power_supply_desc fg_psy_desc = { @@ -2528,6 +3114,22 @@ static int fg_awake_cb(struct votable *votable, void *data, int awake, return 0; } +static int fg_gen4_ttf_awake_voter(void *data, bool val) +{ + struct fg_gen4_chip *chip = data; + struct fg_dev *fg = &chip->fg; + + if (!chip) + return -ENODEV; + + if (fg->battery_missing || + fg->profile_load_status == PROFILE_NOT_LOADED) + return -EPERM; + + vote(fg->awake_votable, TTF_AWAKE_VOTER, val, 0); + return 0; +} + static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data, int enable, const char *client) { @@ -2547,6 +3149,24 @@ static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data, return 0; } +static int fg_gen4_delta_esr_irq_en_cb(struct votable *votable, void *data, + int enable, const char *client) +{ + struct fg_dev *fg = data; + + if (!fg->irqs[ESR_DELTA_IRQ].irq) + return 0; + + if (enable) { + enable_irq(fg->irqs[ESR_DELTA_IRQ].irq); + enable_irq_wake(fg->irqs[ESR_DELTA_IRQ].irq); + } else { + disable_irq_wake(fg->irqs[ESR_DELTA_IRQ].irq); + disable_irq_nosync(fg->irqs[ESR_DELTA_IRQ].irq); + } + + return 0; +} /* All init functions below this */ static int fg_alg_init(struct fg_gen4_chip *chip) @@ -2554,6 +3174,7 @@ static int fg_alg_init(struct fg_gen4_chip *chip) struct fg_dev *fg = &chip->fg; struct cycle_counter *counter; struct cap_learning *cl; + struct ttf *ttf; int rc; counter = devm_kzalloc(fg->dev, sizeof(*counter), GFP_KERNEL); @@ -2599,6 +3220,29 @@ static int fg_alg_init(struct fg_gen4_chip *chip) chip->cl = cl; + ttf = devm_kzalloc(fg->dev, sizeof(*ttf), GFP_KERNEL); + if (!ttf) + return -ENOMEM; + + ttf->get_ttf_param = fg_gen4_get_ttf_param; + ttf->awake_voter = fg_gen4_ttf_awake_voter; + ttf->iterm_delta = 0; + ttf->data = chip; + + rc = ttf_tte_init(ttf); + if (rc < 0) { + dev_err(fg->dev, "Error in initializing ttf, rc:%d\n", rc); + ttf->data = NULL; + counter->data = NULL; + cl->data = NULL; + devm_kfree(fg->dev, ttf); + devm_kfree(fg->dev, counter); + devm_kfree(fg->dev, cl); + return rc; + } + + chip->ttf = ttf; + return 0; } @@ -2678,32 +3322,6 @@ static int fg_gen4_hw_init(struct fg_gen4_chip *chip) } } - if (chip->dt.esr_timer_chg_fast[TIMER_RETRY] > 0 && - chip->dt.esr_timer_chg_fast[TIMER_MAX] > 0) { - rc = fg_set_esr_timer(fg, - chip->dt.esr_timer_chg_fast[TIMER_RETRY], - chip->dt.esr_timer_chg_fast[TIMER_MAX], true, - FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in setting ESR charge timer, rc=%d\n", - rc); - return rc; - } - } - - if (chip->dt.esr_timer_dischg_fast[TIMER_RETRY] > 0 && - chip->dt.esr_timer_dischg_fast[TIMER_MAX] > 0) { - rc = fg_set_esr_timer(fg, - chip->dt.esr_timer_dischg_fast[TIMER_RETRY], - chip->dt.esr_timer_dischg_fast[TIMER_MAX], false, - 
FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in setting ESR discharge timer, rc=%d\n", - rc); - return rc; - } - } - if (chip->dt.batt_temp_cold_thresh != -EINVAL) { fg_encode(fg->sp, FG_SRAM_BATT_TEMP_COLD, chip->dt.batt_temp_cold_thresh, buf); @@ -2817,35 +3435,101 @@ static int fg_gen4_hw_init(struct fg_gen4_chip *chip) } } - rc = restore_cycle_count(chip->counter); - if (rc < 0) { - pr_err("Error in restoring cycle_count, rc=%d\n", rc); - return rc; + if (chip->dt.ki_coeff_low_chg != -EINVAL) { + fg_encode(fg->sp, FG_SRAM_KI_COEFF_LOW_CHG, + chip->dt.ki_coeff_low_chg, &val); + rc = fg_sram_write(fg, + fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].addr_word, + fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].addr_byte, &val, + fg->sp[FG_SRAM_KI_COEFF_LOW_CHG].len, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_low_chg, rc=%d\n", + rc); + return rc; + } } - return 0; -} + if (chip->dt.ki_coeff_med_chg != -EINVAL) { + fg_encode(fg->sp, FG_SRAM_KI_COEFF_MED_CHG, + chip->dt.ki_coeff_med_chg, &val); + rc = fg_sram_write(fg, + fg->sp[FG_SRAM_KI_COEFF_MED_CHG].addr_word, + fg->sp[FG_SRAM_KI_COEFF_MED_CHG].addr_byte, &val, + fg->sp[FG_SRAM_KI_COEFF_MED_CHG].len, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_med_chg, rc=%d\n", + rc); + return rc; + } + } -static int fg_parse_dt_property_u32_array(struct device_node *node, - const char *prop_name, int *buf, int len) -{ - int rc; + if (chip->dt.ki_coeff_hi_chg != -EINVAL) { + fg_encode(fg->sp, FG_SRAM_KI_COEFF_HI_CHG, + chip->dt.ki_coeff_hi_chg, &val); + rc = fg_sram_write(fg, + fg->sp[FG_SRAM_KI_COEFF_HI_CHG].addr_word, + fg->sp[FG_SRAM_KI_COEFF_HI_CHG].addr_byte, &val, + fg->sp[FG_SRAM_KI_COEFF_HI_CHG].len, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_hi_chg, rc=%d\n", rc); + return rc; + } + } - rc = of_property_count_elems_of_size(node, prop_name, sizeof(u32)); - if (rc < 0) { - if (rc == -EINVAL) - return 0; - else + if (chip->esr_fast_calib) { + rc = fg_gen4_esr_fast_calib_config(chip, true); + if (rc < 0) return rc; - } else if (rc != len) { - pr_err("Incorrect length %d for %s, rc=%d\n", len, prop_name, - rc); - return -EINVAL; + } else { + if (chip->dt.esr_timer_chg_slow[TIMER_RETRY] >= 0 && + chip->dt.esr_timer_chg_slow[TIMER_MAX] >= 0) { + rc = fg_set_esr_timer(fg, + chip->dt.esr_timer_chg_slow[TIMER_RETRY], + chip->dt.esr_timer_chg_slow[TIMER_MAX], true, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in setting ESR charge timer, rc=%d\n", + rc); + return rc; + } + } + + if (chip->dt.esr_timer_dischg_slow[TIMER_RETRY] >= 0 && + chip->dt.esr_timer_dischg_slow[TIMER_MAX] >= 0) { + rc = fg_set_esr_timer(fg, + chip->dt.esr_timer_dischg_slow[TIMER_RETRY], + chip->dt.esr_timer_dischg_slow[TIMER_MAX], + false, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in setting ESR discharge timer, rc=%d\n", + rc); + return rc; + } + } } - rc = of_property_read_u32_array(node, prop_name, buf, len); + /* + * Delta ESR interrupt threshold should be configured as specified if + * ESR fast calibration is disabled. Else, set it to max (4000 mOhms). + */ + fg_encode(fg->sp, FG_SRAM_DELTA_ESR_THR, + chip->esr_fast_calib ? 
4000000 : chip->dt.delta_esr_thr_uohms, + buf); + rc = fg_sram_write(fg, + fg->sp[FG_SRAM_DELTA_ESR_THR].addr_word, + fg->sp[FG_SRAM_DELTA_ESR_THR].addr_byte, buf, + fg->sp[FG_SRAM_DELTA_ESR_THR].len, FG_IMA_DEFAULT); if (rc < 0) { - pr_err("Error in reading %s, rc=%d\n", prop_name, rc); + pr_err("Error in writing DELTA_ESR_THR, rc=%d\n", rc); + return rc; + } + + rc = restore_cycle_count(chip->counter); + if (rc < 0) { + pr_err("Error in restoring cycle_count, rc=%d\n", rc); return rc; } @@ -2889,9 +3573,22 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg) struct device_node *node = fg->dev->of_node; int rc, i; + chip->dt.ki_coeff_low_chg = -EINVAL; + of_property_read_u32(node, "qcom,ki-coeff-low-chg", + &chip->dt.ki_coeff_low_chg); + + chip->dt.ki_coeff_med_chg = -EINVAL; + of_property_read_u32(node, "qcom,ki-coeff-med-chg", + &chip->dt.ki_coeff_med_chg); + + chip->dt.ki_coeff_hi_chg = -EINVAL; + of_property_read_u32(node, "qcom,ki-coeff-hi-chg", + &chip->dt.ki_coeff_hi_chg); + if (!of_find_property(node, "qcom,ki-coeff-soc-dischg", NULL) || - !of_find_property(node, "qcom,ki-coeff-med-dischg", NULL) || - !of_find_property(node, "qcom,ki-coeff-hi-dischg", NULL)) + (!of_find_property(node, "qcom,ki-coeff-low-dischg", NULL) && + !of_find_property(node, "qcom,ki-coeff-med-dischg", NULL) && + !of_find_property(node, "qcom,ki-coeff-hi-dischg", NULL))) return 0; rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg", @@ -2899,6 +3596,11 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg) if (rc < 0) return rc; + rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-low-dischg", + chip->dt.ki_coeff_low_dischg, KI_COEFF_SOC_LEVELS); + if (rc < 0) + return rc; + rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-med-dischg", chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS); if (rc < 0) @@ -2916,6 +3618,12 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg) return -EINVAL; } + if (chip->dt.ki_coeff_low_dischg[i] < 0 || + chip->dt.ki_coeff_low_dischg[i] > KI_COEFF_MAX) { + pr_err("Error in ki_coeff_low_dischg values\n"); + return -EINVAL; + } + if (chip->dt.ki_coeff_med_dischg[i] < 0 || chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) { pr_err("Error in ki_coeff_med_dischg values\n"); @@ -2932,7 +3640,71 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg) return 0; } -#define DEFAULT_CUTOFF_VOLT_MV 3000 +#define DEFAULT_ESR_DISABLE_COUNT 10 +#define DEFAULT_ESR_FILTER_FACTOR 2 +#define DEFAULT_DELTA_ESR_THR 1832 +static int fg_parse_esr_cal_params(struct fg_dev *fg) +{ + struct fg_gen4_chip *chip = container_of(fg, struct fg_gen4_chip, fg); + struct device_node *node = fg->dev->of_node; + int rc, i, temp; + + if (!of_find_property(node, "qcom,fg-esr-cal-soc-thresh", NULL) || + !of_find_property(node, "qcom,fg-esr-cal-temp-thresh", NULL)) + return 0; + + rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-cal-soc-thresh", + chip->dt.esr_cal_soc_thresh, ESR_CAL_LEVELS); + if (rc < 0) { + pr_err("Invalid SOC thresholds for ESR fast cal, rc=%d\n", rc); + return rc; + } + + rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-cal-temp-thresh", + chip->dt.esr_cal_temp_thresh, ESR_CAL_LEVELS); + if (rc < 0) { + pr_err("Invalid temperature thresholds for ESR fast cal, rc=%d\n", + rc); + return rc; + } + + for (i = 0; i < ESR_CAL_LEVELS; i++) { + if (chip->dt.esr_cal_soc_thresh[i] > FULL_SOC_RAW) { + pr_err("esr_cal_soc_thresh value shouldn't exceed %d\n", + FULL_SOC_RAW); + return -EINVAL; + } + + if (chip->dt.esr_cal_temp_thresh[i] < ESR_CAL_TEMP_MIN 
|| + chip->dt.esr_cal_temp_thresh[i] > ESR_CAL_TEMP_MAX) { + pr_err("esr_cal_temp_thresh value should be within [%d %d]\n", + ESR_CAL_TEMP_MIN, ESR_CAL_TEMP_MAX); + return -EINVAL; + } + } + + chip->dt.delta_esr_disable_count = DEFAULT_ESR_DISABLE_COUNT; + rc = of_property_read_u32(node, "qcom,fg-delta-esr-disable-count", + &temp); + if (!rc) + chip->dt.delta_esr_disable_count = temp; + + chip->dt.esr_filter_factor = DEFAULT_ESR_FILTER_FACTOR; + rc = of_property_read_u32(node, "qcom,fg-esr-filter-factor", + &temp); + if (!rc) + chip->dt.esr_filter_factor = temp; + + chip->dt.delta_esr_thr_uohms = DEFAULT_DELTA_ESR_THR; + rc = of_property_read_u32(node, "qcom,fg-delta-esr-thr", &temp); + if (!rc) + chip->dt.delta_esr_thr_uohms = temp; + + chip->esr_fast_calib = true; + return 0; +} + +#define DEFAULT_CUTOFF_VOLT_MV 3100 #define DEFAULT_EMPTY_VOLT_MV 2812 #define DEFAULT_SYS_TERM_CURR_MA -125 #define DEFAULT_CUTOFF_CURR_MA 200 @@ -2946,7 +3718,7 @@ static int fg_parse_ki_coefficients(struct fg_dev *fg) #define DEFAULT_CL_MAX_LIM_DECIPERC 0 #define BTEMP_DELTA_LOW 2 #define BTEMP_DELTA_HIGH 10 -#define DEFAULT_ESR_PULSE_THRESH_MA 110 +#define DEFAULT_ESR_PULSE_THRESH_MA 47 #define DEFAULT_ESR_MEAS_CURR_MA 120 static int fg_gen4_parse_dt(struct fg_gen4_chip *chip) { @@ -2987,9 +3759,12 @@ static int fg_gen4_parse_dt(struct fg_gen4_chip *chip) case PM8150B_SUBTYPE: fg->version = GEN4_FG; fg->use_dma = true; - fg->sp = pm8150_sram_params; - if (fg->pmic_rev_id->rev4 == PM8150B_V1P0_REV4) + fg->sp = pm8150b_v2_sram_params; + if (fg->pmic_rev_id->rev4 == PM8150B_V1P0_REV4) { + fg->sp = pm8150b_v1_sram_params; fg->wa_flags |= PM8150B_V1_DMA_WA; + fg->wa_flags |= PM8150B_V1_RSLOW_COMP_WA; + } break; default: return -EINVAL; @@ -3198,6 +3973,10 @@ static int fg_gen4_parse_dt(struct fg_gen4_chip *chip) chip->dt.esr_meas_curr_ma = temp; } + rc = fg_parse_esr_cal_params(fg); + if (rc < 0) + return rc; + return 0; } @@ -3220,6 +3999,9 @@ static void fg_gen4_cleanup(struct fg_gen4_chip *chip) if (fg->delta_bsoc_irq_en_votable) destroy_votable(fg->delta_bsoc_irq_en_votable); + if (chip->delta_esr_irq_en_votable) + destroy_votable(chip->delta_esr_irq_en_votable); + dev_set_drvdata(fg->dev, NULL); } @@ -3265,6 +4047,16 @@ static int fg_gen4_probe(struct platform_device *pdev) goto exit; } + chip->delta_esr_irq_en_votable = create_votable("FG_DELTA_ESR_IRQ", + VOTE_SET_ANY, + fg_gen4_delta_esr_irq_en_cb, + chip); + if (IS_ERR(chip->delta_esr_irq_en_votable)) { + rc = PTR_ERR(chip->delta_esr_irq_en_votable); + chip->delta_esr_irq_en_votable = NULL; + goto exit; + } + rc = fg_alg_init(chip); if (rc < 0) { dev_err(fg->dev, "Error in alg_init, rc:%d\n", @@ -3279,16 +4071,26 @@ static int fg_gen4_probe(struct platform_device *pdev) goto exit; } + if (chip->esr_fast_calib) { + if (alarmtimer_get_rtcdev()) { + alarm_init(&chip->esr_fast_cal_timer, ALARM_BOOTTIME, + fg_esr_fast_cal_timer); + } else { + dev_err(fg->dev, "Failed to initialize esr_fast_cal timer\n"); + rc = -EPROBE_DEFER; + goto exit; + } + } + mutex_init(&fg->bus_lock); mutex_init(&fg->sram_rw_lock); mutex_init(&fg->charge_full_lock); - mutex_init(&chip->ttf.lock); init_completion(&fg->soc_update); init_completion(&fg->soc_ready); INIT_WORK(&fg->status_change_work, status_change_work); INIT_DELAYED_WORK(&fg->profile_load_work, profile_load_work); INIT_DELAYED_WORK(&fg->sram_dump_work, sram_dump_work); - INIT_DELAYED_WORK(&chip->ttf_work, ttf_work); + INIT_WORK(&chip->esr_calib_work, esr_calib_work); rc = fg_memif_init(fg); if (rc < 0) { @@ -3414,7 
+4216,7 @@ static int fg_gen4_suspend(struct device *dev) struct fg_gen4_chip *chip = dev_get_drvdata(dev); struct fg_dev *fg = &chip->fg; - cancel_delayed_work_sync(&chip->ttf_work); + cancel_delayed_work_sync(&chip->ttf->ttf_work); if (fg_sram_dump) cancel_delayed_work_sync(&fg->sram_dump_work); return 0; @@ -3425,7 +4227,7 @@ static int fg_gen4_resume(struct device *dev) struct fg_gen4_chip *chip = dev_get_drvdata(dev); struct fg_dev *fg = &chip->fg; - schedule_delayed_work(&chip->ttf_work, 0); + schedule_delayed_work(&chip->ttf->ttf_work, 0); if (fg_sram_dump) schedule_delayed_work(&fg->sram_dump_work, msecs_to_jiffies(fg_sram_dump_period_ms)); diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c index d111259f3ee883c31a18370667b57b38b0e0c9d2..bf7f00c7f164f877b33192add52e4d89c416d06b 100644 --- a/drivers/power/supply/qcom/qpnp-qg.c +++ b/drivers/power/supply/qcom/qpnp-qg.c @@ -25,11 +25,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include "fg-alg.h" #include "qg-sdam.h" #include "qg-core.h" @@ -44,6 +46,16 @@ module_param_named( debug_mask, qg_debug_mask, int, 0600 ); +static int qg_esr_mod_count = 10; +module_param_named( + esr_mod_count, qg_esr_mod_count, int, 0600 +); + +static int qg_esr_count = 5; +module_param_named( + esr_count, qg_esr_count, int, 0600 +); + static bool is_battery_present(struct qpnp_qg *chip) { u8 reg = 0; @@ -212,6 +224,14 @@ static void qg_notify_charger(struct qpnp_qg *chip) } pr_debug("Notified charger on float voltage and FCC\n"); + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, &prop); + if (rc < 0) { + pr_err("Failed to get charge term current, rc=%d\n", rc); + return; + } + chip->chg_iterm_ma = prop.intval; } static bool is_batt_available(struct qpnp_qg *chip) @@ -229,7 +249,7 @@ static bool is_batt_available(struct qpnp_qg *chip) return true; } -static int qg_update_sdam_params(struct qpnp_qg *chip) +static int qg_store_soc_params(struct qpnp_qg *chip) { int rc, batt_temp = 0, i; unsigned long rtc_sec = 0; @@ -246,13 +266,11 @@ static int qg_update_sdam_params(struct qpnp_qg *chip) else chip->sdam_data[SDAM_TEMP] = (u32)batt_temp; - rc = qg_sdam_write_all(chip->sdam_data); - if (rc < 0) - pr_err("Failed to write to SDAM rc=%d\n", rc); - - for (i = 0; i < SDAM_MAX; i++) + for (i = 0; i <= SDAM_TIME_SEC; i++) { + rc |= qg_sdam_write(i, chip->sdam_data[i]); qg_dbg(chip, QG_DEBUG_STATUS, "SDAM write param %d value=%d\n", i, chip->sdam_data[i]); + } return rc; } @@ -434,6 +452,90 @@ static int qg_process_rt_fifo(struct qpnp_qg *chip) return rc; } +#define MIN_FIFO_FULL_TIME_MS 12000 +static int process_rt_fifo_data(struct qpnp_qg *chip, + bool update_vbat_low, bool update_smb) +{ + int rc = 0; + ktime_t now = ktime_get(); + s64 time_delta; + u8 fifo_length; + + /* + * Reject the FIFO read event if there are back-to-back requests + * This is done to gaurantee that there is always a minimum FIFO + * data to be processed, ignore this if vbat_low is set. 
+ */ + time_delta = ktime_ms_delta(now, chip->last_user_update_time); + + qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms update_vbat_low=%d update_smb=%d\n", + time_delta, update_vbat_low, update_smb); + + if (time_delta > MIN_FIFO_FULL_TIME_MS || update_vbat_low + || update_smb) { + rc = qg_master_hold(chip, true); + if (rc < 0) { + pr_err("Failed to hold master, rc=%d\n", rc); + goto done; + } + + rc = qg_process_rt_fifo(chip); + if (rc < 0) { + pr_err("Failed to process FIFO real-time, rc=%d\n", rc); + goto done; + } + + if (update_vbat_low) { + /* change FIFO length */ + fifo_length = chip->vbat_low ? + chip->dt.s2_vbat_low_fifo_length : + chip->dt.s2_fifo_length; + rc = qg_update_fifo_length(chip, fifo_length); + if (rc < 0) + goto done; + + qg_dbg(chip, QG_DEBUG_STATUS, + "FIFO length updated to %d vbat_low=%d\n", + fifo_length, chip->vbat_low); + } + + if (update_smb) { + rc = qg_masked_write(chip, chip->qg_base + + QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT, + chip->parallel_enabled ? + PARALLEL_IBAT_SENSE_EN_BIT : 0); + if (rc < 0) { + pr_err("Failed to update SMB_EN, rc=%d\n", rc); + goto done; + } + qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n", + chip->parallel_enabled); + } + + rc = qg_master_hold(chip, false); + if (rc < 0) { + pr_err("Failed to release master, rc=%d\n", rc); + goto done; + } + /* FIFOs restarted */ + chip->last_fifo_update_time = ktime_get(); + + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); + chip->last_user_update_time = now; + + /* vote to stay awake until userspace reads data */ + vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0); + } else { + qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n", + time_delta); + } +done: + qg_master_hold(chip, false); + return rc; +} + #define VBAT_LOW_HYST_UV 50000 /* 50mV */ static int qg_vbat_low_wa(struct qpnp_qg *chip) { @@ -562,82 +664,356 @@ static int qg_vbat_thresholds_config(struct qpnp_qg *chip) return rc; } -#define MIN_FIFO_FULL_TIME_MS 12000 -static int process_rt_fifo_data(struct qpnp_qg *chip, - bool vbat_low, bool update_smb) +static void qg_retrieve_esr_params(struct qpnp_qg *chip) { - int rc = 0; - ktime_t now = ktime_get(); - s64 time_delta; + u32 data = 0; + int rc; + + rc = qg_sdam_read(SDAM_ESR_CHARGE_DELTA, &data); + if (!rc && data) { + chip->kdata.param[QG_ESR_CHARGE_DELTA].data = data; + chip->kdata.param[QG_ESR_CHARGE_DELTA].valid = true; + qg_dbg(chip, QG_DEBUG_ESR, + "ESR_CHARGE_DELTA SDAM=%d\n", data); + } else if (rc < 0) { + pr_err("Failed to read ESR_CHARGE_DELTA rc=%d\n", rc); + } + + rc = qg_sdam_read(SDAM_ESR_DISCHARGE_DELTA, &data); + if (!rc && data) { + chip->kdata.param[QG_ESR_DISCHARGE_DELTA].data = data; + chip->kdata.param[QG_ESR_DISCHARGE_DELTA].valid = true; + qg_dbg(chip, QG_DEBUG_ESR, + "ESR_DISCHARGE_DELTA SDAM=%d\n", data); + } else if (rc < 0) { + pr_err("Failed to read ESR_DISCHARGE_DELTA rc=%d\n", rc); + } + + rc = qg_sdam_read(SDAM_ESR_CHARGE_SF, &data); + if (!rc && data) { + data = CAP(QG_ESR_SF_MIN, QG_ESR_SF_MAX, data); + chip->kdata.param[QG_ESR_CHARGE_SF].data = data; + chip->kdata.param[QG_ESR_CHARGE_SF].valid = true; + qg_dbg(chip, QG_DEBUG_ESR, + "ESR_CHARGE_SF SDAM=%d\n", data); + } else if (rc < 0) { + pr_err("Failed to read ESR_CHARGE_SF rc=%d\n", rc); + } + + rc = qg_sdam_read(SDAM_ESR_DISCHARGE_SF, &data); + if (!rc && data) { + data = CAP(QG_ESR_SF_MIN, QG_ESR_SF_MAX, data); + chip->kdata.param[QG_ESR_DISCHARGE_SF].data = data; + 
chip->kdata.param[QG_ESR_DISCHARGE_SF].valid = true; + qg_dbg(chip, QG_DEBUG_ESR, + "ESR_DISCHARGE_SF SDAM=%d\n", data); + } else if (rc < 0) { + pr_err("Failed to read ESR_DISCHARGE_SF rc=%d\n", rc); + } +} + +static void qg_store_esr_params(struct qpnp_qg *chip) +{ + unsigned int esr; + + if (chip->udata.param[QG_ESR_CHARGE_DELTA].valid) { + esr = chip->udata.param[QG_ESR_CHARGE_DELTA].data; + qg_sdam_write(SDAM_ESR_CHARGE_DELTA, esr); + qg_dbg(chip, QG_DEBUG_ESR, + "SDAM store ESR_CHARGE_DELTA=%d\n", esr); + } + + if (chip->udata.param[QG_ESR_DISCHARGE_DELTA].valid) { + esr = chip->udata.param[QG_ESR_DISCHARGE_DELTA].data; + qg_sdam_write(SDAM_ESR_DISCHARGE_DELTA, esr); + qg_dbg(chip, QG_DEBUG_ESR, + "SDAM store ESR_DISCHARGE_DELTA=%d\n", esr); + } + + if (chip->udata.param[QG_ESR_CHARGE_SF].valid) { + esr = chip->udata.param[QG_ESR_CHARGE_SF].data; + qg_sdam_write(SDAM_ESR_CHARGE_SF, esr); + qg_dbg(chip, QG_DEBUG_ESR, + "SDAM store ESR_CHARGE_SF=%d\n", esr); + } + + if (chip->udata.param[QG_ESR_DISCHARGE_SF].valid) { + esr = chip->udata.param[QG_ESR_DISCHARGE_SF].data; + qg_sdam_write(SDAM_ESR_DISCHARGE_SF, esr); + qg_dbg(chip, QG_DEBUG_ESR, + "SDAM store ESR_DISCHARGE_SF=%d\n", esr); + } +} + +#define MAX_ESR_RETRY_COUNT 10 +#define ESR_SD_PERCENT 10 +static int qg_process_esr_data(struct qpnp_qg *chip) +{ + int i; + int pre_i, post_i, pre_v, post_v, first_pre_i = 0; + int diff_v, diff_i, esr_avg = 0, count = 0; + + for (i = 0; i < qg_esr_count; i++) { + if (!chip->esr_data[i].valid) + continue; + + pre_i = chip->esr_data[i].pre_esr_i; + pre_v = chip->esr_data[i].pre_esr_v; + post_i = chip->esr_data[i].post_esr_i; + post_v = chip->esr_data[i].post_esr_v; + + /* + * Check if any of the pre/post readings have changed + * signs by comparing it with the first valid + * pre_i value. + */ + if (!first_pre_i) + first_pre_i = pre_i; + + if ((first_pre_i < 0 && pre_i > 0) || + (first_pre_i > 0 && post_i < 0) || + (first_pre_i < 0 && post_i > 0)) { + qg_dbg(chip, QG_DEBUG_ESR, + "ESR-sign mismatch %d reject all data\n", i); + esr_avg = count = 0; + break; + } + + /* calculate ESR */ + diff_v = abs(post_v - pre_v); + diff_i = abs(post_i - pre_i); + + if (!diff_v || !diff_i || + (diff_i < chip->dt.esr_qual_i_ua) || + (diff_v < chip->dt.esr_qual_v_uv)) { + qg_dbg(chip, QG_DEBUG_ESR, + "ESR (%d) V/I %duA %duV fails qualification\n", + i, diff_i, diff_v); + chip->esr_data[i].valid = false; + continue; + } + + chip->esr_data[i].esr = + DIV_ROUND_CLOSEST(diff_v * 1000, diff_i); + qg_dbg(chip, QG_DEBUG_ESR, + "ESR qualified: i=%d pre_i=%d pre_v=%d post_i=%d post_v=%d esr_diff_v=%d esr_diff_i=%d esr=%d\n", + i, pre_i, pre_v, post_i, post_v, + diff_v, diff_i, chip->esr_data[i].esr); + + esr_avg += chip->esr_data[i].esr; + count++; + } + + if (!count) { + qg_dbg(chip, QG_DEBUG_ESR, + "No ESR samples qualified, ESR not found\n"); + chip->esr_avg = 0; + return 0; + } + + esr_avg /= count; + qg_dbg(chip, QG_DEBUG_ESR, + "ESR all sample average=%d count=%d apply_SD=%d\n", + esr_avg, count, (esr_avg * ESR_SD_PERCENT) / 100); /* - * Reject the FIFO read event if there are back-to-back requests - * This is done to gaurantee that there is always a minimum FIFO - * data to be processed, ignore this if vbat_low is set. 
+ * Reject ESR samples which do not fall in + * 10% the standard-deviation */ - time_delta = ktime_ms_delta(now, chip->last_user_update_time); + count = 0; + for (i = 0; i < qg_esr_count; i++) { + if (!chip->esr_data[i].valid) + continue; + + if ((abs(chip->esr_data[i].esr - esr_avg) <= + (esr_avg * ESR_SD_PERCENT) / 100)) { + /* valid ESR */ + chip->esr_avg += chip->esr_data[i].esr; + count++; + qg_dbg(chip, QG_DEBUG_ESR, + "Valid ESR after SD (%d) %d mOhm\n", + i, chip->esr_data[i].esr); + } else { + qg_dbg(chip, QG_DEBUG_ESR, + "ESR (%d) %d falls-out of SD(%d)\n", + i, chip->esr_data[i].esr, ESR_SD_PERCENT); + } + } - qg_dbg(chip, QG_DEBUG_FIFO, "time_delta=%lld ms vbat_low=%d\n", - time_delta, vbat_low); + if (count >= QG_MIN_ESR_COUNT) { + chip->esr_avg /= count; + qg_dbg(chip, QG_DEBUG_ESR, "Average estimated ESR %d mOhm\n", + chip->esr_avg); + } else { + qg_dbg(chip, QG_DEBUG_ESR, + "Not enough ESR samples, ESR not found\n"); + chip->esr_avg = 0; + } - if (time_delta > MIN_FIFO_FULL_TIME_MS || vbat_low || update_smb) { - rc = qg_master_hold(chip, true); - if (rc < 0) { - pr_err("Failed to hold master, rc=%d\n", rc); - goto done; - } + return 0; +} - rc = qg_process_rt_fifo(chip); +static int qg_esr_estimate(struct qpnp_qg *chip) +{ + int rc, i, ibat; + u8 esr_done_count, reg0 = 0, reg1 = 0; + bool is_charging = false; + + if (chip->dt.esr_disable) + return 0; + + /* + * Charge - enable ESR estimation only during fast-charging. + * Discharge - enable ESR estimation only if enabled via DT. + */ + if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING && + chip->charge_type != POWER_SUPPLY_CHARGE_TYPE_FAST) { + qg_dbg(chip, QG_DEBUG_ESR, + "Skip ESR, Not in fast-charge (CC)\n"); + return 0; + } + + if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING && + !chip->dt.esr_discharge_enable) + return 0; + + if (chip->batt_soc != INT_MIN && (chip->batt_soc < + chip->dt.esr_disable_soc)) { + qg_dbg(chip, QG_DEBUG_ESR, + "Skip ESR, batt-soc below %d\n", + chip->dt.esr_disable_soc); + return 0; + } + + qg_dbg(chip, QG_DEBUG_ESR, "FIFO done count=%d ESR mod count=%d\n", + chip->fifo_done_count, qg_esr_mod_count); + + if ((chip->fifo_done_count % qg_esr_mod_count) != 0) + return 0; + + if (qg_esr_count > QG_MAX_ESR_COUNT) + qg_esr_count = QG_MAX_ESR_COUNT; + + if (qg_esr_count < QG_MIN_ESR_COUNT) + qg_esr_count = QG_MIN_ESR_COUNT; + + /* clear all data */ + chip->esr_avg = 0; + memset(&chip->esr_data, 0, sizeof(chip->esr_data)); + + rc = qg_master_hold(chip, true); + if (rc < 0) { + pr_err("Failed to hold master, rc=%d\n", rc); + goto done; + } + + for (i = 0; i < qg_esr_count; i++) { + /* Fire ESR measurement */ + rc = qg_masked_write(chip, + chip->qg_base + QG_ESR_MEAS_TRIG_REG, + HW_ESR_MEAS_START_BIT, HW_ESR_MEAS_START_BIT); if (rc < 0) { - pr_err("Failed to process FIFO real-time, rc=%d\n", rc); - goto done; + pr_err("Failed to start ESR rc=%d\n", rc); + continue; } - if (vbat_low) { - /* change FIFO length */ - rc = qg_update_fifo_length(chip, - chip->dt.s2_vbat_low_fifo_length); + esr_done_count = reg0 = reg1 = 0; + do { + /* delay for ESR processing to complete */ + msleep(50); + + esr_done_count++; + + rc = qg_read(chip, + chip->qg_base + QG_STATUS1_REG, ®0, 1); + if (rc < 0) + continue; + + rc = qg_read(chip, + chip->qg_base + QG_STATUS4_REG, ®1, 1); + if (rc < 0) + continue; + + /* check ESR-done status */ + if (!(reg1 & ESR_MEAS_IN_PROGRESS_BIT) && + (reg0 & ESR_MEAS_DONE_BIT)) { + qg_dbg(chip, QG_DEBUG_ESR, + "ESR measurement done %d count %d\n", + i, esr_done_count); + break; + 
} + } while (esr_done_count < MAX_ESR_RETRY_COUNT); + + if (esr_done_count == MAX_ESR_RETRY_COUNT) { + pr_err("Failed to get ESR done for %d iteration\n", i); + continue; + } else { + /* found a valid ESR, read pre-post data */ + rc = qg_read_raw_data(chip, QG_PRE_ESR_V_DATA0_REG, + &chip->esr_data[i].pre_esr_v); if (rc < 0) goto done; - qg_dbg(chip, QG_DEBUG_STATUS, - "FIFO length updated to %d vbat_low=%d\n", - chip->dt.s2_vbat_low_fifo_length, - vbat_low); - } + rc = qg_read_raw_data(chip, QG_PRE_ESR_I_DATA0_REG, + &chip->esr_data[i].pre_esr_i); + if (rc < 0) + goto done; - if (update_smb) { - rc = qg_masked_write(chip, chip->qg_base + - QG_MODE_CTL1_REG, PARALLEL_IBAT_SENSE_EN_BIT, - chip->parallel_enabled ? - PARALLEL_IBAT_SENSE_EN_BIT : 0); - if (rc < 0) { - pr_err("Failed to update SMB_EN, rc=%d\n", rc); + rc = qg_read_raw_data(chip, QG_POST_ESR_V_DATA0_REG, + &chip->esr_data[i].post_esr_v); + if (rc < 0) goto done; - } - qg_dbg(chip, QG_DEBUG_STATUS, "Parallel SENSE %d\n", - chip->parallel_enabled); - } - rc = qg_master_hold(chip, false); - if (rc < 0) { - pr_err("Failed to release master, rc=%d\n", rc); - goto done; + rc = qg_read_raw_data(chip, QG_POST_ESR_I_DATA0_REG, + &chip->esr_data[i].post_esr_i); + if (rc < 0) + goto done; + + chip->esr_data[i].pre_esr_v = + V_RAW_TO_UV(chip->esr_data[i].pre_esr_v); + ibat = sign_extend32(chip->esr_data[i].pre_esr_i, 15); + chip->esr_data[i].pre_esr_i = I_RAW_TO_UA(ibat); + chip->esr_data[i].post_esr_v = + V_RAW_TO_UV(chip->esr_data[i].post_esr_v); + ibat = sign_extend32(chip->esr_data[i].post_esr_i, 15); + chip->esr_data[i].post_esr_i = I_RAW_TO_UA(ibat); + + chip->esr_data[i].valid = true; + + if ((int)chip->esr_data[i].pre_esr_i < 0) + is_charging = true; + + qg_dbg(chip, QG_DEBUG_ESR, + "ESR values for %d iteration pre_v=%d pre_i=%d post_v=%d post_i=%d\n", + i, chip->esr_data[i].pre_esr_v, + (int)chip->esr_data[i].pre_esr_i, + chip->esr_data[i].post_esr_v, + (int)chip->esr_data[i].post_esr_i); } - /* FIFOs restarted */ - chip->last_fifo_update_time = ktime_get(); + /* delay before the next ESR measurement */ + msleep(200); + } - /* signal the read thread */ - chip->data_ready = true; - wake_up_interruptible(&chip->qg_wait_q); - chip->last_user_update_time = now; + rc = qg_process_esr_data(chip); + if (rc < 0) + pr_err("Failed to process ESR data rc=%d\n", rc); - /* vote to stay awake until userspace reads data */ - vote(chip->awake_votable, FIFO_RT_DONE_VOTER, true, 0); - } else { - qg_dbg(chip, QG_DEBUG_FIFO, "FIFO processing too early time_delta=%lld\n", - time_delta); + rc = qg_master_hold(chip, false); + if (rc < 0) { + pr_err("Failed to release master, rc=%d\n", rc); + goto done; + } + + if (chip->esr_avg) { + chip->kdata.param[QG_ESR].data = chip->esr_avg; + chip->kdata.param[QG_ESR].valid = true; + qg_dbg(chip, QG_DEBUG_ESR, "ESR_SW=%d during %s\n", + chip->esr_avg, is_charging ? 
"CHARGE" : "DISCHARGE"); + qg_retrieve_esr_params(chip); + chip->esr_actual = chip->esr_avg; } + + return 0; done: qg_master_hold(chip, false); return rc; @@ -655,6 +1031,9 @@ static void process_udata_work(struct work_struct *work) if (chip->udata.param[QG_BATT_SOC].valid) chip->batt_soc = chip->udata.param[QG_BATT_SOC].data; + if (chip->udata.param[QG_FULL_SOC].valid) + chip->full_soc = chip->udata.param[QG_FULL_SOC].data; + if (chip->udata.param[QG_SOC].valid) { qg_dbg(chip, QG_DEBUG_SOC, "udata SOC=%d last SOC=%d\n", chip->udata.param[QG_SOC].data, chip->catch_up_soc); @@ -670,7 +1049,7 @@ static void process_udata_work(struct work_struct *work) chip->udata.param[QG_RBAT_MOHM].data; chip->sdam_data[SDAM_VALID] = 1; - rc = qg_update_sdam_params(chip); + rc = qg_store_soc_params(chip); if (rc < 0) pr_err("Failed to update SDAM params, rc=%d\n", rc); } @@ -679,16 +1058,23 @@ static void process_udata_work(struct work_struct *work) chip->charge_counter_uah = chip->udata.param[QG_CHARGE_COUNTER].data; - vote(chip->awake_votable, UDATA_READY_VOTER, false, 0); -} + if (chip->udata.param[QG_ESR].valid) + chip->esr_last = chip->udata.param[QG_ESR].data; -static irqreturn_t qg_default_irq_handler(int irq, void *data) -{ - struct qpnp_qg *chip = data; + if (chip->esr_actual != -EINVAL && chip->udata.param[QG_ESR].valid) { + chip->esr_nominal = chip->udata.param[QG_ESR].data; + if (chip->qg_psy) + power_supply_changed(chip->qg_psy); + } - qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + if (!chip->dt.esr_disable) + qg_store_esr_params(chip); - return IRQ_HANDLED; + qg_dbg(chip, QG_DEBUG_STATUS, "udata update: batt_soc=%d cc_soc=%d full_soc=%d qg_esr=%d\n", + (chip->batt_soc != INT_MIN) ? chip->batt_soc : -EINVAL, + (chip->cc_soc != INT_MIN) ? chip->cc_soc : -EINVAL, + chip->full_soc, chip->esr_last); + vote(chip->awake_votable, UDATA_READY_VOTER, false, 0); } #define MAX_FIFO_DELTA_PERCENT 10 @@ -718,6 +1104,9 @@ static irqreturn_t qg_fifo_update_done_handler(int irq, void *data) goto done; } + if (++chip->fifo_done_count == U32_MAX) + chip->fifo_done_count = 0; + rc = qg_vbat_thresholds_config(chip); if (rc < 0) pr_err("Failed to apply VBAT EMPTY config rc=%d\n", rc); @@ -728,6 +1117,12 @@ static irqreturn_t qg_fifo_update_done_handler(int irq, void *data) goto done; } + rc = qg_esr_estimate(chip); + if (rc < 0) { + pr_err("Failed to estimate ESR, rc=%d\n", rc); + goto done; + } + rc = get_fifo_done_time(chip, false, &hw_delta_ms); if (rc < 0) hw_delta_ms = 0; @@ -767,9 +1162,14 @@ static irqreturn_t qg_vbat_low_handler(int irq, void *data) pr_err("Failed to read RT status, rc=%d\n", rc); goto done; } + /* ignore VBAT low if battery is missing */ + if ((status & BATTERY_MISSING_INT_RT_STS_BIT) || + chip->battery_missing) + goto done; + chip->vbat_low = !!(status & VBAT_LOW_INT_RT_STS_BIT); - rc = process_rt_fifo_data(chip, chip->vbat_low, false); + rc = process_rt_fifo_data(chip, true, false); if (rc < 0) pr_err("Failed to process RT FIFO data, rc=%d\n", rc); @@ -783,8 +1183,20 @@ static irqreturn_t qg_vbat_empty_handler(int irq, void *data) { struct qpnp_qg *chip = data; u32 ocv_uv = 0; + int rc; + u8 status = 0; qg_dbg(chip, QG_DEBUG_IRQ, "IRQ triggered\n"); + + rc = qg_read(chip, chip->qg_base + QG_INT_RT_STS_REG, &status, 1); + if (rc < 0) + pr_err("Failed to read RT status rc=%d\n", rc); + + /* ignore VBAT empty if battery is missing */ + if ((status & BATTERY_MISSING_INT_RT_STS_BIT) || + chip->battery_missing) + return IRQ_HANDLED; + pr_warn("VBATT EMPTY SOC = 0\n"); chip->catch_up_soc = 0; 
@@ -795,7 +1207,7 @@ static irqreturn_t qg_vbat_empty_handler(int irq, void *data) chip->sdam_data[SDAM_OCV_UV] = ocv_uv; chip->sdam_data[SDAM_VALID] = 1; - qg_update_sdam_params(chip); + qg_store_soc_params(chip); if (chip->qg_psy) power_supply_changed(chip->qg_psy); @@ -845,7 +1257,6 @@ static irqreturn_t qg_good_ocv_handler(int irq, void *data) static struct qg_irq_info qg_irqs[] = { [QG_BATT_MISSING_IRQ] = { .name = "qg-batt-missing", - .handler = qg_default_irq_handler, }, [QG_VBATT_LOW_IRQ] = { .name = "qg-vbat-low", @@ -869,11 +1280,9 @@ static struct qg_irq_info qg_irqs[] = { }, [QG_FSM_STAT_CHG_IRQ] = { .name = "qg-fsm-state-chg", - .handler = qg_default_irq_handler, }, [QG_EVENT_IRQ] = { .name = "qg-event", - .handler = qg_default_irq_handler, }, }; @@ -972,7 +1381,7 @@ static int qg_get_learned_capacity(void *data, int64_t *learned_cap_uah) return -ENODEV; if (chip->battery_missing || !chip->profile_loaded) - return -EPERM; + return -ENODEV; rc = qg_sdam_multibyte_read(QG_SDAM_LEARNED_CAPACITY_OFFSET, (u8 *)&cc_mah, 2); @@ -997,7 +1406,7 @@ static int qg_store_learned_capacity(void *data, int64_t learned_cap_uah) return -ENODEV; if (chip->battery_missing || !learned_cap_uah) - return -EPERM; + return -ENODEV; cc_mah = div64_s64(learned_cap_uah, 1000); rc = qg_sdam_multibyte_write(QG_SDAM_LEARNED_CAPACITY_OFFSET, @@ -1037,7 +1446,7 @@ static int qg_restore_cycle_count(void *data, u16 *buf, int length) return -ENODEV; if (chip->battery_missing || !chip->profile_loaded) - return -EPERM; + return -ENODEV; if (!buf || length > BUCKET_COUNT) return -EINVAL; @@ -1065,7 +1474,7 @@ static int qg_store_cycle_count(void *data, u16 *buf, int id, int length) return -ENODEV; if (chip->battery_missing || !chip->profile_loaded) - return -EPERM; + return -ENODEV; if (!buf || length > BUCKET_COUNT * 2 || id < 0 || id > BUCKET_COUNT - 1 || @@ -1196,6 +1605,87 @@ static int qg_get_battery_capacity(struct qpnp_qg *chip, int *soc) return 0; } +static int qg_get_ttf_param(void *data, enum ttf_param param, int *val) +{ + union power_supply_propval prop = {0, }; + struct qpnp_qg *chip = data; + int rc = 0; + int64_t temp = 0; + + if (!chip) + return -ENODEV; + + if (chip->battery_missing || !chip->profile_loaded) + return -ENODEV; + + switch (param) { + case TTF_MSOC: + rc = qg_get_battery_capacity(chip, val); + break; + case TTF_VBAT: + rc = qg_get_battery_voltage(chip, val); + break; + case TTF_IBAT: + rc = qg_get_battery_current(chip, val); + break; + case TTF_FCC: + if (chip->qg_psy) { + rc = power_supply_get_property(chip->qg_psy, + POWER_SUPPLY_PROP_CHARGE_FULL, &prop); + if (rc >= 0) { + temp = div64_u64(prop.intval, 1000); + *val = div64_u64(chip->full_soc * temp, + QG_SOC_FULL); + } + } + break; + case TTF_MODE: + *val = TTF_MODE_NORMAL; + break; + case TTF_ITERM: + if (chip->chg_iterm_ma == INT_MIN) + *val = 0; + else + *val = chip->chg_iterm_ma; + break; + case TTF_RBATT: + rc = qg_sdam_read(SDAM_RBAT_MOHM, val); + if (!rc) + *val *= 1000; + break; + case TTF_VFLOAT: + *val = chip->bp.float_volt_uv; + break; + case TTF_CHG_TYPE: + *val = chip->charge_type; + break; + case TTF_CHG_STATUS: + *val = chip->charge_status; + break; + default: + pr_err("Unsupported property %d\n", param); + rc = -EINVAL; + break; + } + + return rc; +} + +static int qg_ttf_awake_voter(void *data, bool val) +{ + struct qpnp_qg *chip = data; + + if (!chip) + return -ENODEV; + + if (chip->battery_missing || !chip->profile_loaded) + return -ENODEV; + + vote(chip->awake_votable, TTF_AWAKE_VOTER, val, 0); + + return 0; +} 
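
The TTF_FCC branch of qg_get_ttf_param() above derives the effective full charge capacity by converting the charger-reported CHARGE_FULL from uAh to mAh and then scaling it by the learned full_soc fraction, so the time-to-full estimate tracks the learned end-of-charge point rather than the rated capacity. A worked example of that arithmetic is sketched below; it is illustrative only, and the 10000 scale assumed for QG_SOC_FULL (100% with two decimal places) is an assumption, not taken from this patch.

/*
 * Illustrative arithmetic only; the QG_SOC_FULL scale is assumed.
 * Mirrors: temp = charge_full / 1000; val = full_soc * temp / QG_SOC_FULL.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t charge_full_uah = 4000000;	/* 4000 mAh reported by the charger */
	int64_t full_soc        = 9500;		/* learned "full" SOC */
	int64_t qg_soc_full     = 10000;	/* assumed scale for 100% */

	int64_t fcc_mah = charge_full_uah / 1000;		/* 4000 */
	int64_t ttf_fcc = (full_soc * fcc_mah) / qg_soc_full;	/* 3800 mAh */

	printf("effective FCC for TTF = %lld mAh\n", (long long)ttf_fcc);
	return 0;
}
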
+ static int qg_psy_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *pval) @@ -1223,6 +1713,17 @@ static int qg_psy_set_property(struct power_supply *psy, chip->cl->learned_cap_uah = pval->intval; mutex_unlock(&chip->cl->lock); break; + case POWER_SUPPLY_PROP_SOH: + chip->soh = pval->intval; + qg_dbg(chip, QG_DEBUG_STATUS, "SOH update: SOH=%d esr_actual=%d esr_nominal=%d\n", + chip->soh, chip->esr_actual, chip->esr_nominal); + break; + case POWER_SUPPLY_PROP_ESR_ACTUAL: + chip->esr_actual = pval->intval; + break; + case POWER_SUPPLY_PROP_ESR_NOMINAL: + chip->esr_nominal = pval->intval; + break; default: break; } @@ -1266,6 +1767,12 @@ static int qg_psy_get_property(struct power_supply *psy, if (!rc) pval->intval *= 1000; break; + case POWER_SUPPLY_PROP_RESISTANCE_NOW: + pval->intval = chip->esr_last; + break; + case POWER_SUPPLY_PROP_SOC_REPORTING_READY: + pval->intval = chip->soc_reporting_ready; + break; case POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE: pval->intval = chip->dt.rbat_conn_mohm; break; @@ -1308,6 +1815,23 @@ static int qg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CYCLE_COUNT: rc = get_cycle_count(chip->counter, &pval->intval); break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG: + rc = ttf_get_time_to_full(chip->ttf, &pval->intval); + break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: + rc = ttf_get_time_to_empty(chip->ttf, &pval->intval); + break; + case POWER_SUPPLY_PROP_ESR_ACTUAL: + pval->intval = (chip->esr_actual == -EINVAL) ? -EINVAL : + (chip->esr_actual * 1000); + break; + case POWER_SUPPLY_PROP_ESR_NOMINAL: + pval->intval = (chip->esr_nominal == -EINVAL) ? -EINVAL : + (chip->esr_nominal * 1000); + break; + case POWER_SUPPLY_PROP_SOH: + pval->intval = chip->soh; + break; default: pr_debug("Unsupported property %d\n", psp); break; @@ -1321,6 +1845,9 @@ static int qg_property_is_writeable(struct power_supply *psy, { switch (psp) { case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_ESR_ACTUAL: + case POWER_SUPPLY_PROP_ESR_NOMINAL: + case POWER_SUPPLY_PROP_SOH: return 1; default: break; @@ -1337,6 +1864,8 @@ static enum power_supply_property qg_psy_props[] = { POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_RESISTANCE, POWER_SUPPLY_PROP_RESISTANCE_ID, + POWER_SUPPLY_PROP_RESISTANCE_NOW, + POWER_SUPPLY_PROP_SOC_REPORTING_READY, POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE, POWER_SUPPLY_PROP_DEBUG_BATTERY, POWER_SUPPLY_PROP_BATTERY_TYPE, @@ -1348,6 +1877,11 @@ static enum power_supply_property qg_psy_props[] = { POWER_SUPPLY_PROP_CYCLE_COUNTS, POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_ESR_ACTUAL, + POWER_SUPPLY_PROP_ESR_NOMINAL, + POWER_SUPPLY_PROP_SOH, }; static const struct power_supply_desc qg_psy_desc = { @@ -1434,6 +1968,7 @@ static int qg_parallel_status_update(struct qpnp_qg *chip) { int rc; bool parallel_enabled = is_parallel_enabled(chip); + bool update_smb = false; if (parallel_enabled == chip->parallel_enabled) return 0; @@ -1444,7 +1979,14 @@ static int qg_parallel_status_update(struct qpnp_qg *chip) mutex_lock(&chip->data_lock); - rc = process_rt_fifo_data(chip, false, true); + /* + * Parallel charger uses the same external sense, hence do not + * enable SMB sensing if PMI632 is configured for external sense. 
+ */ + if (!chip->dt.qg_ext_sense) + update_smb = true; + + rc = process_rt_fifo_data(chip, false, update_smb); if (rc < 0) pr_err("Failed to process RT FIFO data, rc=%d\n", rc); @@ -1469,6 +2011,110 @@ static int qg_usb_status_update(struct qpnp_qg *chip) return 0; } +static int qg_handle_battery_removal(struct qpnp_qg *chip) +{ + int rc, length = QG_SDAM_MAX_OFFSET - QG_SDAM_VALID_OFFSET; + u8 *data; + + /* clear SDAM */ + data = kcalloc(length, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + rc = qg_sdam_multibyte_write(QG_SDAM_VALID_OFFSET, data, length); + if (rc < 0) + pr_err("Failed to clear SDAM rc=%d\n", rc); + + return rc; +} + +#define MAX_QG_OK_RETRIES 20 +static int qg_handle_battery_insertion(struct qpnp_qg *chip) +{ + int rc, count = 0; + u32 ocv_uv = 0, ocv_raw = 0; + u8 reg = 0; + + do { + rc = qg_read(chip, chip->qg_base + QG_STATUS1_REG, ®, 1); + if (rc < 0) { + pr_err("Failed to read STATUS1_REG rc=%d\n", rc); + return rc; + } + + if (reg & QG_OK_BIT) + break; + + msleep(200); + count++; + } while (count < MAX_QG_OK_RETRIES); + + if (count == MAX_QG_OK_RETRIES) { + qg_dbg(chip, QG_DEBUG_STATUS, "QG_OK not set!\n"); + return 0; + } + + /* read S7 PON OCV */ + rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S7_PON_OCV); + if (rc < 0) { + pr_err("Failed to read PON OCV rc=%d\n", rc); + return rc; + } + + qg_dbg(chip, QG_DEBUG_STATUS, + "S7_OCV on battery insertion = %duV\n", ocv_uv); + + chip->kdata.param[QG_GOOD_OCV_UV].data = ocv_uv; + chip->kdata.param[QG_GOOD_OCV_UV].valid = true; + /* clear all the userspace data */ + chip->kdata.param[QG_CLEAR_LEARNT_DATA].data = 1; + chip->kdata.param[QG_CLEAR_LEARNT_DATA].valid = true; + + vote(chip->awake_votable, GOOD_OCV_VOTER, true, 0); + /* signal the read thread */ + chip->data_ready = true; + wake_up_interruptible(&chip->qg_wait_q); + + return 0; +} + +static int qg_battery_status_update(struct qpnp_qg *chip) +{ + int rc; + union power_supply_propval prop = {0, }; + + if (!is_batt_available(chip)) + return 0; + + mutex_lock(&chip->data_lock); + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_PRESENT, &prop); + if (rc < 0) { + pr_err("Failed to get battery-present, rc=%d\n", rc); + goto done; + } + + if (chip->battery_missing && prop.intval) { + pr_warn("Battery inserted!\n"); + rc = qg_handle_battery_insertion(chip); + if (rc < 0) + pr_err("Failed in battery-insertion rc=%d\n", rc); + } else if (!chip->battery_missing && !prop.intval) { + pr_warn("Battery removed!\n"); + rc = qg_handle_battery_removal(chip); + if (rc < 0) + pr_err("Failed in battery-removal rc=%d\n", rc); + } + + chip->battery_missing = !prop.intval; + +done: + mutex_unlock(&chip->data_lock); + return rc; +} + + static void qg_status_change_work(struct work_struct *work) { struct qpnp_qg *chip = container_of(work, @@ -1481,6 +2127,17 @@ static void qg_status_change_work(struct work_struct *work) goto out; } + rc = qg_battery_status_update(chip); + if (rc < 0) + pr_err("Failed to process battery status update rc=%d\n", rc); + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_TYPE, &prop); + if (rc < 0) + pr_err("Failed to get charge-type, rc=%d\n", rc); + else + chip->charge_type = prop.intval; + rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, &prop); if (rc < 0) @@ -1527,6 +2184,8 @@ static void qg_status_change_work(struct work_struct *work) rc = qg_charge_full_update(chip); if (rc < 0) pr_err("Failed in charge_full_update, rc=%d\n", rc); + + ttf_update(chip->ttf, 
chip->usb_present); out: pm_relax(chip->dev); } @@ -1867,6 +2526,7 @@ static int qg_setup_battery(struct qpnp_qg *chip) qg_dbg(chip, QG_DEBUG_PROFILE, "Battery Missing!\n"); chip->battery_missing = true; chip->profile_loaded = false; + chip->soc_reporting_ready = true; } else { /* battery present */ rc = get_batt_id_ohm(chip, &chip->batt_id_ohm); @@ -1875,11 +2535,14 @@ static int qg_setup_battery(struct qpnp_qg *chip) chip->profile_loaded = false; } else { rc = qg_load_battery_profile(chip); - if (rc < 0) + if (rc < 0) { pr_err("Failed to load battery-profile rc=%d\n", rc); - else + chip->profile_loaded = false; + chip->soc_reporting_ready = true; + } else { chip->profile_loaded = true; + } } } @@ -1890,12 +2553,21 @@ static int qg_setup_battery(struct qpnp_qg *chip) return 0; } + +static struct ocv_all ocv[] = { + [S7_PON_OCV] = { 0, 0, "S7_PON_OCV"}, + [S3_GOOD_OCV] = { 0, 0, "S3_GOOD_OCV"}, + [S3_LAST_OCV] = { 0, 0, "S3_LAST_OCV"}, + [SDAM_PON_OCV] = { 0, 0, "SDAM_PON_OCV"}, +}; + +#define S7_ERROR_MARGIN_UV 20000 static int qg_determine_pon_soc(struct qpnp_qg *chip) { - int rc = 0, batt_temp = 0; + int rc = 0, batt_temp = 0, i; bool use_pon_ocv = true; unsigned long rtc_sec = 0; - u32 ocv_uv = 0, ocv_raw = 0, soc = 0, shutdown[SDAM_MAX] = {0}; + u32 ocv_uv = 0, soc = 0, shutdown[SDAM_MAX] = {0}; char ocv_type[20] = "NONE"; if (!chip->profile_loaded) { @@ -1944,33 +2616,47 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip) goto done; } - /* - * Read S3_LAST_OCV, if S3_LAST_OCV is invalid, - * read the SDAM_PON_OCV - * if SDAM is not-set, use S7_PON_OCV. - */ - strlcpy(ocv_type, "S3_LAST_SOC", 20); - rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, S3_LAST_OCV); - if (rc < 0) - goto done; - - if (ocv_raw == FIFO_V_RESET_VAL) { - /* S3_LAST_OCV is invalid */ - strlcpy(ocv_type, "SDAM_PON_SOC", 20); - rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, SDAM_PON_OCV); + /* read all OCVs */ + for (i = S7_PON_OCV; i < PON_OCV_MAX; i++) { + rc = qg_read_ocv(chip, &ocv[i].ocv_uv, + &ocv[i].ocv_raw, i); if (rc < 0) - goto done; + pr_err("Failed to read %s OCV rc=%d\n", + ocv[i].ocv_type, rc); + else + qg_dbg(chip, QG_DEBUG_PON, "%s OCV=%d\n", + ocv[i].ocv_type, ocv[i].ocv_uv); + } - if (!ocv_uv) { - /* SDAM_PON_OCV is not set */ + if (ocv[S3_LAST_OCV].ocv_raw == FIFO_V_RESET_VAL) { + if (!ocv[SDAM_PON_OCV].ocv_uv) { + strlcpy(ocv_type, "S7_PON_SOC", 20); + ocv_uv = ocv[S7_PON_OCV].ocv_uv; + } else if (ocv[SDAM_PON_OCV].ocv_uv <= + ocv[S7_PON_OCV].ocv_uv) { + strlcpy(ocv_type, "S7_PON_SOC", 20); + ocv_uv = ocv[S7_PON_OCV].ocv_uv; + } else if (!shutdown[SDAM_VALID] && + ((ocv[SDAM_PON_OCV].ocv_uv - + ocv[S7_PON_OCV].ocv_uv) > + S7_ERROR_MARGIN_UV)) { strlcpy(ocv_type, "S7_PON_SOC", 20); - rc = qg_read_ocv(chip, &ocv_uv, &ocv_raw, - S7_PON_OCV); - if (rc < 0) - goto done; + ocv_uv = ocv[S7_PON_OCV].ocv_uv; + } else { + strlcpy(ocv_type, "SDAM_PON_SOC", 20); + ocv_uv = ocv[SDAM_PON_OCV].ocv_uv; + } + } else { + if (ocv[S3_LAST_OCV].ocv_uv >= ocv[S7_PON_OCV].ocv_uv) { + strlcpy(ocv_type, "S3_LAST_SOC", 20); + ocv_uv = ocv[S3_LAST_OCV].ocv_uv; + } else { + strlcpy(ocv_type, "S7_PON_SOC", 20); + ocv_uv = ocv[S7_PON_OCV].ocv_uv; } } + ocv_uv = CAP(QG_MIN_OCV_UV, QG_MAX_OCV_UV, ocv_uv); rc = lookup_soc_ocv(&soc, ocv_uv, batt_temp, false); if (rc < 0) { pr_err("Failed to lookup SOC@PON rc=%d\n", rc); @@ -1996,13 +2682,16 @@ static int qg_determine_pon_soc(struct qpnp_qg *chip) if (rc < 0) pr_err("Failed to update MSOC register rc=%d\n", rc); - rc = qg_update_sdam_params(chip); + rc = qg_store_soc_params(chip); if 
(rc < 0) pr_err("Failed to update sdam params rc=%d\n", rc); pr_info("using %s @ PON ocv_uv=%duV soc=%d\n", ocv_type, ocv_uv, chip->msoc); + /* SOC reporting is now ready */ + chip->soc_reporting_ready = 1; + return 0; } @@ -2025,6 +2714,7 @@ static int qg_set_wa_flags(struct qpnp_qg *chip) return 0; } +#define ADC_CONV_DLY_512MS 0xA static int qg_hw_init(struct qpnp_qg *chip) { int rc, temp; @@ -2205,6 +2895,22 @@ static int qg_hw_init(struct qpnp_qg *chip) return rc; } + /* disable S5 */ + rc = qg_masked_write(chip, chip->qg_base + + QG_S5_OCV_VALIDATE_MEAS_CTL1_REG, + ALLOW_S5_BIT, 0); + if (rc < 0) + pr_err("Failed to disable S5 rc=%d\n", rc); + + /* change PON OCV time to 512ms */ + rc = qg_masked_write(chip, chip->qg_base + + QG_S7_PON_OCV_MEAS_CTL1_REG, + ADC_CONV_DLY_MASK, + ADC_CONV_DLY_512MS); + if (rc < 0) + pr_err("Failed to reconfigure S7-delay rc=%d\n", rc); + + return 0; } @@ -2224,6 +2930,10 @@ static int qg_post_init(struct qpnp_qg *chip) QG_INIT_STATE_IRQ_DISABLE, true, 0); } + /* restore ESR data */ + if (!chip->dt.esr_disable) + qg_retrieve_esr_params(chip); + return 0; } @@ -2298,10 +3008,12 @@ static int qg_request_irqs(struct qpnp_qg *chip) return 0; } +#define QG_TTF_ITERM_DELTA_MA 1 static int qg_alg_init(struct qpnp_qg *chip) { struct cycle_counter *counter; struct cap_learning *cl; + struct ttf *ttf; struct device_node *node = chip->dev->of_node; int rc; @@ -2324,6 +3036,28 @@ static int qg_alg_init(struct qpnp_qg *chip) chip->counter = counter; + ttf = devm_kzalloc(chip->dev, sizeof(*ttf), GFP_KERNEL); + if (!ttf) + return -ENOMEM; + + ttf->get_ttf_param = qg_get_ttf_param; + ttf->awake_voter = qg_ttf_awake_voter; + ttf->iterm_delta = QG_TTF_ITERM_DELTA_MA; + ttf->data = chip; + + rc = ttf_tte_init(ttf); + if (rc < 0) { + dev_err(chip->dev, "Error in initializing ttf, rc:%d\n", + rc); + ttf->data = NULL; + counter->data = NULL; + devm_kfree(chip->dev, ttf); + devm_kfree(chip->dev, counter); + return rc; + } + + chip->ttf = ttf; + chip->dt.cl_disable = of_property_read_bool(node, "qcom,cl-disable"); @@ -2348,6 +3082,7 @@ static int qg_alg_init(struct qpnp_qg *chip) counter->data = NULL; cl->data = NULL; devm_kfree(chip->dev, counter); + devm_kfree(chip->dev, ttf); devm_kfree(chip->dev, cl); return rc; } @@ -2373,10 +3108,13 @@ static int qg_alg_init(struct qpnp_qg *chip) #define DEFAULT_CL_MAX_START_SOC 15 #define DEFAULT_CL_MIN_TEMP_DECIDEGC 150 #define DEFAULT_CL_MAX_TEMP_DECIDEGC 500 -#define DEFAULT_CL_MAX_INC_DECIPERC 5 -#define DEFAULT_CL_MAX_DEC_DECIPERC 100 -#define DEFAULT_CL_MIN_LIM_DECIPERC 0 -#define DEFAULT_CL_MAX_LIM_DECIPERC 0 +#define DEFAULT_CL_MAX_INC_DECIPERC 10 +#define DEFAULT_CL_MAX_DEC_DECIPERC 20 +#define DEFAULT_CL_MIN_LIM_DECIPERC 500 +#define DEFAULT_CL_MAX_LIM_DECIPERC 100 +#define DEFAULT_ESR_QUAL_CURRENT_UA 130000 +#define DEFAULT_ESR_QUAL_VBAT_UV 7000 +#define DEFAULT_ESR_DISABLE_SOC 1000 static int qg_parse_dt(struct qpnp_qg *chip) { int rc = 0; @@ -2570,6 +3308,33 @@ static int qg_parse_dt(struct qpnp_qg *chip) else chip->dt.rbat_conn_mohm = temp; + /* esr */ + chip->dt.esr_disable = of_property_read_bool(node, + "qcom,esr-disable"); + + chip->dt.esr_discharge_enable = of_property_read_bool(node, + "qcom,esr-discharge-enable"); + + rc = of_property_read_u32(node, "qcom,esr-qual-current-ua", &temp); + if (rc < 0) + chip->dt.esr_qual_i_ua = DEFAULT_ESR_QUAL_CURRENT_UA; + else + chip->dt.esr_qual_i_ua = temp; + + rc = of_property_read_u32(node, "qcom,esr-qual-vbatt-uv", &temp); + if (rc < 0) + chip->dt.esr_qual_v_uv = 
DEFAULT_ESR_QUAL_VBAT_UV; + else + chip->dt.esr_qual_v_uv = temp; + + rc = of_property_read_u32(node, "qcom,esr-disable-soc", &temp); + if (rc < 0) + chip->dt.esr_disable_soc = DEFAULT_ESR_DISABLE_SOC; + else + chip->dt.esr_disable_soc = temp * 100; + + chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns"); + /* Capacity learning params*/ if (!chip->dt.cl_disable) { chip->dt.cl_feedback_on = of_property_read_bool(node, @@ -2629,9 +3394,9 @@ static int qg_parse_dt(struct qpnp_qg *chip) chip->cl->dt.min_start_soc, chip->cl->dt.max_start_soc, chip->cl->dt.min_temp, chip->cl->dt.max_temp); } - qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d\n", + qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n", chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv, - chip->dt.delta_soc); + chip->dt.delta_soc, chip->dt.qg_ext_sense); return 0; } @@ -2646,6 +3411,7 @@ static int process_suspend(struct qpnp_qg *chip) if (!chip->profile_loaded) return 0; + cancel_delayed_work_sync(&chip->ttf->ttf_work); /* disable GOOD_OCV IRQ in sleep */ vote(chip->good_ocv_irq_disable_votable, QG_INIT_STATE_IRQ_DISABLE, true, 0); @@ -2778,6 +3544,8 @@ static int process_resume(struct qpnp_qg *chip) chip->suspend_data = false; } + schedule_delayed_work(&chip->ttf->ttf_work, 0); + return rc; } @@ -2866,6 +3634,11 @@ static int qpnp_qg_probe(struct platform_device *pdev) chip->maint_soc = -EINVAL; chip->batt_soc = INT_MIN; chip->cc_soc = INT_MIN; + chip->full_soc = QG_SOC_FULL; + chip->chg_iterm_ma = INT_MIN; + chip->soh = -EINVAL; + chip->esr_actual = -EINVAL; + chip->esr_nominal = -EINVAL; rc = qg_alg_init(chip); if (rc < 0) { @@ -2931,6 +3704,7 @@ static int qpnp_qg_probe(struct platform_device *pdev) pr_err("Error in restoring cycle_count, rc=%d\n", rc); return rc; } + schedule_delayed_work(&chip->ttf->ttf_work, 10000); } rc = qg_determine_pon_soc(chip); diff --git a/drivers/power/supply/qcom/qpnp-qnovo5.c b/drivers/power/supply/qcom/qpnp-qnovo5.c new file mode 100644 index 0000000000000000000000000000000000000000..5037fa25e42c29933199b33399a50726138dc041 --- /dev/null +++ b/drivers/power/supply/qcom/qpnp-qnovo5.c @@ -0,0 +1,1401 @@ +/* Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define QNOVO_PE_CTRL 0x45 +#define QNOVO_PTRAIN_EN_BIT BIT(7) + +#define QNOVO_NREST1_CTRL 0x4A +#define QNOVO_NPULS1_CTRL 0x4B +#define QNOVO_PREST1_CTRL 0x4C +#define QNOVO_NREST2_CTRL 0x4D +#define QNOVO_NPULS2_CTRL 0x4E +#define QNOVO_NREST3_CTRL 0x4F +#define QNOVO_NPULS3_CTRL 0x50 +#define QNOVO_ERROR_MASK 0x51 +#define QNOVO_VLIM1_LSB_CTRL 0x52 +#define QNOVO_VLIM1_MSB_CTRL 0x53 +#define QNOVO_VLIM2_LSB_CTRL 0x54 +#define QNOVO_VLIM2_MSB_CTRL 0x55 +#define QNOVO_PVOLT1_LSB 0x56 +#define QNOVO_PVOLT1_MSB 0x57 +#define QNOVO_RVOLT2_LSB 0x58 +#define QNOVO_RVOLT2_MSB 0x59 +#define QNOVO_PVOLT2_LSB 0x5A +#define QNOVO_PVOLT2_MSB 0x5B +#define QNOVO_PCURR1_LSB 0x5C +#define QNOVO_PCURR1_MSB 0x5D +#define QNOVO_PCURR2_LSB 0x5E +#define QNOVO_PCURR2_MSB 0x5F +#define QNOVO_PCURR1_SUM_LSB 0x60 +#define QNOVO_PCURR1_SUM_MSB 0x61 +#define QNOVO_PCURR1_TERMINAL_LSB 0x62 +#define QNOVO_PCURR1_TERMINAL_MSB 0x63 +#define QNOVO_PTTIME_LSB 0x64 +#define QNOVO_PTTIME_MSB 0x65 +#define QNOVO_PPCNT 0x66 +#define QNOVO_PPCNT_MAX_CTRL 0x67 +#define QNOVO_RVOLT3_VMAX_LSB 0x68 +#define QNOVO_RVOLT3_VMAX_MSB 0x69 +#define QNOVO_RVOLT3_VMAX_SNUM 0x6A +#define QNOVO_PTTIME_MAX_LSB 0x6C +#define QNOVO_PTTIME_MAX_MSB 0x6D +#define QNOVO_PHASE 0x6E +#define QNOVO_P2_TICK 0x6F +#define QNOVO_PTRAIN_STS 0x70 +#define QNOVO_ERROR_STS 0x71 + +/* QNOVO_ERROR_STS */ +#define ERR_CHARGING_DISABLED BIT(6) +#define ERR_JEITA_HARD_CONDITION BIT(5) +#define ERR_JEITA_SOFT_CONDITION BIT(4) +#define ERR_CV_MODE BIT(3) +#define ERR_SAFETY_TIMER_EXPIRED BIT(2) +#define ERR_BAT_OV BIT(1) +#define ERR_BATTERY_MISSING BIT(0) + +#define DRV_MAJOR_VERSION 1 +#define DRV_MINOR_VERSION 1 + +#define USER_VOTER "user_voter" +#define SHUTDOWN_VOTER "user_voter" +#define OK_TO_QNOVO_VOTER "ok_to_qnovo_voter" + +#define QNOVO_VOTER "qnovo_voter" +#define QNOVO_OVERALL_VOTER "QNOVO_OVERALL_VOTER" +#define QNI_PT_VOTER "QNI_PT_VOTER" + +#define HW_OK_TO_QNOVO_VOTER "HW_OK_TO_QNOVO_VOTER" +#define CHG_READY_VOTER "CHG_READY_VOTER" +#define USB_READY_VOTER "USB_READY_VOTER" + +#define CLASS_ATTR_IDX_RO(_name, _func) \ +static ssize_t _name##_show(struct class *c, struct class_attribute *attr, \ + char *ubuf) \ +{ \ + return _func##_show(c, attr, ubuf); \ +}; \ +static CLASS_ATTR_RO(_name) + +#define CLASS_ATTR_IDX_RW(_name, _func) \ +static ssize_t _name##_show(struct class *c, struct class_attribute *attr, \ + char *ubuf) \ +{ \ + return _func##_show(c, attr, ubuf); \ +}; \ +static ssize_t _name##_store(struct class *c, struct class_attribute *attr, \ + const char *ubuf, size_t count) \ +{ \ + return _func##_store(c, attr, ubuf, count); \ +}; \ +static CLASS_ATTR_RW(_name) + +struct qnovo { + struct regmap *regmap; + struct device *dev; + struct mutex write_lock; + struct class qnovo_class; + struct power_supply *batt_psy; + struct power_supply *usb_psy; + struct notifier_block nb; + struct votable *disable_votable; + struct votable *pt_dis_votable; + struct votable *not_ok_to_qnovo_votable; + struct votable *chg_ready_votable; + struct votable *awake_votable; + struct work_struct status_change_work; + struct delayed_work usb_debounce_work; + int base; + int fv_uV_request; + int fcc_uA_request; + int usb_present; +}; + +static int debug_mask; +module_param_named(debug_mask, debug_mask, int, 0600); + +#define qnovo_dbg(chip, reason, fmt, ...) 
\ + do { \ + if (debug_mask & (reason)) \ + dev_info(chip->dev, fmt, ##__VA_ARGS__); \ + else \ + dev_dbg(chip->dev, fmt, ##__VA_ARGS__); \ + } while (0) + +static int qnovo5_read(struct qnovo *chip, u16 addr, u8 *buf, int len) +{ + return regmap_bulk_read(chip->regmap, chip->base + addr, buf, len); +} + +static int qnovo5_masked_write(struct qnovo *chip, u16 addr, u8 mask, u8 val) +{ + return regmap_update_bits(chip->regmap, chip->base + addr, mask, val); +} + +static int qnovo5_write(struct qnovo *chip, u16 addr, u8 *buf, int len) +{ + return regmap_bulk_write(chip->regmap, chip->base + addr, buf, len); +} + +static bool is_batt_available(struct qnovo *chip) +{ + if (!chip->batt_psy) + chip->batt_psy = power_supply_get_by_name("battery"); + + if (!chip->batt_psy) + return false; + + return true; +} + +static bool is_usb_available(struct qnovo *chip) +{ + if (!chip->usb_psy) + chip->usb_psy = power_supply_get_by_name("usb"); + + if (!chip->usb_psy) + return false; + + return true; +} + +static int qnovo_batt_psy_update(struct qnovo *chip, bool disable) +{ + union power_supply_propval pval = {0}; + int rc = 0; + + if (!is_batt_available(chip)) + return -EINVAL; + + if (chip->fv_uV_request != -EINVAL) { + pval.intval = disable ? -EINVAL : chip->fv_uV_request; + rc = power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_VOLTAGE_QNOVO, + &pval); + if (rc < 0) { + pr_err("Couldn't set prop qnovo_fv rc = %d\n", rc); + return -EINVAL; + } + } + + if (chip->fcc_uA_request != -EINVAL) { + pval.intval = disable ? -EINVAL : chip->fcc_uA_request; + rc = power_supply_set_property(chip->batt_psy, + POWER_SUPPLY_PROP_CURRENT_QNOVO, + &pval); + if (rc < 0) { + pr_err("Couldn't set prop qnovo_fcc rc = %d\n", rc); + return -EINVAL; + } + } + + return rc; +} + +static int qnovo_disable_cb(struct votable *votable, void *data, int disable, + const char *client) +{ + struct qnovo *chip = data; + int rc; + + vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0); + rc = qnovo_batt_psy_update(chip, disable); + return rc; +} + +static int pt_dis_votable_cb(struct votable *votable, void *data, int disable, + const char *client) +{ + struct qnovo *chip = data; + int rc; + + rc = qnovo5_masked_write(chip, QNOVO_PE_CTRL, QNOVO_PTRAIN_EN_BIT, + (bool)disable ? 0 : QNOVO_PTRAIN_EN_BIT); + if (rc < 0) { + dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n", + (bool)disable ? 
"disable" : "enable", rc); + return rc; + } + + return 0; +} + +static int not_ok_to_qnovo_cb(struct votable *votable, void *data, + int not_ok_to_qnovo, + const char *client) +{ + struct qnovo *chip = data; + + vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0); + if (not_ok_to_qnovo) + vote(chip->disable_votable, USER_VOTER, true, 0); + + kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); + return 0; +} + +static int chg_ready_cb(struct votable *votable, void *data, int ready, + const char *client) +{ + struct qnovo *chip = data; + + vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0); + + return 0; +} + +static int awake_cb(struct votable *votable, void *data, int awake, + const char *client) +{ + struct qnovo *chip = data; + + if (awake) + pm_stay_awake(chip->dev); + else + pm_relax(chip->dev); + + return 0; +} + +static int qnovo5_parse_dt(struct qnovo *chip) +{ + struct device_node *node = chip->dev->of_node; + int rc; + + if (!node) { + pr_err("device tree node missing\n"); + return -EINVAL; + } + + rc = of_property_read_u32(node, "reg", &chip->base); + if (rc < 0) { + pr_err("Couldn't read base rc = %d\n", rc); + return rc; + } + + return 0; +} + +enum { + VER = 0, + OK_TO_QNOVO, + QNOVO_ENABLE, + PT_ENABLE, + FV_REQUEST, + FCC_REQUEST, + PE_CTRL_REG, + PTRAIN_STS_REG, + ERR_STS_REG, + PREST1, + NREST1, + NPULS1, + PPCNT, + PPCNT_MAX, + VLIM1, + PVOLT1, + PCURR1, + PCURR1_SUM, + PCURR1_TERMINAL, + PTTIME, + PTTIME_MAX, + NREST2, + NPULS2, + VLIM2, + PVOLT2, + RVOLT2, + PCURR2, + NREST3, + NPULS3, + RVOLT3_VMAX, + RVOLT3_VMAX_SNUM, + VBATT, + IBATT, + BATTTEMP, + BATTSOC, + MAX_PROP +}; + +struct param_info { + char *name; + int start_addr; + int num_regs; + int reg_to_unit_multiplier; + int reg_to_unit_divider; + int reg_to_unit_offset; + int min_val; + int max_val; + char *units_str; +}; + +static struct param_info params[] = { + [FV_REQUEST] = { + .units_str = "uV", + }, + [FCC_REQUEST] = { + .units_str = "uA", + }, + [PE_CTRL_REG] = { + .name = "CTRL_REG", + .start_addr = QNOVO_PE_CTRL, + .num_regs = 1, + .units_str = "", + }, + [PTRAIN_STS_REG] = { + .name = "PTRAIN_STS", + .start_addr = QNOVO_PTRAIN_STS, + .num_regs = 1, + .units_str = "", + }, + [ERR_STS_REG] = { + .name = "RAW_CHGR_ERR", + .start_addr = QNOVO_ERROR_STS, + .num_regs = 1, + .units_str = "", + }, + [PREST1] = { + .name = "PREST1", + .start_addr = QNOVO_PREST1_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [NREST1] = { + .name = "NREST1", + .start_addr = QNOVO_NREST1_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [NPULS1] = { + .name = "NPULS1", + .start_addr = QNOVO_NPULS1_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [PPCNT] = { + .name = "PPCNT", + .start_addr = QNOVO_PPCNT, + .num_regs = 1, + .reg_to_unit_multiplier = 1, + .reg_to_unit_divider = 1, + .min_val = 1, + .max_val = 255, + .units_str = "pulses", + }, + [PPCNT_MAX] = { + .name = "PPCNT_MAX", + .start_addr = QNOVO_PPCNT_MAX_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 1, + .reg_to_unit_divider = 1, + .min_val = 1, + .max_val = 255, + .units_str = "pulses", + }, + [VLIM1] = { + .name = "VLIM1", + .start_addr = QNOVO_VLIM1_LSB_CTRL, + .num_regs = 2, + .reg_to_unit_multiplier = 194637, /* converts to nV 
*/ + .reg_to_unit_divider = 1, + .min_val = 2200000, + .max_val = 4500000, + .units_str = "uV", + }, + [PVOLT1] = { + .name = "PVOLT1", + .start_addr = QNOVO_PVOLT1_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 194637, /* converts to nV */ + .reg_to_unit_divider = 1, + .units_str = "uV", + }, + [PCURR1] = { + .name = "PCURR1", + .start_addr = QNOVO_PCURR1_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 305185, /* converts to nA */ + .reg_to_unit_divider = 1, + .units_str = "uA", + }, + [PCURR1_SUM] = { + .name = "PCURR1_SUM", + .start_addr = QNOVO_PCURR1_SUM_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 305185, /* converts to nA */ + .reg_to_unit_divider = 1, + .units_str = "uA", + }, + [PCURR1_TERMINAL] = { + .name = "PCURR1_TERMINAL", + .start_addr = QNOVO_PCURR1_TERMINAL_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 305185, /* converts to nA */ + .reg_to_unit_divider = 1, + .units_str = "uA", + }, + [PTTIME] = { + .name = "PTTIME", + .start_addr = QNOVO_PTTIME_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 1, + .reg_to_unit_divider = 1, + .units_str = "S", + }, + [PTTIME_MAX] = { + .name = "PTTIME_MAX", + .start_addr = QNOVO_PTTIME_MAX_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 1, + .reg_to_unit_divider = 1, + .units_str = "S", + }, + [NREST2] = { + .name = "NREST2", + .start_addr = QNOVO_NREST2_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [NPULS2] = { + .name = "NPULS2", + .start_addr = QNOVO_NPULS2_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [VLIM2] = { + .name = "VLIM2", + .start_addr = QNOVO_VLIM2_LSB_CTRL, + .num_regs = 2, + .reg_to_unit_multiplier = 194637, /* converts to nV */ + .reg_to_unit_divider = 1, + .min_val = 2200000, + .max_val = 4500000, + .units_str = "uV", + }, + [PVOLT2] = { + .name = "PVOLT2", + .start_addr = QNOVO_PVOLT2_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 194637, /* converts to nV */ + .reg_to_unit_divider = 1, + .units_str = "uV", + }, + [RVOLT2] = { + .name = "RVOLT2", + .start_addr = QNOVO_RVOLT2_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 194637, + .reg_to_unit_divider = 1, + .units_str = "uV", + }, + [PCURR2] = { + .name = "PCURR2", + .start_addr = QNOVO_PCURR2_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 305185, /* converts to nA */ + .reg_to_unit_divider = 1, + .units_str = "uA", + }, + [NREST3] = { + .name = "NREST3", + .start_addr = QNOVO_NREST3_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [NPULS3] = { + .name = "NPULS3", + .start_addr = QNOVO_NPULS3_CTRL, + .num_regs = 1, + .reg_to_unit_multiplier = 976650, + .reg_to_unit_divider = 1000, + .min_val = 0, + .max_val = 249135, + .units_str = "uS", + }, + [RVOLT3_VMAX] = { + .name = "RVOLT3_VMAX", + .start_addr = QNOVO_RVOLT3_VMAX_LSB, + .num_regs = 2, + .reg_to_unit_multiplier = 194637, /* converts to nV */ + .reg_to_unit_divider = 1, + .units_str = "uV", + }, + [RVOLT3_VMAX_SNUM] = { + .name = "SNUM", + .start_addr = QNOVO_RVOLT3_VMAX_SNUM, + .num_regs = 1, + .reg_to_unit_multiplier = 1, + .reg_to_unit_divider = 1, + .units_str = "pulses", + }, + [VBATT] = { + .name = "POWER_SUPPLY_PROP_VOLTAGE_NOW", + .start_addr = POWER_SUPPLY_PROP_VOLTAGE_NOW, + .units_str = "uV", + }, + [IBATT] = { + .name = "POWER_SUPPLY_PROP_CURRENT_NOW", + 
.start_addr = POWER_SUPPLY_PROP_CURRENT_NOW, + .units_str = "uA", + }, + [BATTTEMP] = { + .name = "POWER_SUPPLY_PROP_TEMP", + .start_addr = POWER_SUPPLY_PROP_TEMP, + .units_str = "uV", + }, + [BATTSOC] = { + .name = "POWER_SUPPLY_PROP_CAPACITY", + .start_addr = POWER_SUPPLY_PROP_CAPACITY, + .units_str = "%", + }, +}; + +static struct attribute *qnovo_class_attrs[]; + +static int __find_attr_idx(struct attribute *attr) +{ + int i; + + for (i = 0; i < MAX_PROP; i++) + if (attr == qnovo_class_attrs[i]) + break; + + if (i == MAX_PROP) + return -EINVAL; + + return i; +} + +static ssize_t version_show(struct class *c, struct class_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d.%d\n", + DRV_MAJOR_VERSION, DRV_MINOR_VERSION); +} +static CLASS_ATTR_RO(version); + +static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr, + char *buf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + int val = get_effective_result(chip->not_ok_to_qnovo_votable); + + return snprintf(buf, PAGE_SIZE, "%d\n", !val); +} +static CLASS_ATTR_RO(ok_to_qnovo); + +static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + int val = get_effective_result(chip->disable_votable); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", !val); +} + +static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + unsigned long val; + + if (kstrtoul(ubuf, 0, &val)) + return -EINVAL; + + vote(chip->disable_votable, USER_VOTER, !val, 0); + + return count; +} +static CLASS_ATTR_RW(qnovo_enable); + +static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + int val = get_effective_result(chip->pt_dis_votable); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", !val); +} + +static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + unsigned long val; + + if (kstrtoul(ubuf, 0, &val)) + return -EINVAL; + + /* val being 0, userspace wishes to disable pt so vote true */ + vote(chip->pt_dis_votable, QNI_PT_VOTER, val ? 
false : true, 0); + + return count; +} +static CLASS_ATTR_RW(pt_enable); + + +static ssize_t val_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + int i; + int val = 0; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + if (i == FV_REQUEST) + val = chip->fv_uV_request; + + if (i == FCC_REQUEST) + val = chip->fcc_uA_request; + + return snprintf(ubuf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t val_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + int i; + unsigned long val; + + if (kstrtoul(ubuf, 0, &val)) + return -EINVAL; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + if (i == FV_REQUEST) + chip->fv_uV_request = val; + + if (i == FCC_REQUEST) + chip->fcc_uA_request = val; + + if (!get_effective_result(chip->disable_votable)) + qnovo_batt_psy_update(chip, false); + + return count; +} + +static ssize_t reg_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + u16 regval; + int rc, i; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + rc = qnovo5_read(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't read %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + regval = buf[1] << 8 | buf[0]; + + return snprintf(ubuf, PAGE_SIZE, "0x%04x\n", regval); +} + +static ssize_t reg_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + unsigned long val; + int rc, i; + + if (kstrtoul(ubuf, 0, &val)) + return -EINVAL; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + buf[0] = val & 0xFF; + buf[1] = (val >> 8) & 0xFF; + + rc = qnovo5_write(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't write %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + return count; +} + +static ssize_t time_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + u16 regval; + int val; + int rc, i; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + rc = qnovo5_read(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't read %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + regval = buf[1] << 8 | buf[0]; + + val = ((regval * params[i].reg_to_unit_multiplier) + / params[i].reg_to_unit_divider) + - params[i].reg_to_unit_offset; + + return snprintf(ubuf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t time_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + u16 regval; + unsigned long val; + int rc, i; + + if (kstrtoul(ubuf, 0, &val)) + return -EINVAL; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + if (val < params[i].min_val || val > params[i].max_val) { + pr_err("Out of Range %d%s for %s\n", (int)val, + params[i].units_str, + params[i].name); + return -ERANGE; + } + + regval = (((int)val + params[i].reg_to_unit_offset) + * params[i].reg_to_unit_divider) + / params[i].reg_to_unit_multiplier; + buf[0] = regval & 
0xFF; + buf[1] = (regval >> 8) & 0xFF; + + rc = qnovo5_write(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't write %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + + return count; +} + +static ssize_t current_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + int rc, i, regval_uA; + s64 regval_nA; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + rc = qnovo5_read(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't read %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + + regval_nA = (s16)(buf[1] << 8 | buf[0]); + regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier, + params[i].reg_to_unit_divider) + - params[i].reg_to_unit_offset; + + regval_uA = div_s64(regval_nA, 1000); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", regval_uA); +} + +static ssize_t current_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + int rc, i; + long val_uA; + s64 regval_nA; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + if (kstrtoul(ubuf, 0, &val_uA)) + return -EINVAL; + + if (val_uA < params[i].min_val || val_uA > params[i].max_val) { + pr_err("Out of Range %d%s for %s\n", (int)val_uA, + params[i].units_str, + params[i].name); + return -ERANGE; + } + + regval_nA = (s64)val_uA * 1000; + regval_nA = div_s64((regval_nA + params[i].reg_to_unit_offset) + * params[i].reg_to_unit_divider, + params[i].reg_to_unit_multiplier); + buf[0] = regval_nA & 0xFF; + buf[1] = (regval_nA >> 8) & 0xFF; + + rc = qnovo5_write(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't write %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + + return count; +} + +static ssize_t voltage_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + int rc, i, regval_uV; + s64 regval_nV; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + rc = qnovo5_read(chip, params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't read %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + regval_nV = buf[1] << 8 | buf[0]; + regval_nV = div_s64(regval_nV * params[i].reg_to_unit_multiplier, + params[i].reg_to_unit_divider) + - params[i].reg_to_unit_offset; + + regval_uV = div_s64(regval_nV, 1000); + + return snprintf(ubuf, PAGE_SIZE, "%d\n", regval_uV); +} + +static ssize_t voltage_store(struct class *c, struct class_attribute *attr, + const char *ubuf, size_t count) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + u8 buf[2] = {0, 0}; + int rc, i; + unsigned long val_uV; + s64 regval_nV; + + if (kstrtoul(ubuf, 0, &val_uV)) + return -EINVAL; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + if (val_uV < params[i].min_val || val_uV > params[i].max_val) { + pr_err("Out of Range %d%s for %s\n", (int)val_uV, + params[i].units_str, + params[i].name); + return -ERANGE; + } + + regval_nV = (s64)val_uV * 1000; + regval_nV = div_s64((regval_nV + params[i].reg_to_unit_offset) + * params[i].reg_to_unit_divider, + params[i].reg_to_unit_multiplier); + buf[0] = regval_nV & 0xFF; + buf[1] = ((u64)regval_nV >> 8) & 0xFF; + + rc = qnovo5_write(chip, 
params[i].start_addr, buf, params[i].num_regs); + if (rc < 0) { + pr_err("Couldn't write %s rc = %d\n", params[i].name, rc); + return -EINVAL; + } + + return count; +} + +static ssize_t batt_prop_show(struct class *c, struct class_attribute *attr, + char *ubuf) +{ + struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + union power_supply_propval pval = {0}; + int i, prop, rc = -EINVAL; + + if (!is_batt_available(chip)) + return -EINVAL; + + i = __find_attr_idx(&attr->attr); + if (i < 0) + return -EINVAL; + + prop = params[i].start_addr; + + rc = power_supply_get_property(chip->batt_psy, prop, &pval); + if (rc < 0) { + pr_err("Couldn't read battery prop %s rc = %d\n", + params[i].name, rc); + return -EINVAL; + } + + return snprintf(ubuf, PAGE_SIZE, "%d\n", pval.intval); +} + +CLASS_ATTR_IDX_RW(fv_uV_request, val); +CLASS_ATTR_IDX_RW(fcc_uA_request, val); +CLASS_ATTR_IDX_RW(PE_CTRL_REG, reg); +CLASS_ATTR_IDX_RO(PTRAIN_STS_REG, reg); +CLASS_ATTR_IDX_RO(ERR_STS_REG, reg); +CLASS_ATTR_IDX_RW(PREST1_uS, time); +CLASS_ATTR_IDX_RW(NREST1_uS, time); +CLASS_ATTR_IDX_RW(NPULS1_uS, time); +CLASS_ATTR_IDX_RO(PPCNT, time); +CLASS_ATTR_IDX_RW(PPCNT_MAX, time); +CLASS_ATTR_IDX_RW(VLIM1_uV, voltage); +CLASS_ATTR_IDX_RO(PVOLT1_uV, voltage); +CLASS_ATTR_IDX_RO(PCURR1_uA, current); +CLASS_ATTR_IDX_RO(PCURR1_SUM_uA, current); +CLASS_ATTR_IDX_RW(PCURR1_TERMINAL_uA, current); +CLASS_ATTR_IDX_RO(PTTIME_S, time); +CLASS_ATTR_IDX_RW(PTTIME_MAX_S, time); +CLASS_ATTR_IDX_RW(NREST2_uS, time); +CLASS_ATTR_IDX_RW(NPULS2_uS, time); +CLASS_ATTR_IDX_RW(VLIM2_uV, voltage); +CLASS_ATTR_IDX_RO(PVOLT2_uV, voltage); +CLASS_ATTR_IDX_RO(RVOLT2_uV, voltage); +CLASS_ATTR_IDX_RO(PCURR2_uA, current); +CLASS_ATTR_IDX_RW(NREST3_uS, time); +CLASS_ATTR_IDX_RW(NPULS3_uS, time); +CLASS_ATTR_IDX_RO(RVOLT3_VMAX_uV, voltage); +CLASS_ATTR_IDX_RO(RVOLT3_VMAX_SNUM, time); +CLASS_ATTR_IDX_RO(VBATT_uV, batt_prop); +CLASS_ATTR_IDX_RO(IBATT_uA, batt_prop); +CLASS_ATTR_IDX_RO(BATTTEMP_deciDegC, batt_prop); +CLASS_ATTR_IDX_RO(BATTSOC, batt_prop); + +static struct attribute *qnovo_class_attrs[] = { + [VER] = &class_attr_version.attr, + [OK_TO_QNOVO] = &class_attr_ok_to_qnovo.attr, + [QNOVO_ENABLE] = &class_attr_qnovo_enable.attr, + [PT_ENABLE] = &class_attr_pt_enable.attr, + [FV_REQUEST] = &class_attr_fv_uV_request.attr, + [FCC_REQUEST] = &class_attr_fcc_uA_request.attr, + [PE_CTRL_REG] = &class_attr_PE_CTRL_REG.attr, + [PTRAIN_STS_REG] = &class_attr_PTRAIN_STS_REG.attr, + [ERR_STS_REG] = &class_attr_ERR_STS_REG.attr, + [PREST1] = &class_attr_PREST1_uS.attr, + [NREST1] = &class_attr_NREST1_uS.attr, + [NPULS1] = &class_attr_NPULS1_uS.attr, + [PPCNT] = &class_attr_PPCNT.attr, + [PPCNT_MAX] = &class_attr_PPCNT_MAX.attr, + [VLIM1] = &class_attr_VLIM1_uV.attr, + [PVOLT1] = &class_attr_PVOLT1_uV.attr, + [PCURR1] = &class_attr_PCURR1_uA.attr, + [PCURR1_SUM] = &class_attr_PCURR1_SUM_uA.attr, + [PCURR1_TERMINAL] = &class_attr_PCURR1_TERMINAL_uA.attr, + [PTTIME] = &class_attr_PTTIME_S.attr, + [PTTIME_MAX] = &class_attr_PTTIME_MAX_S.attr, + [NREST2] = &class_attr_NREST2_uS.attr, + [NPULS2] = &class_attr_NPULS2_uS.attr, + [VLIM2] = &class_attr_VLIM2_uV.attr, + [PVOLT2] = &class_attr_PVOLT2_uV.attr, + [RVOLT2] = &class_attr_RVOLT2_uV.attr, + [PCURR2] = &class_attr_PCURR2_uA.attr, + [NREST3] = &class_attr_NREST3_uS.attr, + [NPULS3] = &class_attr_NPULS3_uS.attr, + [RVOLT3_VMAX] = &class_attr_RVOLT3_VMAX_uV.attr, + [RVOLT3_VMAX_SNUM] = &class_attr_RVOLT3_VMAX_SNUM.attr, + [VBATT] = &class_attr_VBATT_uV.attr, + [IBATT] = &class_attr_IBATT_uA.attr, + 
[BATTTEMP] = &class_attr_BATTTEMP_deciDegC.attr, + [BATTSOC] = &class_attr_BATTSOC.attr, + NULL, +}; +ATTRIBUTE_GROUPS(qnovo_class); + +static int qnovo5_update_status(struct qnovo *chip) +{ + u8 val = 0; + int rc; + bool hw_ok_to_qnovo; + + rc = qnovo5_read(chip, QNOVO_ERROR_STS, &val, 1); + if (rc < 0) { + pr_err("Couldn't read error sts rc = %d\n", rc); + hw_ok_to_qnovo = false; + } else { + /* + * For CV mode keep qnovo enabled, userspace is expected to + * disable it after few runs + */ + hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? + true : false; + } + + vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER, + !hw_ok_to_qnovo, 0); + return 0; +} + +static void usb_debounce_work(struct work_struct *work) +{ + struct qnovo *chip = container_of(work, + struct qnovo, usb_debounce_work.work); + + vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0); + vote(chip->awake_votable, USB_READY_VOTER, false, 0); +} + +#define DEBOUNCE_MS 15000 /* 15 seconds */ +static void status_change_work(struct work_struct *work) +{ + struct qnovo *chip = container_of(work, + struct qnovo, status_change_work); + union power_supply_propval pval; + bool usb_present = false; + int rc; + + if (is_usb_available(chip)) { + rc = power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + usb_present = (rc < 0) ? 0 : pval.intval; + } + + if (chip->usb_present && !usb_present) { + /* removal */ + chip->usb_present = 0; + cancel_delayed_work_sync(&chip->usb_debounce_work); + vote(chip->awake_votable, USB_READY_VOTER, false, 0); + vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0); + } else if (!chip->usb_present && usb_present) { + /* insertion */ + chip->usb_present = 1; + vote(chip->awake_votable, USB_READY_VOTER, true, 0); + schedule_delayed_work(&chip->usb_debounce_work, + msecs_to_jiffies(DEBOUNCE_MS)); + } + + qnovo5_update_status(chip); +} + +static int qnovo_notifier_call(struct notifier_block *nb, + unsigned long ev, void *v) +{ + struct power_supply *psy = v; + struct qnovo *chip = container_of(nb, struct qnovo, nb); + + if (ev != PSY_EVENT_PROP_CHANGED) + return NOTIFY_OK; + + if (strcmp(psy->desc->name, "battery") == 0 + || strcmp(psy->desc->name, "bms") == 0 + || strcmp(psy->desc->name, "usb") == 0) + schedule_work(&chip->status_change_work); + + return NOTIFY_OK; +} + +static irqreturn_t handle_ptrain_done(int irq, void *data) +{ + struct qnovo *chip = data; + + qnovo5_update_status(chip); + + /* + * hw resets pt_en bit once ptrain_done triggers. 
+ * vote on behalf of QNI to disable it such that + * once QNI enables it, the votable state changes + * and the callback that sets it is indeed invoked + */ + vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0); + + kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); + return IRQ_HANDLED; +} + +static int qnovo5_hw_init(struct qnovo *chip) +{ + int rc; + u8 val; + + vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0); + + vote(chip->disable_votable, USER_VOTER, true, 0); + + vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0); + vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0); + + /* allow charger error conditions to disable qnovo, CV mode excluded */ + val = ERR_JEITA_SOFT_CONDITION | ERR_BAT_OV | + ERR_BATTERY_MISSING | ERR_SAFETY_TIMER_EXPIRED | + ERR_CHARGING_DISABLED | ERR_JEITA_HARD_CONDITION; + rc = qnovo5_write(chip, QNOVO_ERROR_MASK, &val, 1); + if (rc < 0) { + pr_err("Couldn't write QNOVO_ERROR_MASK rc = %d\n", rc); + return rc; + } + + return 0; +} + +static int qnovo5_register_notifier(struct qnovo *chip) +{ + int rc; + + chip->nb.notifier_call = qnovo_notifier_call; + rc = power_supply_reg_notifier(&chip->nb); + if (rc < 0) { + pr_err("Couldn't register psy notifier rc = %d\n", rc); + return rc; + } + + return 0; +} + +static int qnovo5_determine_initial_status(struct qnovo *chip) +{ + status_change_work(&chip->status_change_work); + return 0; +} + +static int qnovo5_request_interrupts(struct qnovo *chip) +{ + int rc = 0; + int irq_ptrain_done = of_irq_get_byname(chip->dev->of_node, + "ptrain-done"); + + rc = devm_request_threaded_irq(chip->dev, irq_ptrain_done, NULL, + handle_ptrain_done, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "ptrain-done", chip); + if (rc < 0) { + pr_err("Couldn't request irq %d rc = %d\n", + irq_ptrain_done, rc); + return rc; + } + + enable_irq_wake(irq_ptrain_done); + + return rc; +} + +static int qnovo5_probe(struct platform_device *pdev) +{ + struct qnovo *chip; + int rc = 0; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->fv_uV_request = -EINVAL; + chip->fcc_uA_request = -EINVAL; + chip->dev = &pdev->dev; + mutex_init(&chip->write_lock); + + chip->regmap = dev_get_regmap(chip->dev->parent, NULL); + if (!chip->regmap) { + pr_err("parent regmap is missing\n"); + return -EINVAL; + } + + rc = qnovo5_parse_dt(chip); + if (rc < 0) { + pr_err("Couldn't parse device tree rc=%d\n", rc); + return rc; + } + + /* set driver data before resources request it */ + platform_set_drvdata(pdev, chip); + + chip->disable_votable = create_votable("QNOVO_DISABLE", VOTE_SET_ANY, + qnovo_disable_cb, chip); + if (IS_ERR(chip->disable_votable)) { + rc = PTR_ERR(chip->disable_votable); + chip->disable_votable = NULL; + goto cleanup; + } + + chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY, + pt_dis_votable_cb, chip); + if (IS_ERR(chip->pt_dis_votable)) { + rc = PTR_ERR(chip->pt_dis_votable); + chip->pt_dis_votable = NULL; + goto destroy_disable_votable; + } + + chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK", + VOTE_SET_ANY, + not_ok_to_qnovo_cb, chip); + if (IS_ERR(chip->not_ok_to_qnovo_votable)) { + rc = PTR_ERR(chip->not_ok_to_qnovo_votable); + chip->not_ok_to_qnovo_votable = NULL; + goto destroy_pt_dis_votable; + } + + chip->chg_ready_votable = create_votable("QNOVO_CHG_READY", + VOTE_SET_ANY, + chg_ready_cb, chip); + if (IS_ERR(chip->chg_ready_votable)) { + rc = PTR_ERR(chip->chg_ready_votable); + chip->chg_ready_votable = NULL; + goto destroy_not_ok_to_qnovo_votable; + } + + 
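/*
 * Overview of the votable chain wired up in this probe (annotation):
 * usb_debounce_work, scheduled 15s after USB insertion, votes USB_READY_VOTER
 * on chg_ready_votable; chg_ready_cb then votes !ready with CHG_READY_VOTER
 * on not_ok_to_qnovo_votable; not_ok_to_qnovo_cb votes OK_TO_QNOVO_VOTER on
 * disable_votable (and forces a USER_VOTER disable when not OK);
 * qnovo_disable_cb votes QNOVO_OVERALL_VOTER on pt_dis_votable and pushes or
 * clears the requested FV/FCC on the battery power supply; pt_dis_votable_cb
 * finally sets or clears QNOVO_PTRAIN_EN_BIT in QNOVO_PE_CTRL.
 */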
chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY, + awake_cb, chip); + if (IS_ERR(chip->awake_votable)) { + rc = PTR_ERR(chip->awake_votable); + chip->awake_votable = NULL; + goto destroy_chg_ready_votable; + } + + INIT_WORK(&chip->status_change_work, status_change_work); + INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work); + + rc = qnovo5_hw_init(chip); + if (rc < 0) { + pr_err("Couldn't initialize hardware rc=%d\n", rc); + goto destroy_awake_votable; + } + + rc = qnovo5_register_notifier(chip); + if (rc < 0) { + pr_err("Couldn't register psy notifier rc = %d\n", rc); + goto unreg_notifier; + } + + rc = qnovo5_determine_initial_status(chip); + if (rc < 0) { + pr_err("Couldn't determine initial status rc=%d\n", rc); + goto unreg_notifier; + } + + rc = qnovo5_request_interrupts(chip); + if (rc < 0) { + pr_err("Couldn't request interrupts rc=%d\n", rc); + goto unreg_notifier; + } + chip->qnovo_class.name = "qnovo", + chip->qnovo_class.owner = THIS_MODULE, + chip->qnovo_class.class_groups = qnovo_class_groups; + + rc = class_register(&chip->qnovo_class); + if (rc < 0) { + pr_err("couldn't register qnovo sysfs class rc = %d\n", rc); + goto unreg_notifier; + } + + device_init_wakeup(chip->dev, true); + + return rc; + +unreg_notifier: + power_supply_unreg_notifier(&chip->nb); +destroy_awake_votable: + destroy_votable(chip->awake_votable); +destroy_chg_ready_votable: + destroy_votable(chip->chg_ready_votable); +destroy_not_ok_to_qnovo_votable: + destroy_votable(chip->not_ok_to_qnovo_votable); +destroy_pt_dis_votable: + destroy_votable(chip->pt_dis_votable); +destroy_disable_votable: + destroy_votable(chip->disable_votable); +cleanup: + platform_set_drvdata(pdev, NULL); + return rc; +} + +static int qnovo5_remove(struct platform_device *pdev) +{ + struct qnovo *chip = platform_get_drvdata(pdev); + + class_unregister(&chip->qnovo_class); + power_supply_unreg_notifier(&chip->nb); + destroy_votable(chip->chg_ready_votable); + destroy_votable(chip->not_ok_to_qnovo_votable); + destroy_votable(chip->pt_dis_votable); + destroy_votable(chip->disable_votable); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static void qnovo5_shutdown(struct platform_device *pdev) +{ + struct qnovo *chip = platform_get_drvdata(pdev); + + vote(chip->not_ok_to_qnovo_votable, SHUTDOWN_VOTER, true, 0); +} + +static const struct of_device_id match_table[] = { + { .compatible = "qcom,qpnp-qnovo5", }, + { }, +}; + +static struct platform_driver qnovo5_driver = { + .driver = { + .name = "qcom,qnovo5-driver", + .owner = THIS_MODULE, + .of_match_table = match_table, + }, + .probe = qnovo5_probe, + .remove = qnovo5_remove, + .shutdown = qnovo5_shutdown, +}; +module_platform_driver(qnovo5_driver); + +MODULE_DESCRIPTION("QPNP Qnovo5 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index e1cdf902097faf6afab2622b27bcd69b52e226eb..87798d56de77c53ac029d547652de288a508c63b 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -73,6 +73,13 @@ static struct smb_params smb5_pmi632_params = { .max_u = 1000000, .step_u = 250000, }, + .dc_icl = { + .name = "DC input current limit", + .reg = DCDC_CFG_REF_MAX_PSNS_REG, + .min_u = 0, + .max_u = 1500000, + .step_u = 50000, + }, .jeita_cc_comp_hot = { .name = "jeita fcc reduction", .reg = JEITA_CCCOMP_CFG_HOT_REG, @@ -140,6 +147,13 @@ static struct smb_params smb5_pm8150b_params = { .max_u = 3000000, .step_u = 500000, }, + .dc_icl = { + .name = 
"DC input current limit", + .reg = DCDC_CFG_REF_MAX_PSNS_REG, + .min_u = 0, + .max_u = 1500000, + .step_u = 50000, + }, .jeita_cc_comp_hot = { .name = "jeita fcc reduction", .reg = JEITA_CCCOMP_CFG_HOT_REG, @@ -362,6 +376,16 @@ static int smb5_parse_dt(struct smb5 *chip) } } + rc = of_property_read_u32(node, "qcom,charger-temp-max", + &chg->charger_temp_max); + if (rc < 0) + chg->charger_temp_max = -EINVAL; + + rc = of_property_read_u32(node, "qcom,smb-temp-max", + &chg->smb_temp_max); + if (rc < 0) + chg->smb_temp_max = -EINVAL; + rc = of_property_read_u32(node, "qcom,float-option", &chip->dt.float_option); if (!rc && (chip->dt.float_option < 0 || chip->dt.float_option > 4)) { @@ -482,6 +506,7 @@ static enum power_supply_property smb5_usb_props[] = { POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, POWER_SUPPLY_PROP_SDP_CURRENT_MAX, POWER_SUPPLY_PROP_CONNECTOR_TYPE, + POWER_SUPPLY_PROP_CONNECTOR_HEALTH, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_SMB_EN_MODE, POWER_SUPPLY_PROP_SCOPE, @@ -594,6 +619,12 @@ static int smb5_usb_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_CONNECTOR_TYPE: val->intval = chg->connector_type; break; + case POWER_SUPPLY_PROP_CONNECTOR_HEALTH: + if (chg->connector_health == -EINVAL) + val->intval = smblib_get_prop_connector_health(chg); + else + val->intval = chg->connector_health; + break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_UNKNOWN; rc = smblib_get_prop_usb_present(chg, &pval); @@ -663,6 +694,10 @@ static int smb5_usb_set_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: rc = smblib_set_prop_sdp_current_max(chg, val); break; + case POWER_SUPPLY_PROP_CONNECTOR_HEALTH: + chg->connector_health = val->intval; + power_supply_changed(chg->usb_psy); + break; default: pr_err("set prop %d is not supported\n", psp); rc = -EINVAL; @@ -677,6 +712,7 @@ static int smb5_usb_prop_is_writeable(struct power_supply *psy, { switch (psp) { case POWER_SUPPLY_PROP_CTM_CURRENT_MAX: + case POWER_SUPPLY_PROP_CONNECTOR_HEALTH: return 1; default: break; @@ -942,6 +978,7 @@ static enum power_supply_property smb5_dc_props[] = { POWER_SUPPLY_PROP_INPUT_SUSPEND, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_REAL_TYPE, }; @@ -963,6 +1000,10 @@ static int smb5_dc_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_ONLINE: rc = smblib_get_prop_dc_online(chg, val); break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + rc = smblib_get_charge_param(chg, &chg->param.dc_icl, + &val->intval); + break; case POWER_SUPPLY_PROP_REAL_TYPE: val->intval = POWER_SUPPLY_TYPE_WIPOWER; break; @@ -989,6 +1030,10 @@ static int smb5_dc_set_prop(struct power_supply *psy, rc = vote(chg->dc_suspend_votable, WBC_VOTER, (bool)val->intval, 0); break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + rc = smblib_set_charge_param(chg, &chg->param.dc_icl, + val->intval); + break; default: return -EINVAL; } @@ -1049,6 +1094,7 @@ static enum power_supply_property smb5_batt_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CHARGER_TEMP, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_MAX, @@ -1115,6 +1161,9 @@ static int smb5_batt_get_prop(struct power_supply *psy, if (pval.intval) rc = smblib_get_prop_charger_temp(chg, val); break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + val->intval = chg->charger_temp_max; + break; case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED: rc = 
smblib_get_prop_input_current_limited(chg, val); break; @@ -1477,8 +1526,9 @@ static int smb5_configure_micro_usb(struct smb_charger *chg) static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip) { + u8 *buf; int rc = 0; - int raw_hi_thresh, raw_lo_thresh; + s16 raw_hi_thresh, raw_lo_thresh; struct smb_charger *chg = &chip->chg; if (chip->dt.term_current_thresh_hi_ma < -10000 || @@ -1491,13 +1541,16 @@ static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip) /* * Conversion: - * raw (A) = (scaled_mA * ADC_CHG_TERM_MASK) / (10 * 1000) + * raw (A) = (scaled_mA * ADC_CHG_TERM_MASK) / (10 * 1000) + * Note: raw needs to be converted to big-endian format. */ if (chip->dt.term_current_thresh_hi_ma) { raw_hi_thresh = ((chip->dt.term_current_thresh_hi_ma * ADC_CHG_TERM_MASK) / 10000); raw_hi_thresh = sign_extend32(raw_hi_thresh, 15); + buf = (u8 *)&raw_hi_thresh; + raw_hi_thresh = buf[1] | (buf[0] << 8); rc = smblib_batch_write(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG, (u8 *)&raw_hi_thresh, 2); @@ -1512,6 +1565,8 @@ static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip) raw_lo_thresh = ((chip->dt.term_current_thresh_lo_ma * ADC_CHG_TERM_MASK) / 10000); raw_lo_thresh = sign_extend32(raw_lo_thresh, 15); + buf = (u8 *)&raw_lo_thresh; + raw_lo_thresh = buf[1] | (buf[0] << 8); rc = smblib_batch_write(chg, CHGR_ADC_ITERM_LO_THD_MSB_REG, (u8 *)&raw_lo_thresh, 2); @@ -1561,6 +1616,26 @@ static int smb5_init_hw(struct smb5 *chip) smblib_get_charge_param(chg, &chg->param.usb_icl, &chg->default_icl_ua); + if (chg->charger_temp_max == -EINVAL) { + rc = smblib_get_thermal_threshold(chg, + DIE_REG_H_THRESHOLD_MSB_REG, + &chg->charger_temp_max); + if (rc < 0) { + dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n", + rc); + return rc; + } + } + + /* Disable SMB Temperature ADC INT */ + rc = smblib_masked_write(chg, MISC_THERMREG_SRC_CFG_REG, + THERMREG_SMB_ADC_SRC_EN_BIT, 0); + if (rc < 0) { + dev_err(chg->dev, "Couldn't configure SMB thermal regulation rc=%d\n", + rc); + return rc; + } + /* Use SW based VBUS control, disable HW autonomous mode */ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG, HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, @@ -1624,6 +1699,13 @@ static int smb5_init_hw(struct smb5 *chip) smblib_rerun_apsd_if_required(chg); } + /* clear the ICL override if it is set */ + rc = smblib_icl_override(chg, false); + if (rc < 0) { + pr_err("Couldn't disable ICL override rc=%d\n", rc); + return rc; + } + /* vote 0mA on usb_icl for non battery platforms */ vote(chg->usb_icl_votable, DEFAULT_VOTER, chip->dt.no_battery, 0); @@ -1644,6 +1726,14 @@ static int smb5_init_hw(struct smb5 *chip) vote(chg->usb_icl_votable, HW_LIMIT_VOTER, chg->hw_max_icl_ua > 0, chg->hw_max_icl_ua); + /* set DC icl_max 1A */ + rc = smblib_set_charge_param(chg, &chg->param.dc_icl, 1000000); + if (rc < 0) { + dev_err(chg->dev, + "Couldn't set dc_icl rc=%d\n", rc); + return rc; + } + /* * AICL configuration: * start from min and AICL ADC disable, and enable aicl rerun @@ -2398,6 +2488,7 @@ static int smb5_probe(struct platform_device *pdev) chg->mode = PARALLEL_MASTER; chg->irq_info = smb5_irqs; chg->die_health = -EINVAL; + chg->connector_health = -EINVAL; chg->otg_present = false; chg->regmap = dev_get_regmap(chg->dev->parent, NULL); diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index fc0ba98cf341f8f974eff7b4112d3b27d5be624f..c61df73d99218afc4bb748e13b412ca95e90a160 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ 
b/drivers/power/supply/qcom/smb-lib.c @@ -350,7 +350,7 @@ static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz) * PROP_BUCK_FREQ property - they could be running * with a fixed frequency */ - power_supply_set_property(chg->pl.psy, + rc = power_supply_set_property(chg->pl.psy, POWER_SUPPLY_PROP_BUCK_FREQ, &pval); } diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c index 59f2466a48ba97cd002a65ca50f3d23f130317cd..0a2f99199134d12685eeab05b4f29be81026d27c 100644 --- a/drivers/power/supply/qcom/smb1355-charger.c +++ b/drivers/power/supply/qcom/smb1355-charger.c @@ -170,6 +170,7 @@ struct smb1355 { struct pmic_revid_data *pmic_rev_id; int c_health; + int c_charger_temp_max; }; static bool is_secure(struct smb1355 *chip, int addr) @@ -427,23 +428,6 @@ static int smb1355_get_prop_charger_temp(struct smb1355 *chip, return rc; } -static int smb1355_get_prop_charger_temp_max(struct smb1355 *chip, - union power_supply_propval *val) -{ - int rc; - - if (!chip->iio.temp_max_chan || - PTR_ERR(chip->iio.temp_max_chan) == -EPROBE_DEFER) - chip->iio.temp_max_chan = devm_iio_channel_get(chip->dev, - "charger_temp_max"); - if (IS_ERR(chip->iio.temp_max_chan)) - return PTR_ERR(chip->iio.temp_max_chan); - - rc = iio_read_channel_processed(chip->iio.temp_max_chan, &val->intval); - val->intval /= 100; - return rc; -} - static int smb1355_parallel_get_prop(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) @@ -470,7 +454,7 @@ static int smb1355_parallel_get_prop(struct power_supply *psy, rc = smb1355_get_prop_charger_temp(chip, val); break; case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: - rc = smb1355_get_prop_charger_temp_max(chip, val); + val->intval = chip->c_charger_temp_max; break; case POWER_SUPPLY_PROP_INPUT_SUSPEND: rc = smb1355_get_parallel_charging(chip, &val->intval); @@ -560,6 +544,9 @@ static int smb1355_parallel_set_prop(struct power_supply *psy, chip->c_health = val->intval; power_supply_changed(chip->parallel_psy); break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + chip->c_charger_temp_max = val->intval; + break; default: pr_debug("parallel power supply set prop %d not supported\n", prop); @@ -903,6 +890,7 @@ static int smb1355_probe(struct platform_device *pdev) chip->dev = &pdev->dev; chip->param = v1_params; chip->c_health = -EINVAL; + chip->c_charger_temp_max = -EINVAL; chip->name = "smb1355"; mutex_init(&chip->write_lock); diff --git a/drivers/power/supply/qcom/smb1390-charger.c b/drivers/power/supply/qcom/smb1390-charger.c index 6ba5fffee61574e6fcd86c4c62f03e218c94afbb..a5ea43bf3f6d1926439dbb5e2487c6cd3d485410 100644 --- a/drivers/power/supply/qcom/smb1390-charger.c +++ b/drivers/power/supply/qcom/smb1390-charger.c @@ -67,6 +67,15 @@ #define CORE_FTRIM_ILIM_REG 0x1030 #define CFG_ILIM_MASK GENMASK(4, 0) +#define CORE_FTRIM_LVL_REG 0x1033 +#define CFG_WIN_HI_MASK GENMASK(3, 2) +#define WIN_OV_LVL_1000MV 0x08 + +#define CORE_FTRIM_MISC_REG 0x1034 +#define TR_WIN_1P5X_BIT BIT(0) +#define WINDOW_DETECTION_DELTA_X1P0 0 +#define WINDOW_DETECTION_DELTA_X1P5 1 + #define CP_VOTER "CP_VOTER" #define USER_VOTER "USER_VOTER" #define ILIM_VOTER "ILIM_VOTER" @@ -550,11 +559,30 @@ static void smb1390_destroy_votables(struct smb1390 *chip) static int smb1390_init_hw(struct smb1390 *chip) { + int rc; + /* * charge pump is initially disabled; this indirectly votes to allow * traditional parallel charging if present */ vote(chip->disable_votable, USER_VOTER, true, 0); + + /* + * Improve ILIM accuracy: + * 
- Configure window (Vin - 2Vout) OV level to 1000mV + * - Configure VOUT tracking value to 1.0 + */ + rc = smb1390_masked_write(chip, CORE_FTRIM_LVL_REG, + CFG_WIN_HI_MASK, WIN_OV_LVL_1000MV); + if (rc < 0) + return rc; + + rc = smb1390_masked_write(chip, CORE_FTRIM_MISC_REG, + TR_WIN_1P5X_BIT, WINDOW_DETECTION_DELTA_X1P0); + if (rc < 0) + return rc; + + return 0; } diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 2d63bfe57a89720942608109ce9558715fa68ade..2b1cb6ea0107368cde1bdb820c226af93cb08667 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -117,6 +117,19 @@ int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua) return 0; } +int smblib_icl_override(struct smb_charger *chg, bool override) +{ + int rc; + + rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG, + ICL_OVERRIDE_AFTER_APSD_BIT, + override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0); + if (rc < 0) + smblib_err(chg, "Couldn't override ICL rc=%d\n", rc); + + return rc; +} + static int smblib_select_sec_charger(struct smb_charger *chg, int sec_chg) { int rc; @@ -126,6 +139,8 @@ static int smblib_select_sec_charger(struct smb_charger *chg, int sec_chg) switch (sec_chg) { case POWER_SUPPLY_CHARGER_SEC_CP: + vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, true, 0); + /* select Charge Pump instead of slave charger */ rc = smblib_masked_write(chg, MISC_SMB_CFG_REG, SMB_EN_SEL_BIT, SMB_EN_SEL_BIT); @@ -158,9 +173,14 @@ static int smblib_select_sec_charger(struct smb_charger *chg, int sec_chg) rc); return rc; } + + vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, false, 0); + break; case POWER_SUPPLY_CHARGER_SEC_NONE: default: + vote(chg->pl_disable_votable, PL_SMB_EN_VOTER, true, 0); + /* SW override, disabling secondary charger(s) */ rc = smblib_write(chg, MISC_SMB_EN_CMD_REG, SMB_EN_OVERRIDE_BIT); @@ -252,6 +272,59 @@ int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend) return rc; } + +static const s16 therm_lookup_table[] = { + /* Index -30C~85C, ADC raw code */ + 0x6C92, 0x6C43, 0x6BF0, 0x6B98, 0x6B3A, 0x6AD8, 0x6A70, 0x6A03, + 0x6990, 0x6916, 0x6897, 0x6811, 0x6785, 0x66F2, 0x6658, 0x65B7, + 0x650F, 0x6460, 0x63AA, 0x62EC, 0x6226, 0x6159, 0x6084, 0x5FA8, + 0x5EC3, 0x5DD8, 0x5CE4, 0x5BE9, 0x5AE7, 0x59DD, 0x58CD, 0x57B5, + 0x5696, 0x5571, 0x5446, 0x5314, 0x51DD, 0x50A0, 0x4F5E, 0x4E17, + 0x4CCC, 0x4B7D, 0x4A2A, 0x48D4, 0x477C, 0x4621, 0x44C4, 0x4365, + 0x4206, 0x40A6, 0x3F45, 0x3DE6, 0x3C86, 0x3B28, 0x39CC, 0x3872, + 0x3719, 0x35C4, 0x3471, 0x3322, 0x31D7, 0x308F, 0x2F4C, 0x2E0D, + 0x2CD3, 0x2B9E, 0x2A6E, 0x2943, 0x281D, 0x26FE, 0x25E3, 0x24CF, + 0x23C0, 0x22B8, 0x21B5, 0x20B8, 0x1FC2, 0x1ED1, 0x1DE6, 0x1D01, + 0x1C22, 0x1B49, 0x1A75, 0x19A8, 0x18E0, 0x181D, 0x1761, 0x16A9, + 0x15F7, 0x154A, 0x14A2, 0x13FF, 0x1361, 0x12C8, 0x1234, 0x11A4, + 0x1119, 0x1091, 0x100F, 0x0F90, 0x0F15, 0x0E9E, 0x0E2B, 0x0DBC, + 0x0D50, 0x0CE8, 0x0C83, 0x0C21, 0x0BC3, 0x0B67, 0x0B0F, 0x0AB9, + 0x0A66, 0x0A16, 0x09C9, 0x097E, +}; + +int smblib_get_thermal_threshold(struct smb_charger *chg, u16 addr, int *val) +{ + u8 buff[2]; + s16 temp; + int rc = 0; + int i, lower, upper; + + rc = smblib_batch_read(chg, addr, buff, 2); + if (rc < 0) { + pr_err("failed to write to 0x%04X, rc=%d\n", addr, rc); + return rc; + } + + temp = buff[1] | buff[0] << 8; + + lower = 0; + upper = ARRAY_SIZE(therm_lookup_table) - 1; + while (lower <= upper) { + i = (upper + lower) / 2; + if (therm_lookup_table[i] < temp) + upper = i - 1; + else if (therm_lookup_table[i] > temp) + 
lower = i + 1; + else + break; + } + + /* index 0 corresonds to -30C */ + *val = (i - 30) * 10; + + return rc; +} + struct apsd_result { const char * const name; const u8 bit; @@ -916,7 +989,7 @@ static int get_sdp_current(struct smb_charger *chg, int *icl_ua) int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) { int rc = 0; - bool hc_mode = false; + bool hc_mode = false, override = false; /* suspend and return if 25mA or less is requested */ if (icl_ua <= USBIN_25MA) @@ -943,6 +1016,13 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) goto out; } hc_mode = true; + + /* + * Micro USB mode follows ICL register independent of override + * bit, configure override only for typeC mode. + */ + if (chg->connector_type == POWER_SUPPLY_CONNECTOR_TYPEC) + override = true; } set_mode: @@ -953,6 +1033,12 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) goto out; } + rc = smblib_icl_override(chg, override); + if (rc < 0) { + smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc); + goto out; + } + /* unsuspend after configuring current and override */ rc = smblib_set_usb_suspend(chg, false); if (rc < 0) { @@ -1475,7 +1561,7 @@ int smblib_get_prop_batt_iterm(struct smb_charger *chg, union power_supply_propval *val) { int rc, temp; - u8 stat; + u8 stat, buf[2]; /* * Currently, only ADC comparator-based termination is supported, @@ -1494,8 +1580,7 @@ int smblib_get_prop_batt_iterm(struct smb_charger *chg, return 0; } - rc = smblib_batch_read(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG, - (u8 *)&temp, 2); + rc = smblib_batch_read(chg, CHGR_ADC_ITERM_UP_THD_MSB_REG, buf, 2); if (rc < 0) { smblib_err(chg, "Couldn't read CHGR_ADC_ITERM_UP_THD_MSB_REG rc=%d\n", @@ -1503,6 +1588,7 @@ int smblib_get_prop_batt_iterm(struct smb_charger *chg, return rc; } + temp = buf[1] | (buf[0] << 8); temp = sign_extend32(temp, 15); temp = DIV_ROUND_CLOSEST(temp * 10000, ADC_CHG_TERM_MASK); val->intval = temp; @@ -2238,6 +2324,30 @@ int smblib_get_prop_die_health(struct smb_charger *chg, return 0; } +int smblib_get_prop_connector_health(struct smb_charger *chg) +{ + int rc; + u8 stat; + + rc = smblib_read(chg, CONNECTOR_TEMP_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read CONNECTOR_TEMP_STATUS_REG, rc=%d\n", + rc); + return POWER_SUPPLY_HEALTH_UNKNOWN; + } + + if (stat & CONNECTOR_TEMP_RST_BIT) + return POWER_SUPPLY_HEALTH_OVERHEAT; + + if (stat & CONNECTOR_TEMP_UB_BIT) + return POWER_SUPPLY_HEALTH_HOT; + + if (stat & CONNECTOR_TEMP_LB_BIT) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_COOL; +} + #define SDP_CURRENT_UA 500000 #define CDP_CURRENT_UA 1500000 #define DCP_CURRENT_UA 1500000 @@ -3339,7 +3449,6 @@ static void typec_src_removal(struct smb_charger *chg) vote(chg->usb_icl_votable, PD_VOTER, false, 0); vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0); vote(chg->usb_icl_votable, DCP_VOTER, false, 0); - vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0); vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0); vote(chg->usb_icl_votable, OTG_VOTER, false, 0); vote(chg->usb_icl_votable, CTM_VOTER, false, 0); @@ -3741,8 +3850,31 @@ static void bms_update_work(struct work_struct *work) static void pl_update_work(struct work_struct *work) { + union power_supply_propval prop_val; struct smb_charger *chg = container_of(work, struct smb_charger, pl_update_work); + int rc; + + if (chg->smb_temp_max == -EINVAL) { + rc = smblib_get_thermal_threshold(chg, + SMB_REG_H_THRESHOLD_MSB_REG, + &chg->smb_temp_max); + if (rc < 0) { + dev_err(chg->dev, 
"Couldn't get charger_temp_max rc=%d\n", + rc); + return; + } + } + + prop_val.intval = chg->smb_temp_max; + rc = power_supply_set_property(chg->pl.psy, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, + &prop_val); + if (rc < 0) { + dev_err(chg->dev, "Couldn't set POWER_SUPPLY_PROP_CHARGER_TEMP_MAX rc=%d\n", + rc); + return; + } if (chg->sec_chg_selected == POWER_SUPPLY_CHARGER_SEC_CP) return; @@ -3988,6 +4120,7 @@ static void smblib_iio_deinit(struct smb_charger *chg) int smblib_init(struct smb_charger *chg) { + union power_supply_propval prop_val; int rc = 0; mutex_init(&chg->lock); @@ -4034,13 +4167,36 @@ int smblib_init(struct smb_charger *chg) if (chg->sec_pl_present) { chg->pl.psy = power_supply_get_by_name("parallel"); - if (chg->sec_chg_selected != POWER_SUPPLY_CHARGER_SEC_CP - && chg->pl.psy) { - rc = smblib_select_sec_charger(chg, + if (chg->pl.psy) { + if (chg->sec_chg_selected + != POWER_SUPPLY_CHARGER_SEC_CP) { + rc = smblib_select_sec_charger(chg, POWER_SUPPLY_CHARGER_SEC_PL); + if (rc < 0) { + smblib_err(chg, "Couldn't config pl charger rc=%d\n", + rc); + return rc; + } + } + + if (chg->smb_temp_max == -EINVAL) { + rc = smblib_get_thermal_threshold(chg, + SMB_REG_H_THRESHOLD_MSB_REG, + &chg->smb_temp_max); + if (rc < 0) { + dev_err(chg->dev, "Couldn't get charger_temp_max rc=%d\n", + rc); + return rc; + } + } + + prop_val.intval = chg->smb_temp_max; + rc = power_supply_set_property(chg->pl.psy, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, + &prop_val); if (rc < 0) { - smblib_err(chg, "Couldn't config pl charger rc=%d\n", - rc); + dev_err(chg->dev, "Couldn't set POWER_SUPPLY_PROP_CHARGER_TEMP_MAX rc=%d\n", + rc); return rc; } } diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h index 8b33ec346dd66fea4eb5e32a8e9d992c61d442f4..f65dae81ed4cda8373ecc421c93a092be2a66747 100644 --- a/drivers/power/supply/qcom/smb5-lib.h +++ b/drivers/power/supply/qcom/smb5-lib.h @@ -33,22 +33,13 @@ enum print_reason { #define PD_VOTER "PD_VOTER" #define DCP_VOTER "DCP_VOTER" #define QC_VOTER "QC_VOTER" -#define PL_USBIN_USBIN_VOTER "PL_USBIN_USBIN_VOTER" #define USB_PSY_VOTER "USB_PSY_VOTER" #define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER" -#define PL_QNOVO_VOTER "PL_QNOVO_VOTER" #define USBIN_V_VOTER "USBIN_V_VOTER" #define CHG_STATE_VOTER "CHG_STATE_VOTER" -#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER" #define TAPER_END_VOTER "TAPER_END_VOTER" #define THERMAL_DAEMON_VOTER "THERMAL_DAEMON_VOTER" -#define CC_DETACHED_VOTER "CC_DETACHED_VOTER" -#define APSD_VOTER "APSD_VOTER" -#define PD_DISALLOWED_INDIRECT_VOTER "PD_DISALLOWED_INDIRECT_VOTER" -#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER" -#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER" #define BOOST_BACK_VOTER "BOOST_BACK_VOTER" -#define USBIN_USBIN_BOOST_VOTER "USBIN_USBIN_BOOST_VOTER" #define MICRO_USB_VOTER "MICRO_USB_VOTER" #define DEBUG_BOARD_VOTER "DEBUG_BOARD_VOTER" #define PD_SUSPEND_SUPPORTED_VOTER "PD_SUSPEND_SUPPORTED_VOTER" @@ -66,6 +57,7 @@ enum print_reason { #define PL_FCC_LOW_VOTER "PL_FCC_LOW_VOTER" #define WBC_VOTER "WBC_VOTER" #define HW_LIMIT_VOTER "HW_LIMIT_VOTER" +#define PL_SMB_EN_VOTER "PL_SMB_EN_VOTER" #define BOOST_BACK_STORM_COUNT 3 #define WEAK_CHG_STORM_COUNT 8 @@ -243,6 +235,7 @@ struct smb_params { struct smb_chg_param icl_max_stat; struct smb_chg_param icl_stat; struct smb_chg_param otg_cl; + struct smb_chg_param dc_icl; struct smb_chg_param jeita_cc_comp_hot; struct smb_chg_param jeita_cc_comp_cold; struct smb_chg_param freq_switcher; @@ -368,6 +361,8 @@ struct smb_charger { int 
auto_recharge_soc; enum sink_src_mode sink_src_mode; bool jeita_configured; + int charger_temp_max; + int smb_temp_max; /* workaround flag */ u32 wa_flags; @@ -384,6 +379,7 @@ struct smb_charger { int pulse_cnt; int die_health; + int connector_health; /* flash */ u32 flash_derating_soc; @@ -522,6 +518,7 @@ int smblib_get_prop_charger_temp(struct smb_charger *chg, union power_supply_propval *val); int smblib_get_prop_die_health(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_prop_connector_health(struct smb_charger *chg); int smblib_set_prop_pd_current_max(struct smb_charger *chg, const union power_supply_propval *val); int smblib_set_prop_sdp_current_max(struct smb_charger *chg, @@ -544,6 +541,7 @@ void smblib_suspend_on_debug_battery(struct smb_charger *chg); int smblib_rerun_apsd_if_required(struct smb_charger *chg); int smblib_get_prop_fcc_delta(struct smb_charger *chg, union power_supply_propval *val); +int smblib_get_thermal_threshold(struct smb_charger *chg, u16 addr, int *val); int smblib_dp_dm(struct smb_charger *chg, int val); int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable); int smblib_rerun_aicl(struct smb_charger *chg); @@ -555,6 +553,7 @@ int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg, int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg, const union power_supply_propval *val); int smblib_configure_hvdcp_apsd(struct smb_charger *chg, bool enable); +int smblib_icl_override(struct smb_charger *chg, bool override); int smblib_init(struct smb_charger *chg); int smblib_deinit(struct smb_charger *chg); diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h index 20a864e513e65f04696598bc40d8b40fbdb5609e..d25d484681ed44eb022aa4a8a5316cc8e3a05ce4 100644 --- a/drivers/power/supply/qcom/smb5-reg.h +++ b/drivers/power/supply/qcom/smb5-reg.h @@ -138,6 +138,8 @@ enum { #define DCDC_OTG_CFG_REG (DCDC_BASE + 0x53) #define OTG_EN_SRC_CFG_BIT BIT(1) +#define DCDC_CFG_REF_MAX_PSNS_REG (DCDC_BASE + 0x8C) + /******************************** * BATIF Peripheral Registers * ********************************/ @@ -353,6 +355,12 @@ enum { #define TEMP_BELOW_RANGE_BIT BIT(1) #define THERMREG_DISABLED_BIT BIT(0) +#define CONNECTOR_TEMP_STATUS_REG (MISC_BASE + 0x09) +#define CONNECTOR_TEMP_SHDN_BIT BIT(3) +#define CONNECTOR_TEMP_RST_BIT BIT(2) +#define CONNECTOR_TEMP_UB_BIT BIT(1) +#define CONNECTOR_TEMP_LB_BIT BIT(0) + #define BARK_BITE_WDOG_PET_REG (MISC_BASE + 0x43) #define BARK_BITE_WDOG_PET_BIT BIT(0) @@ -379,6 +387,9 @@ enum { #define AICL_RERUN_TIME_CFG_REG (MISC_BASE + 0x61) #define AICL_RERUN_TIME_12S_VAL 0x01 +#define MISC_THERMREG_SRC_CFG_REG (MISC_BASE + 0x70) +#define THERMREG_SMB_ADC_SRC_EN_BIT BIT(5) + #define MISC_SMB_CFG_REG (MISC_BASE + 0x90) #define SMB_EN_SEL_BIT BIT(4) #define CP_EN_POLARITY_CFG_BIT BIT(3) @@ -386,4 +397,8 @@ enum { #define STAT_FUNCTION_CFG_BIT BIT(1) #define STAT_IRQ_PULSING_EN_BIT BIT(0) +#define DIE_REG_H_THRESHOLD_MSB_REG (MISC_BASE + 0xA0) + +#define SMB_REG_H_THRESHOLD_MSB_REG (MISC_BASE + 0XBC) + #endif /* __SMB5_CHARGER_REG_H */ diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c index a75cbbbce56f6de9347e060db16b3fb9c5516847..b87e19ca11e86f8878650d54d4ec8bd3ff79ceea 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.c +++ b/drivers/power/supply/qcom/step-chg-jeita.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018 The Linux Foundation. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -20,7 +20,6 @@ #include #include "step-chg-jeita.h" -#define MAX_STEP_CHG_ENTRIES 8 #define STEP_CHG_VOTER "STEP_CHG_VOTER" #define JEITA_VOTER "JEITA_VOTER" @@ -30,12 +29,6 @@ || ((left) <= (right) && (left) <= (value) \ && (value) <= (right))) -struct range_data { - u32 low_threshold; - u32 high_threshold; - u32 value; -}; - struct step_chg_cfg { u32 psy_prop; char *prop_name; @@ -118,12 +111,17 @@ static bool is_bms_available(struct step_chg_info *chip) return true; } -static int read_range_data_from_node(struct device_node *node, +int read_range_data_from_node(struct device_node *node, const char *prop_str, struct range_data *ranges, u32 max_threshold, u32 max_value) { int rc = 0, i, length, per_tuple_length, tuples; + if (!node || !prop_str || !ranges) { + pr_err("Invalid parameters passed\n"); + return -EINVAL; + } + rc = of_property_count_elems_of_size(node, prop_str, sizeof(u32)); if (rc < 0) { pr_err("Count %s failed, rc=%d\n", prop_str, rc); @@ -184,6 +182,7 @@ static int read_range_data_from_node(struct device_node *node, memset(ranges, 0, tuples * sizeof(struct range_data)); return rc; } +EXPORT_SYMBOL(read_range_data_from_node); static int get_step_chg_jeita_setting_from_profile(struct step_chg_info *chip) { @@ -348,17 +347,46 @@ static int get_val(struct range_data *range, int hysteresis, int current_index, int i; *new_index = -EINVAL; - /* first find the matching index without hysteresis */ - for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++) + + /* + * If the threshold is lesser than the minimum allowed range, + * return -ENODATA. + */ + if (threshold < range[0].low_threshold) + return -ENODATA; + + /* First try to find the matching index without hysteresis */ + for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++) { + if (!range[i].high_threshold && !range[i].low_threshold) { + /* First invalid table entry; exit loop */ + break; + } + if (is_between(range[i].low_threshold, range[i].high_threshold, threshold)) { *new_index = i; *val = range[i].value; + break; } + } + + /* + * If nothing was found, the threshold exceeds the max range for sure + * as the other case where it is lesser than the min range is handled + * at the very beginning of this function. Therefore, clip it to the + * max allowed range value, which is the one corresponding to the last + * valid entry in the battery profile data array. + */ + if (*new_index == -EINVAL) { + if (i == 0) { + /* Battery profile data array is completely invalid */ + return -ENODATA; + } + + *new_index = (i - 1); + *val = range[*new_index].value; + } - /* if nothing was found, return -ENODATA */ - if (*new_index == -EINVAL) - return -ENODATA; /* * If we don't have a current_index return this * newfound value. There is no hysterisis from out of range diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h index 2404b866b94ef9d75e46b2f6098deddd083b25ba..6760d66143f16ae5871117286eb5fc91cfec4e15 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.h +++ b/drivers/power/supply/qcom/step-chg-jeita.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -12,7 +12,19 @@ #ifndef __STEP_CHG_H__ #define __STEP_CHG_H__ + +#define MAX_STEP_CHG_ENTRIES 8 + +struct range_data { + u32 low_threshold; + u32 high_threshold; + u32 value; +}; + int qcom_step_chg_init(struct device *dev, bool step_chg_enable, bool sw_jeita_enable); void qcom_step_chg_deinit(void); +int read_range_data_from_node(struct device_node *node, + const char *prop_str, struct range_data *ranges, + u32 max_threshold, u32 max_value); #endif /* __STEP_CHG_H__ */ diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index 5d6ed1507d29284f2ba28f2cc781f4b797067f01..5561b9e190f84a63513ff3b86ecbeef7461404e8 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -74,6 +74,10 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev) return pwm_lpss_remove(lpwm); } +static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops, + pwm_lpss_suspend, + pwm_lpss_resume); + static const struct acpi_device_id pwm_lpss_acpi_match[] = { { "80860F09", (unsigned long)&pwm_lpss_byt_info }, { "80862288", (unsigned long)&pwm_lpss_bsw_info }, @@ -86,6 +90,7 @@ static struct platform_driver pwm_lpss_driver_platform = { .driver = { .name = "pwm-lpss", .acpi_match_table = pwm_lpss_acpi_match, + .pm = &pwm_lpss_platform_pm_ops, }, .probe = pwm_lpss_probe_platform, .remove = pwm_lpss_remove_platform, diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 8db0d40ccacde84a61d292936f6bbdeeed7ac358..4721a264bac2580cf8d21ee54396e0b494f1c9dc 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -32,10 +32,13 @@ /* Size of each PWM register space if multiple */ #define PWM_SIZE 0x400 +#define MAX_PWMS 4 + struct pwm_lpss_chip { struct pwm_chip chip; void __iomem *regs; const struct pwm_lpss_boardinfo *info; + u32 saved_ctrl[MAX_PWMS]; }; static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) @@ -177,6 +180,9 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, unsigned long c; int ret; + if (WARN_ON(info->npwm > MAX_PWMS)) + return ERR_PTR(-ENODEV); + lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL); if (!lpwm) return ERR_PTR(-ENOMEM); @@ -212,6 +218,30 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) } EXPORT_SYMBOL_GPL(pwm_lpss_remove); +int pwm_lpss_suspend(struct device *dev) +{ + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); + int i; + + for (i = 0; i < lpwm->info->npwm; i++) + lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM); + + return 0; +} +EXPORT_SYMBOL_GPL(pwm_lpss_suspend); + +int pwm_lpss_resume(struct device *dev) +{ + struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev); + int i; + + for (i = 0; i < lpwm->info->npwm; i++) + writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM); + + return 0; +} +EXPORT_SYMBOL_GPL(pwm_lpss_resume); + MODULE_DESCRIPTION("PWM driver for Intel LPSS"); MODULE_AUTHOR("Mika Westerberg "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index 98306bb02cfe71c0775eb430e7cf623fdc431889..7a4238ad1fcb1f25390032019170759c6666ae83 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -28,5 +28,7 @@ struct pwm_lpss_boardinfo { struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, const struct pwm_lpss_boardinfo *info); int pwm_lpss_remove(struct pwm_lpss_chip *lpwm); +int pwm_lpss_suspend(struct device *dev); +int 
pwm_lpss_resume(struct device *dev); #endif /* __PWM_LPSS_H */ diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c index 28859e35efd98d4dec40f13e03682322c2bf938e..fe543aecdcd6189430b8153d402447129821b1c3 100644 --- a/drivers/regulator/qpnp-lcdb-regulator.c +++ b/drivers/regulator/qpnp-lcdb-regulator.c @@ -167,6 +167,8 @@ #define PM660_BST_HEADROOM_DEFAULT_MV 200 #define BST_HEADROOM_DEFAULT_MV 150 +#define PMIC5_LCDB_OFF_ON_DELAY_US 20000 + struct ldo_regulator { struct regulator_desc rdesc; struct regulator_dev *rdev; @@ -1340,22 +1342,27 @@ static struct regulator_ops qpnp_lcdb_ncp_ops = { static int qpnp_lcdb_regulator_register(struct qpnp_lcdb *lcdb, u8 type) { - int rc = 0; + int rc = 0, off_on_delay = 0; struct regulator_init_data *init_data; struct regulator_config cfg = {}; struct regulator_desc *rdesc; struct regulator_dev *rdev; struct device_node *node; + if (lcdb->pmic_rev_id->pmic_subtype != PM660L_SUBTYPE) + off_on_delay = PMIC5_LCDB_OFF_ON_DELAY_US; + if (type == LDO) { node = lcdb->ldo.node; rdesc = &lcdb->ldo.rdesc; rdesc->ops = &qpnp_lcdb_ldo_ops; + rdesc->off_on_delay = off_on_delay; rdev = lcdb->ldo.rdev; } else if (type == NCP) { node = lcdb->ncp.node; rdesc = &lcdb->ncp.rdesc; rdesc->ops = &qpnp_lcdb_ncp_ops; + rdesc->off_on_delay = off_on_delay; rdev = lcdb->ncp.rdev; } else { pr_err("Invalid regulator type %d\n", type); diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c index af8e96f2ad8079f80ba0b5392e09daa8c8065a87..6778d7a27b803ed33afe80f36a28e03e2be268bd 100644 --- a/drivers/regulator/spm-regulator.c +++ b/drivers/regulator/spm-regulator.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -52,6 +52,7 @@ enum qpnp_regulator_uniq_type { QPNP_TYPE_FTS2p5, QPNP_TYPE_FTS426, QPNP_TYPE_ULT_HF, + QPNP_TYPE_HFS430, }; enum qpnp_regulator_type { @@ -68,6 +69,7 @@ enum qpnp_regulator_subtype { QPNP_FTS2p5_SUBTYPE = 0x09, QPNP_FTS426_SUBTYPE = 0x0A, QPNP_ULT_HF_SUBTYPE = 0x0D, + QPNP_HFS430_SUBTYPE = 0x0A, }; enum qpnp_logical_mode { @@ -82,6 +84,7 @@ static const struct voltage_range fts2p5_range0 static const struct voltage_range fts2p5_range1 = {160000, 700000, 2200000, 10000}; static const struct voltage_range fts426_range = {0, 320000, 1352000, 4000}; +static const struct voltage_range hfs430_range = {0, 320000, 2040000, 8000}; static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500, 12500}; static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000, @@ -98,11 +101,11 @@ static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000, #define QPNP_SMPS_REG_STEP_CTRL 0x61 #define QPNP_SMPS_REG_UL_LL_CTRL 0x68 -/* FTS426 voltage control registers */ -#define QPNP_FTS426_REG_VOLTAGE_LB 0x40 -#define QPNP_FTS426_REG_VOLTAGE_UB 0x41 -#define QPNP_FTS426_REG_VOLTAGE_VALID_LB 0x42 -#define QPNP_FTS426_REG_VOLTAGE_VALID_UB 0x43 +/* FTS426/HFS430 voltage control registers */ +#define QPNP_FTS426_HFS430_REG_VOLTAGE_LB 0x40 +#define QPNP_FTS426_HFS430_REG_VOLTAGE_UB 0x41 +#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB 0x42 +#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_UB 0x43 /* HF voltage limit registers */ #define QPNP_HF_REG_VOLTAGE_ULS 0x69 @@ -112,9 +115,9 @@ static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000, #define QPNP_FTS_REG_VOLTAGE_ULS_VALID 0x6A #define QPNP_FTS_REG_VOLTAGE_LLS_VALID 0x6C -/* FTS426 voltage limit registers */ -#define QPNP_FTS426_REG_VOLTAGE_ULS_LB 0x68 -#define QPNP_FTS426_REG_VOLTAGE_ULS_UB 0x69 +/* FTS426/HFS430 voltage limit registers */ +#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB 0x68 +#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_UB 0x69 /* Common regulator UL & LL limits control register layout */ #define QPNP_COMMON_UL_EN_MASK 0x80 @@ -122,19 +125,20 @@ static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000, #define QPNP_SMPS_MODE_PWM 0x80 #define QPNP_SMPS_MODE_AUTO 0x40 -#define QPNP_FTS426_MODE_PWM 0x07 -#define QPNP_FTS426_MODE_AUTO 0x06 +#define QPNP_FTS426_HFS430_MODE_PWM 0x07 +#define QPNP_FTS426_HFS430_MODE_AUTO 0x06 #define QPNP_SMPS_STEP_CTRL_STEP_MASK 0x18 #define QPNP_SMPS_STEP_CTRL_STEP_SHIFT 3 #define QPNP_SMPS_STEP_CTRL_DELAY_MASK 0x07 #define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT 0 -#define QPNP_FTS426_STEP_CTRL_DELAY_MASK 0x03 -#define QPNP_FTS426_STEP_CTRL_DELAY_SHIFT 0 +#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK 0x03 +#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT 0 /* Clock rate in kHz of the FTS2 regulator reference clock. */ #define QPNP_SMPS_CLOCK_RATE 19200 #define QPNP_FTS426_CLOCK_RATE 4800 +#define QPNP_HFS430_CLOCK_RATE 1600 /* Time to delay in us to ensure that a mode change has completed. */ #define QPNP_FTS2_MODE_CHANGE_DELAY 50 @@ -145,7 +149,7 @@ static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000, /* Minimum voltage stepper delay for each step. 
*/ #define QPNP_FTS2_STEP_DELAY 8 #define QPNP_HF_STEP_DELAY 20 -#define QPNP_FTS426_STEP_DELAY 2 +#define QPNP_FTS426_HFS430_STEP_DELAY 2 /* Arbitrarily large max step size used to avoid possible numerical overflow */ #define SPM_REGULATOR_MAX_STEP_UV 10000000 @@ -156,8 +160,8 @@ static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000, */ #define QPNP_FTS2_STEP_MARGIN_NUM 4 #define QPNP_FTS2_STEP_MARGIN_DEN 5 -#define QPNP_FTS426_STEP_MARGIN_NUM 10 -#define QPNP_FTS426_STEP_MARGIN_DEN 11 +#define QPNP_FTS426_HFS430_STEP_MARGIN_NUM 10 +#define QPNP_FTS426_HFS430_STEP_MARGIN_DEN 11 /* * Settling delay for FTS2.5 @@ -204,7 +208,8 @@ static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV) { int vlevel; - if (vreg->regulator_type == QPNP_TYPE_FTS426) + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) return roundup(uV, vreg->range->step_uV) / 1000; vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV); @@ -221,7 +226,8 @@ static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV) static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel) { - if (vreg->regulator_type == QPNP_TYPE_FTS426) + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) return vlevel * 1000; /* * Calculate ULT HF buck VSET based on range: @@ -243,6 +249,10 @@ static unsigned spm_regulator_vlevel_to_selector(struct spm_vreg *vreg, && vreg->range == &ult_hf_range1) vlevel &= ~ULT_SMPS_RANGE_SPLIT; + if (vreg->regulator_type == QPNP_TYPE_HFS430) + vlevel = spm_regulator_vlevel_to_uv(vreg, vlevel) + / vreg->range->step_uV; + return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV) / vreg->range->step_uV; } @@ -252,10 +262,12 @@ static int qpnp_smps_read_voltage(struct spm_vreg *vreg) int rc; u8 val[2] = {0}; - if (vreg->regulator_type == QPNP_TYPE_FTS426) { + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) { rc = regmap_bulk_read(vreg->regmap, - vreg->spmi_base_addr + QPNP_FTS426_REG_VOLTAGE_VALID_LB, - val, 2); + vreg->spmi_base_addr + + QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB, + val, 2); if (rc) { dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint registers, rc=%d\n", __func__, rc); @@ -289,9 +301,11 @@ static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned vlevel) reg[0] = vlevel & 0xFF; reg[1] = (vlevel >> 8) & 0xFF; - if (vreg->regulator_type == QPNP_TYPE_FTS426) { + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) { rc = regmap_bulk_write(vreg->regmap, - vreg->spmi_base_addr + QPNP_FTS426_REG_VOLTAGE_LB, + vreg->spmi_base_addr + + QPNP_FTS426_HFS430_REG_VOLTAGE_LB, reg, 2); } else { rc = regmap_write(vreg->regmap, @@ -309,8 +323,9 @@ static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned vlevel) static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg, u8 regval) { - if (vreg->regulator_type == QPNP_TYPE_FTS426) - return (regval == QPNP_FTS426_MODE_PWM) + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) + return (regval == QPNP_FTS426_HFS430_MODE_PWM) ? 
QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO; else return (regval & QPNP_SMPS_MODE_PWM) @@ -320,9 +335,11 @@ static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg, static inline u8 qpnp_mode_to_regval(struct spm_vreg *vreg, enum qpnp_logical_mode mode) { - if (vreg->regulator_type == QPNP_TYPE_FTS426) + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) return (mode == QPNP_LOGICAL_MODE_PWM) - ? QPNP_FTS426_MODE_PWM : QPNP_FTS426_MODE_AUTO; + ? QPNP_FTS426_HFS430_MODE_PWM + : QPNP_FTS426_HFS430_MODE_AUTO; else return (mode == QPNP_LOGICAL_MODE_PWM) ? QPNP_SMPS_MODE_PWM : QPNP_SMPS_MODE_AUTO; @@ -748,6 +765,9 @@ static int qpnp_smps_check_type(struct spm_vreg *vreg) } else if (type[0] == QPNP_FTS426_TYPE && type[1] == QPNP_FTS426_SUBTYPE) { vreg->regulator_type = QPNP_TYPE_FTS426; + } else if (type[0] == QPNP_HF_TYPE + && type[1] == QPNP_HFS430_SUBTYPE) { + vreg->regulator_type = QPNP_TYPE_HFS430; } else if (type[0] == QPNP_ULT_HF_TYPE && type[1] == QPNP_ULT_HF_SUBTYPE) { vreg->regulator_type = QPNP_TYPE_ULT_HF; @@ -901,16 +921,20 @@ static int qpnp_smps_init_step_rate(struct spm_vreg *vreg) /* ULT and FTS426 bucks do not support steps */ if (vreg->regulator_type != QPNP_TYPE_ULT_HF && vreg->regulator_type != - QPNP_TYPE_FTS426) + QPNP_TYPE_FTS426 && vreg->regulator_type != QPNP_TYPE_HFS430) step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK) >> QPNP_SMPS_STEP_CTRL_STEP_SHIFT; - if (vreg->regulator_type == QPNP_TYPE_FTS426) { - delay = (reg & QPNP_FTS426_STEP_CTRL_DELAY_MASK) - >> QPNP_FTS426_STEP_CTRL_DELAY_SHIFT; + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) { + delay = (reg & QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK) + >> QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT; /* step_rate has units of uV/us. */ - vreg->step_rate = QPNP_FTS426_CLOCK_RATE * vreg->range->step_uV; + vreg->step_rate = ((vreg->regulator_type == QPNP_TYPE_FTS426) + ? 
QPNP_FTS426_CLOCK_RATE + : QPNP_HFS430_CLOCK_RATE) + * vreg->range->step_uV; } else { delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK) >> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT; @@ -923,14 +947,18 @@ static int qpnp_smps_init_step_rate(struct spm_vreg *vreg) if ((vreg->regulator_type == QPNP_TYPE_ULT_HF) || (vreg->regulator_type == QPNP_TYPE_HF)) vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay); - else if (vreg->regulator_type == QPNP_TYPE_FTS426) - vreg->step_rate /= 1000 * (QPNP_FTS426_STEP_DELAY << delay); + else if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) + vreg->step_rate /= 1000 * (QPNP_FTS426_HFS430_STEP_DELAY + << delay); else vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay); - if (vreg->regulator_type == QPNP_TYPE_FTS426) - vreg->step_rate = vreg->step_rate * QPNP_FTS426_STEP_MARGIN_NUM - / QPNP_FTS426_STEP_MARGIN_DEN; + if (vreg->regulator_type == QPNP_TYPE_FTS426 + || vreg->regulator_type == QPNP_TYPE_HFS430) + vreg->step_rate = vreg->step_rate + * QPNP_FTS426_HFS430_STEP_MARGIN_NUM + / QPNP_FTS426_HFS430_STEP_MARGIN_DEN; else vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM / QPNP_FTS2_STEP_MARGIN_DEN; @@ -994,8 +1022,9 @@ static int qpnp_smps_check_constraints(struct spm_vreg *vreg, break; case QPNP_TYPE_FTS426: + case QPNP_TYPE_HFS430: rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr - + QPNP_FTS426_REG_VOLTAGE_ULS_LB, + + QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB, reg, 2); if (rc) { dev_err(&vreg->pdev->dev, "%s: could not read voltage limit registers, rc=%d\n", @@ -1167,6 +1196,8 @@ static int spm_regulator_probe(struct platform_device *pdev) rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1); else if (vreg->regulator_type == QPNP_TYPE_FTS426) vreg->range = &fts426_range; + else if (vreg->regulator_type == QPNP_TYPE_HFS430) + vreg->range = &hfs430_range; else if (vreg->regulator_type == QPNP_TYPE_HF) rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1); else if (vreg->regulator_type == QPNP_TYPE_ULT_HF) diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 2d3d5ac92c060260a35bfe8dd45c4809289ce77f..81ec9b6805fcdaf1d461463040d3a372bf22fb70 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -915,6 +915,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } + of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -932,6 +933,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } + of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig index a3b621d37ff55b1cdd052cf6ba8bfa7674cee042..299dbc6f3e67df7f471fdf898acc26fa751ad0b1 100644 --- a/drivers/rpmsg/Kconfig +++ b/drivers/rpmsg/Kconfig @@ -73,4 +73,13 @@ config RPMSG_VIRTIO select RPMSG select VIRTIO +config MSM_RPM_SMD + bool "RPM driver using SMD protocol" + help + RPM is the dedicated hardware engine for managing shared SoC + resources. This config adds driver support for using SMD as a + transport layer communication with RPM hardware. It also selects + the MSM_MPM config that programs the MPM module to monitor interrupts + during sleep modes. 
+ endmenu diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile index 1680763bb8b5580a89301e398ef105f6b2f47671..131052abeaf5f3c51728a845f19a0b9497cc6aff 100644 --- a/drivers/rpmsg/Makefile +++ b/drivers/rpmsg/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_RPMSG_QCOM_GLINK_SPSS) += qcom_glink_spss.o obj-$(CONFIG_RPMSG_QCOM_GLINK_SPI) += qcom_glink_spi.o obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o +obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index b01774e9fac0f0f0242c4cdf8864c9a70c6b2f1d..f1a2147a6d842aafa997160f64dce76fd95c5e04 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1043,12 +1043,12 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed void *info; int ret; - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL); + channel = kzalloc(sizeof(*channel), GFP_KERNEL); if (!channel) return ERR_PTR(-ENOMEM); channel->edge = edge; - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL); + channel->name = kstrdup(name, GFP_KERNEL); if (!channel->name) return ERR_PTR(-ENOMEM); @@ -1098,8 +1098,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed return channel; free_name_and_channel: - devm_kfree(&edge->dev, channel->name); - devm_kfree(&edge->dev, channel); + kfree(channel->name); + kfree(channel); return ERR_PTR(ret); } @@ -1320,13 +1320,13 @@ static int qcom_smd_parse_edge(struct device *dev, */ static void qcom_smd_edge_release(struct device *dev) { - struct qcom_smd_channel *channel; + struct qcom_smd_channel *channel, *tmp; struct qcom_smd_edge *edge = to_smd_edge(dev); - list_for_each_entry(channel, &edge->channels, list) { - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_RX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); + list_for_each_entry_safe(channel, tmp, &edge->channels, list) { + list_del(&channel->list); + kfree(channel->name); + kfree(channel); } kfree(edge); diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/rpmsg/rpm-smd.c similarity index 68% rename from drivers/soc/qcom/rpm-smd.c rename to drivers/rpmsg/rpm-smd.c index c04916cfbd7740b60bf2d0d7c6a30012cf8113e7..be2e69dcdc036dd70cf025910f95083b1bf9f427 100644 --- a/drivers/soc/qcom/rpm-smd.c +++ b/drivers/rpmsg/rpm-smd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -34,53 +34,19 @@ #include #include #include +#include #include #include #include #include +#include #define CREATE_TRACE_POINTS #include -/* Debug Definitions */ -enum { - MSM_RPM_LOG_REQUEST_PRETTY = BIT(0), - MSM_RPM_LOG_REQUEST_RAW = BIT(1), - MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2), -}; - -static int msm_rpm_debug_mask; -module_param_named( - debug_mask, msm_rpm_debug_mask, int, 0644 -); - -struct msm_rpm_driver_data { - const char *ch_name; - uint32_t ch_type; - struct smd_channel *ch_info; - struct work_struct work; - spinlock_t smd_lock_write; - spinlock_t smd_lock_read; - struct completion smd_open; -}; - -struct glink_apps_rpm_data { - const char *name; - const char *edge; - const char *xprt; - void *glink_handle; - struct glink_link_info *link_info; - struct glink_open_config *open_cfg; - struct work_struct work; -}; - -static bool glink_enabled; -static struct glink_apps_rpm_data *glink_data; - #define DEFAULT_BUFFER_SIZE 256 #define DEBUG_PRINT_BUFFER_SIZE 512 #define MAX_SLEEP_BUFFER 128 -#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_NOIO) #define INV_RSC "resource does not exist" #define ERR "err\0" #define MAX_ERR_BUFFER_SIZE 128 @@ -107,10 +73,55 @@ static struct glink_apps_rpm_data *glink_data; sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr)) #define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset)) +#define for_each_kvp(buf, k) \ + for (k = (struct kvp *)get_first_kvp(buf); \ + ((void *)k - (void *)get_first_kvp(buf)) < \ + get_data_len(buf);\ + k = get_next_kvp(k)) + + +/* Debug Definitions */ +enum { + MSM_RPM_LOG_REQUEST_PRETTY = BIT(0), + MSM_RPM_LOG_REQUEST_RAW = BIT(1), + MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2), +}; + +static int msm_rpm_debug_mask; +module_param_named( + debug_mask, msm_rpm_debug_mask, int, 0644 +); + +static uint32_t rpm_msg_fmt_ver; +module_param_named( + rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, 0444 +); + +struct msm_rpm_driver_data { + const char *ch_name; + uint32_t ch_type; + struct smd_channel *ch_info; + struct work_struct work; + spinlock_t smd_lock_write; + spinlock_t smd_lock_read; + struct completion smd_open; +}; + +struct qcom_smd_rpm { + struct rpmsg_endpoint *rpm_channel; + struct device *dev; + int irq; + struct completion ack; + struct mutex lock; + int ack_status; +}; + +struct qcom_smd_rpm *rpm; +struct qcom_smd_rpm priv_rpm; + static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier); static bool standalone; static int probe_status = -EPROBE_DEFER; -static int msm_rpm_read_smd_data(char *buf); static void msm_rpm_process_ack(uint32_t msg_id, int errno); int msm_rpm_register_notifier(struct notifier_block *nb) @@ -123,8 +134,6 @@ int msm_rpm_unregister_notifier(struct notifier_block *nb) return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb); } -static struct workqueue_struct *msm_rpm_smd_wq; - enum { MSM_RPM_MSG_REQUEST_TYPE = 0, MSM_RPM_MSG_TYPE_NR, @@ -204,15 +213,8 @@ enum rpm_msg_fmts { RPM_MSG_V1_FMT }; -static uint32_t rpm_msg_fmt_ver; -module_param_named( - rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, 0444 -); - static struct rb_root tr_root = RB_ROOT; -static int (*msm_rpm_send_buffer)(char *buf, uint32_t size, bool noirq); -static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq); -static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq); +static int msm_rpm_send_smd_buffer(char *buf, uint32_t size); static uint32_t 
msm_rpm_get_next_msg_id(void); static inline uint32_t get_offset_value(uint32_t val, uint32_t offset, @@ -242,6 +244,7 @@ static inline void set_offset_value(uint32_t *val, uint32_t offset, *val &= CLEAR_FIELD(offset, size); *val |= ((val1 & mask) << offset); } + static uint32_t get_msg_id(char *buf) { if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) @@ -344,9 +347,9 @@ static void set_msg_ver(char *buf, uint32_t val) static void set_req_len(char *buf, uint32_t val) { - if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) { + if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ((struct rpm_message_header_v0 *)buf)->hdr.request_len = val; - } else { + else { set_offset_value(&((struct rpm_message_header_v1 *)buf)-> hdr.request_hdr, RPM_REQ_LEN_OFFSET, RPM_REQ_LEN_SIZE, val); @@ -355,9 +358,9 @@ static void set_req_len(char *buf, uint32_t val) static void change_req_len(char *buf, int32_t val) { - if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) { + if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ((struct rpm_message_header_v0 *)buf)->hdr.request_len += val; - } else { + else { change_offset_value(&((struct rpm_message_header_v1 *)buf)-> hdr.request_hdr, RPM_REQ_LEN_OFFSET, RPM_REQ_LEN_SIZE, val); @@ -366,10 +369,10 @@ static void change_req_len(char *buf, int32_t val) static void set_msg_type(char *buf, uint32_t val) { - if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) { + if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ((struct rpm_message_header_v0 *)buf)->hdr.service_type = msm_rpm_request_service_v1[val]; - } else { + else { set_offset_value(&((struct rpm_message_header_v1 *)buf)-> hdr.request_hdr, RPM_MSG_TYPE_OFFSET, RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE); @@ -414,6 +417,7 @@ static void set_set_type(char *buf, uint32_t val) request_details, RPM_SET_TYPE_OFFSET, RPM_SET_TYPE_SIZE, val); } + static void set_msg_id(char *buf, uint32_t val) { if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) @@ -456,7 +460,6 @@ static inline void *get_data(struct kvp *k) return (void *)k + sizeof(*k); } - static void delete_kvp(char *buf, struct kvp *d) { struct kvp *n; @@ -550,13 +553,6 @@ static int tr_insert(struct rb_root *root, struct slp_buf *slp) return 0; } -#define for_each_kvp(buf, k) \ - for (k = (struct kvp *)get_first_kvp(buf); \ - ((void *)k - (void *)get_first_kvp(buf)) < \ - get_data_len(buf);\ - k = get_next_kvp(k)) - - static void tr_update(struct slp_buf *s, char *buf) { struct kvp *e, *n; @@ -615,14 +611,10 @@ struct msm_rpm_wait_data { struct completion ack; bool delete_on_ack; }; -DEFINE_SPINLOCK(msm_rpm_list_lock); - - +DEFINE_SPINLOCK(msm_rpm_list_lock); LIST_HEAD(msm_rpm_ack_list); -static struct tasklet_struct data_tasklet; - static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf) { return get_ack_msg_id(buf); @@ -718,46 +710,6 @@ static struct msm_rpm_driver_data msm_rpm_data = { .smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open), }; -static int msm_rpm_glink_rx_poll(void *glink_handle) -{ - int ret; - - ret = glink_rpm_rx_poll(glink_handle); - if (ret >= 0) - /* - * Sleep for 50us at a time before checking - * for packet availability. The 50us is based - * on the the time rpm could take to process - * and send an ack for the sleep set request. - */ - udelay(50); - else - pr_err("Not receieve an ACK from RPM. ret = %d\n", ret); - - return ret; -} - -/* - * Returns - * = 0 on successful reads - * > 0 on successful reads with no further data - * standard Linux error codes on failure. 
- */ -static int msm_rpm_read_sleep_ack(void) -{ - int ret; - char buf[MAX_ERR_BUFFER_SIZE] = {0}; - - if (glink_enabled) - ret = msm_rpm_glink_rx_poll(glink_data->glink_handle); - else { - ret = msm_rpm_read_smd_data(buf); - if (!ret) - ret = smd_is_pkt_avail(msm_rpm_data.ch_info); - } - return ret; -} - static int msm_rpm_flush_requests(bool print) { struct rb_node *t; @@ -775,14 +727,9 @@ static int msm_rpm_flush_requests(bool print) set_msg_id(s->buf, msm_rpm_get_next_msg_id()); - if (!glink_enabled) - ret = msm_rpm_send_smd_buffer(s->buf, - get_buf_len(s->buf), true); - else - ret = msm_rpm_glink_send_buffer(s->buf, - get_buf_len(s->buf), true); - - WARN_ON(ret != get_buf_len(s->buf)); + ret = msm_rpm_send_smd_buffer(s->buf, + get_buf_len(s->buf)); + WARN_ON(ret != 0); trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id); s->valid = false; @@ -796,12 +743,9 @@ static int msm_rpm_flush_requests(bool print) * process these sleep set acks. */ if (count >= MAX_WAIT_ON_ACK) { - int ret = msm_rpm_read_sleep_ack(); - - if (ret >= 0) - count--; - else - return ret; + pr_err("Error: more than %d requests are buffered\n", + MAX_WAIT_ON_ACK); + return -ENOSPC; } } return 0; @@ -821,7 +765,7 @@ static void msm_rpm_notify_sleep_chain(char *buf, } static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle, - uint32_t key, const uint8_t *data, int size, bool noirq) + uint32_t key, const uint8_t *data, int size) { uint32_t i; uint32_t data_size, msg_size; @@ -862,12 +806,13 @@ static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle, handle->write_idx++; if (!handle->kvp[i].value) { - handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq)); + handle->kvp[i].value = kzalloc(data_size, GFP_NOIO); if (!handle->kvp[i].value) return -ENOMEM; } else { - /* We enter the else case, if a key already exists but the + /* + * We enter the else case, if a key already exists but the * data doesn't match. In which case, we should zero the data * out. 
*/ @@ -891,7 +836,7 @@ static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle, static struct msm_rpm_request *msm_rpm_create_request_common( enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id, - int num_elements, bool noirq) + int num_elements) { struct msm_rpm_request *cdata; uint32_t buf_size; @@ -899,20 +844,17 @@ static struct msm_rpm_request *msm_rpm_create_request_common( if (probe_status) return ERR_PTR(probe_status); - cdata = kzalloc(sizeof(struct msm_rpm_request), - GFP_FLAG(noirq)); + cdata = kzalloc(sizeof(struct msm_rpm_request), GFP_NOIO); - if (!cdata) { - pr_err("Cannot allocate memory for client data\n"); + if (!cdata) goto cdata_alloc_fail; - } if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) buf_size = sizeof(struct rpm_message_header_v0); else buf_size = sizeof(struct rpm_message_header_v1); - cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq)); + cdata->client_buf = kzalloc(buf_size, GFP_NOIO); if (!cdata->client_buf) goto client_buf_alloc_fail; @@ -925,7 +867,7 @@ static struct msm_rpm_request *msm_rpm_create_request_common( cdata->write_idx = 0; cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data), - GFP_FLAG(noirq)); + GFP_NOIO); if (!cdata->kvp) { pr_warn("%s(): Cannot allocate memory for key value data\n", @@ -933,7 +875,7 @@ static struct msm_rpm_request *msm_rpm_create_request_common( goto kvp_alloc_fail; } - cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq)); + cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_NOIO); if (!cdata->buf) goto buf_alloc_fail; @@ -972,23 +914,14 @@ struct msm_rpm_request *msm_rpm_create_request( uint32_t rsc_id, int num_elements) { return msm_rpm_create_request_common(set, rsc_type, rsc_id, - num_elements, false); + num_elements); } EXPORT_SYMBOL(msm_rpm_create_request); -struct msm_rpm_request *msm_rpm_create_request_noirq( - enum msm_rpm_set set, uint32_t rsc_type, - uint32_t rsc_id, int num_elements) -{ - return msm_rpm_create_request_common(set, rsc_type, rsc_id, - num_elements, true); -} -EXPORT_SYMBOL(msm_rpm_create_request_noirq); - int msm_rpm_add_kvp_data(struct msm_rpm_request *handle, uint32_t key, const uint8_t *data, int size) { - return msm_rpm_add_kvp_data_common(handle, key, data, size, false); + return msm_rpm_add_kvp_data_common(handle, key, data, size); } EXPORT_SYMBOL(msm_rpm_add_kvp_data); @@ -996,37 +929,10 @@ EXPORT_SYMBOL(msm_rpm_add_kvp_data); int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle, uint32_t key, const uint8_t *data, int size) { - return msm_rpm_add_kvp_data_common(handle, key, data, size, true); -} -EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq); - -/* Runs in interrupt context */ -static void msm_rpm_notify(void *data, unsigned int event) -{ - struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data; - - WARN_ON(!pdata); - - if (!(pdata->ch_info)) - return; - - switch (event) { - case SMD_EVENT_DATA: - tasklet_schedule(&data_tasklet); - trace_rpm_smd_interrupt_notify("interrupt notification"); - break; - case SMD_EVENT_OPEN: - complete(&pdata->smd_open); - break; - case SMD_EVENT_CLOSE: - case SMD_EVENT_STATUS: - case SMD_EVENT_REOPEN_READY: - break; - default: - pr_info("Unknown SMD event\n"); + return msm_rpm_add_kvp_data_common(handle, key, data, size); - } } +EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq); bool msm_rpm_waiting_for_ack(void) { @@ -1054,6 +960,7 @@ static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id) break; elem = NULL; } + spin_unlock_irqrestore(&msm_rpm_list_lock, flags); return elem; } @@ -1131,10 
+1038,12 @@ static void msm_rpm_process_ack(uint32_t msg_id, int errno) break; } } - /* Special case where the sleep driver doesn't + /* + * Special case where the sleep driver doesn't * wait for ACKs. This would decrease the latency involved with * entering RPM assisted power collapse. */ + if (!elem) trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF); @@ -1147,62 +1056,6 @@ struct msm_rpm_kvp_packet { uint32_t val; }; -static int msm_rpm_read_smd_data(char *buf) -{ - int pkt_sz; - int bytes_read = 0; - - pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info); - - if (!pkt_sz) - return -EAGAIN; - - if (pkt_sz > MAX_ERR_BUFFER_SIZE) { - pr_err("rpm_smd pkt_sz is greater than max size\n"); - goto error; - } - - if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info)) - return -EAGAIN; - - do { - int len; - - len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz); - pkt_sz -= len; - bytes_read += len; - - } while (pkt_sz > 0); - - if (pkt_sz < 0) { - pr_err("rpm_smd pkt_sz is less than zero\n"); - goto error; - } - return 0; -error: - WARN_ON(1); - - return 0; -} - -static void data_fn_tasklet(unsigned long data) -{ - uint32_t msg_id; - int errno; - char buf[MAX_ERR_BUFFER_SIZE] = {0}; - - spin_lock(&msm_rpm_data.smd_lock_read); - while (smd_is_pkt_avail(msm_rpm_data.ch_info)) { - if (msm_rpm_read_smd_data(buf)) - break; - msg_id = msm_rpm_get_msg_id_from_ack(buf); - errno = msm_rpm_get_error_from_ack(buf); - trace_rpm_smd_ack_recvd(0, msg_id, errno); - msm_rpm_process_ack(msg_id, errno); - } - spin_unlock(&msm_rpm_data.smd_lock_read); -} - static void msm_rpm_log_request(struct msm_rpm_request *cdata) { char buf[DEBUG_PRINT_BUFFER_SIZE]; @@ -1337,74 +1190,19 @@ static void msm_rpm_log_request(struct msm_rpm_request *cdata) printk(buf); } -static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq) +static int msm_rpm_send_smd_buffer(char *buf, uint32_t size) { unsigned long flags; int ret; spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags); - ret = smd_write_avail(msm_rpm_data.ch_info); - - while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) { - if (ret < 0) - break; - if (!noirq) { - spin_unlock_irqrestore( - &msm_rpm_data.smd_lock_write, flags); - cpu_relax(); - spin_lock_irqsave( - &msm_rpm_data.smd_lock_write, flags); - } else - udelay(5); - } - - if (ret < 0) { - pr_err("SMD not initialized\n"); - spin_unlock_irqrestore( - &msm_rpm_data.smd_lock_write, flags); - return ret; - } - - ret = smd_write(msm_rpm_data.ch_info, buf, size); + ret = rpmsg_send(rpm->rpm_channel, buf, size); spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags); return ret; } -static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq) -{ - int ret; - unsigned long flags; - int timeout = 50; - - spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags); - do { - ret = glink_tx(glink_data->glink_handle, buf, buf, - size, GLINK_TX_SINGLE_THREADED); - if (ret == -EBUSY || ret == -ENOSPC) { - if (!noirq) { - spin_unlock_irqrestore( - &msm_rpm_data.smd_lock_write, flags); - cpu_relax(); - spin_lock_irqsave( - &msm_rpm_data.smd_lock_write, flags); - } else { - udelay(5); - } - timeout--; - } else { - ret = 0; - } - } while (ret && timeout); - spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags); - - if (!timeout) - return 0; - else - return size; -} - static int msm_rpm_send_data(struct msm_rpm_request *cdata, - int msg_type, bool noirq, bool noack) + int msg_type, bool noack) { uint8_t *tmpbuff; int ret; @@ -1415,11 +1213,14 @@ static int msm_rpm_send_data(struct 
msm_rpm_request *cdata, uint32_t set = get_set_type(cdata->client_buf); uint32_t msg_id; - if (probe_status) + if (probe_status) { + pr_err("probe failed\n"); return probe_status; - - if (!data_len) + } + if (!data_len) { + pr_err("no data len\n"); return 1; + } msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) : sizeof(struct rpm_message_header_v0); @@ -1434,7 +1235,7 @@ static int msm_rpm_send_data(struct msm_rpm_request *cdata, if (msg_size > cdata->numbytes) { kfree(cdata->buf); cdata->numbytes = msg_size; - cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq)); + cdata->buf = kzalloc(msg_size, GFP_NOIO); } if (!cdata->buf) { @@ -1469,9 +1270,9 @@ static int msm_rpm_send_data(struct msm_rpm_request *cdata, memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz); if ((set == MSM_RPM_CTX_SLEEP_SET) && - !msm_rpm_smd_buffer_request(cdata, msg_size, - GFP_FLAG(noirq))) + !msm_rpm_smd_buffer_request(cdata, msg_size, GFP_NOIO)) { return 1; + } msg_id = msm_rpm_get_next_msg_id(); /* Set the version bit for new protocol */ @@ -1494,9 +1295,9 @@ static int msm_rpm_send_data(struct msm_rpm_request *cdata, msm_rpm_add_wait_list(msg_id, noack); - ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq); + ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size); - if (ret == msg_size) { + if (!ret) { for (i = 0; (i < cdata->write_idx); i++) cdata->kvp[i].valid = false; set_data_len(cdata->client_buf, 0); @@ -1504,7 +1305,7 @@ static int msm_rpm_send_data(struct msm_rpm_request *cdata, trace_rpm_smd_send_active_set(msg_id, get_rsc_type(cdata->client_buf), get_rsc_id(cdata->client_buf)); - } else if (ret < msg_size) { + } else if (ret < 0) { struct msm_rpm_wait_data *rc; ret = 0; @@ -1523,23 +1324,23 @@ static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack) static DEFINE_MUTEX(send_mtx); mutex_lock(&send_mtx); - ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false, noack); + ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, noack); mutex_unlock(&send_mtx); return ret; } -int msm_rpm_send_request(struct msm_rpm_request *handle) +int msm_rpm_send_request_noirq(struct msm_rpm_request *handle) { return _msm_rpm_send_request(handle, false); } -EXPORT_SYMBOL(msm_rpm_send_request); +EXPORT_SYMBOL(msm_rpm_send_request_noirq); -int msm_rpm_send_request_noirq(struct msm_rpm_request *handle) +int msm_rpm_send_request(struct msm_rpm_request *handle) { - return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true, false); + return _msm_rpm_send_request(handle, false); } -EXPORT_SYMBOL(msm_rpm_send_request_noirq); +EXPORT_SYMBOL(msm_rpm_send_request); void *msm_rpm_send_request_noack(struct msm_rpm_request *handle) { @@ -1581,94 +1382,9 @@ int msm_rpm_wait_for_ack(uint32_t msg_id) } EXPORT_SYMBOL(msm_rpm_wait_for_ack); -static void msm_rpm_smd_read_data_noirq(uint32_t msg_id) -{ - uint32_t id = 0; - - while (id != msg_id) { - if (smd_is_pkt_avail(msm_rpm_data.ch_info)) { - int errno; - char buf[MAX_ERR_BUFFER_SIZE] = {}; - - msm_rpm_read_smd_data(buf); - id = msm_rpm_get_msg_id_from_ack(buf); - errno = msm_rpm_get_error_from_ack(buf); - trace_rpm_smd_ack_recvd(1, msg_id, errno); - msm_rpm_process_ack(id, errno); - } - } -} - -static void msm_rpm_glink_read_data_noirq(struct msm_rpm_wait_data *elem) -{ - int ret; - - /* Use rx_poll method to read the message from RPM */ - while (elem->errno) { - ret = glink_rpm_rx_poll(glink_data->glink_handle); - if (ret >= 0) { - /* - * We might have receieve the notification. 
- * Now we have to check whether the notification - * received is what we are interested? - * Wait for few usec to get the notification - * before re-trying the poll again. - */ - udelay(50); - } else { - pr_err("rx poll return error = %d\n", ret); - } - } -} - int msm_rpm_wait_for_ack_noirq(uint32_t msg_id) { - struct msm_rpm_wait_data *elem; - unsigned long flags; - int rc = 0; - - if (!msg_id) { - pr_err("Invalid msg id\n"); - return -ENOMEM; - } - - if (msg_id == 1) - return 0; - - if (standalone) - return 0; - - spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags); - - elem = msm_rpm_get_entry_from_msg_id(msg_id); - - if (!elem) - /* Should this be a bug - * Is it ok for another thread to read the msg? - */ - goto wait_ack_cleanup; - - if (elem->errno != INIT_ERROR) { - rc = elem->errno; - msm_rpm_free_list_entry(elem); - goto wait_ack_cleanup; - } - - if (!glink_enabled) - msm_rpm_smd_read_data_noirq(msg_id); - else - msm_rpm_glink_read_data_noirq(elem); - - rc = elem->errno; - - msm_rpm_free_list_entry(elem); -wait_ack_cleanup: - spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags); - - if (!glink_enabled) - if (smd_is_pkt_avail(msm_rpm_data.ch_info)) - tasklet_schedule(&data_tasklet); - return rc; + return msm_rpm_wait_for_ack(msg_id); } EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq); @@ -1677,8 +1393,7 @@ void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type, { int i, rc; struct msm_rpm_request *req = - msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems, - false); + msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems); if (IS_ERR(req)) return req; @@ -1728,31 +1443,37 @@ int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type, EXPORT_SYMBOL(msm_rpm_send_message); int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type, - uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems) + uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems) { - int i, rc; - struct msm_rpm_request *req = - msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems); - if (IS_ERR(req)) - return PTR_ERR(req); + return msm_rpm_send_message(set, rsc_type, rsc_id, kvp, nelems); +} +EXPORT_SYMBOL(msm_rpm_send_message_noirq); - if (!req) - return -ENOMEM; +static int smd_mask_receive_interrupt(bool mask, + const struct cpumask *cpumask) +{ + struct irq_chip *irq_chip; + struct irq_data *irq_data; - for (i = 0; i < nelems; i++) { - rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key, - kvp[i].data, kvp[i].length); - if (rc) - goto bail; + irq_data = irq_get_irq_data(rpm->irq); + if (!irq_data) + return -ENODEV; + + irq_chip = irq_data->chip; + if (!irq_chip) + return -ENODEV; + + if (mask) { + irq_chip->irq_mask(irq_data); + if (cpumask) + irq_set_affinity(rpm->irq, cpumask); + } else { + irq_chip->irq_unmask(irq_data); } - rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req)); -bail: - msm_rpm_free_request(req); - return rc; + return 0; } -EXPORT_SYMBOL(msm_rpm_send_message_noirq); /** * During power collapse, the rpm driver disables the SMD interrupts to make @@ -1765,24 +1486,12 @@ int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask) if (standalone) return 0; - if (!glink_enabled) - ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info, - true, cpumask); - else - ret = glink_rpm_mask_rx_interrupt(glink_data->glink_handle, - true, (void *)cpumask); + ret = smd_mask_receive_interrupt(true, cpumask); if (!ret) { ret = msm_rpm_flush_requests(print); - - if (ret) { - if (!glink_enabled) - smd_mask_receive_interrupt( - 
msm_rpm_data.ch_info, false, NULL); - else - glink_rpm_mask_rx_interrupt( - glink_data->glink_handle, false, NULL); - } + if (ret) + smd_mask_receive_interrupt(false, NULL); } return ret; } @@ -1794,29 +1503,16 @@ EXPORT_SYMBOL(msm_rpm_enter_sleep); */ void msm_rpm_exit_sleep(void) { - int ret; if (standalone) return; - do { - ret = msm_rpm_read_sleep_ack(); - } while (ret > 0); - - if (!glink_enabled) - smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL); - else - glink_rpm_mask_rx_interrupt(glink_data->glink_handle, - false, NULL); + smd_mask_receive_interrupt(false, NULL); } EXPORT_SYMBOL(msm_rpm_exit_sleep); -/* - * Whenever there is a data from RPM, notify_rx will be called. - * This function is invoked either interrupt OR polling context. - */ -static void msm_rpm_trans_notify_rx(void *handle, const void *priv, - const void *pkt_priv, const void *ptr, size_t size) +static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, void *ptr, + int size, void *priv, u32 addr) { uint32_t msg_id; int errno; @@ -1826,7 +1522,7 @@ static void msm_rpm_trans_notify_rx(void *handle, const void *priv, unsigned long flags; if (!size) - return; + return -EINVAL; WARN_ON(size > MAX_ERR_BUFFER_SIZE); @@ -1844,219 +1540,39 @@ static void msm_rpm_trans_notify_rx(void *handle, const void *priv, */ if (!elem) { spin_unlock_irqrestore(&rx_notify_lock, flags); - glink_rx_done(handle, ptr, 0); - return; + return 0; } msm_rpm_process_ack(msg_id, errno); spin_unlock_irqrestore(&rx_notify_lock, flags); - glink_rx_done(handle, ptr, 0); -} - -static void msm_rpm_trans_notify_state(void *handle, const void *priv, - unsigned int event) -{ - switch (event) { - case GLINK_CONNECTED: - glink_data->glink_handle = handle; - - if (IS_ERR_OR_NULL(glink_data->glink_handle)) { - pr_err("glink_handle %d\n", - (int)PTR_ERR(glink_data->glink_handle)); - WARN_ON(1); - } - - /* - * Do not allow clients to send data to RPM until glink - * is fully open. 
- */ - probe_status = 0; - pr_info("glink config params: transport=%s, edge=%s, name=%s\n", - glink_data->xprt, - glink_data->edge, - glink_data->name); - break; - default: - pr_err("Unrecognized event %d\n", event); - break; - }; -} - -static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv, - const void *pkt_priv, const void *ptr) -{ -} - -static void msm_rpm_glink_open_work(struct work_struct *work) -{ - pr_debug("Opening glink channel\n"); - glink_data->glink_handle = glink_open(glink_data->open_cfg); - - if (IS_ERR_OR_NULL(glink_data->glink_handle)) { - pr_err("Error: glink_open failed %d\n", - (int)PTR_ERR(glink_data->glink_handle)); - WARN_ON(1); - } -} - -static void msm_rpm_glink_notifier_cb(struct glink_link_state_cb_info *cb_info, - void *priv) -{ - struct glink_open_config *open_config; - static bool first = true; - - if (!cb_info) { - pr_err("Missing callback data\n"); - return; - } - - switch (cb_info->link_state) { - case GLINK_LINK_STATE_UP: - if (first) - first = false; - else - break; - open_config = kzalloc(sizeof(*open_config), GFP_KERNEL); - if (!open_config) { - pr_err("Could not allocate memory\n"); - break; - } - - glink_data->open_cfg = open_config; - pr_debug("glink link state up cb receieved\n"); - INIT_WORK(&glink_data->work, msm_rpm_glink_open_work); - - open_config->priv = glink_data; - open_config->name = glink_data->name; - open_config->edge = glink_data->edge; - open_config->notify_rx = msm_rpm_trans_notify_rx; - open_config->notify_tx_done = msm_rpm_trans_notify_tx_done; - open_config->notify_state = msm_rpm_trans_notify_state; - schedule_work(&glink_data->work); - break; - default: - pr_err("Unrecognised state = %d\n", cb_info->link_state); - break; - }; -} - -static int msm_rpm_glink_dt_parse(struct platform_device *pdev, - struct glink_apps_rpm_data *glink_data) -{ - char *key = NULL; - int ret; - - if (of_device_is_compatible(pdev->dev.of_node, "qcom,rpm-glink")) { - glink_enabled = true; - } else { - pr_warn("qcom,rpm-glink compatible not matches\n"); - ret = -EINVAL; - return ret; - } - - key = "qcom,glink-edge"; - ret = of_property_read_string(pdev->dev.of_node, key, - &glink_data->edge); - if (ret) { - pr_err("Failed to read node: %s, key=%s\n", - pdev->dev.of_node->full_name, key); - return ret; - } - - key = "rpm-channel-name"; - ret = of_property_read_string(pdev->dev.of_node, key, - &glink_data->name); - if (ret) - pr_err("%s(): Failed to read node: %s, key=%s\n", __func__, - pdev->dev.of_node->full_name, key); - - return ret; -} - -static int msm_rpm_glink_link_setup(struct glink_apps_rpm_data *glink_data, - struct platform_device *pdev) -{ - struct glink_link_info *link_info; - void *link_state_cb_handle; - struct device *dev = &pdev->dev; - int ret = 0; - - link_info = devm_kzalloc(dev, sizeof(struct glink_link_info), - GFP_KERNEL); - if (!link_info) { - ret = -ENOMEM; - return ret; - } - - glink_data->link_info = link_info; - - /* - * Setup link info parameters - */ - link_info->edge = glink_data->edge; - link_info->glink_link_state_notif_cb = - msm_rpm_glink_notifier_cb; - link_state_cb_handle = glink_register_link_state_cb(link_info, NULL); - if (IS_ERR_OR_NULL(link_state_cb_handle)) { - pr_err("Could not register cb\n"); - ret = PTR_ERR(link_state_cb_handle); - return ret; - } - - spin_lock_init(&msm_rpm_data.smd_lock_read); - spin_lock_init(&msm_rpm_data.smd_lock_write); - - return ret; -} - -static int msm_rpm_dev_glink_probe(struct platform_device *pdev) -{ - int ret = -ENOMEM; - struct device *dev = &pdev->dev; - - 
glink_data = devm_kzalloc(dev, sizeof(*glink_data), GFP_KERNEL); - if (!glink_data) - return ret; - - ret = msm_rpm_glink_dt_parse(pdev, glink_data); - if (ret < 0) { - devm_kfree(dev, glink_data); - return ret; - } - - ret = msm_rpm_glink_link_setup(glink_data, pdev); - if (ret < 0) { - /* - * If the glink setup fails there is no - * fall back mechanism to SMD. - */ - pr_err("GLINK setup fail ret = %d\n", ret); - WARN_ON(1); - } - - return ret; + return 0; } -static int msm_rpm_dev_probe(struct platform_device *pdev) +static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { char *key = NULL; + struct device_node *p; int ret = 0; + int irq; void __iomem *reg_base; uint32_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */ - /* - * Check for standalone support - */ + p = of_find_compatible_node(NULL, NULL, "qcom,rpm-smd"); + if (!p) { + pr_err("Unable to find rpm-smd\n"); + probe_status = -ENODEV; + goto fail; + } + key = "rpm-standalone"; - standalone = of_property_read_bool(pdev->dev.of_node, key); + standalone = of_property_read_bool(p, key); if (standalone) { probe_status = ret; goto skip_init; } - reg_base = of_iomap(pdev->dev.of_node, 0); - + reg_base = of_iomap(p, 0); if (reg_base) { version = readq_relaxed(reg_base); iounmap(reg_base); @@ -2065,70 +1581,34 @@ static int msm_rpm_dev_probe(struct platform_device *pdev) if (version == V1_PROTOCOL_VERSION) rpm_msg_fmt_ver = RPM_MSG_V1_FMT; - pr_debug("RPM-SMD running version %d/n", rpm_msg_fmt_ver); + pr_info("RPM-SMD running version %d\n", rpm_msg_fmt_ver); - ret = msm_rpm_dev_glink_probe(pdev); - if (!ret) { - pr_info("APSS-RPM communication over GLINK\n"); - msm_rpm_send_buffer = msm_rpm_glink_send_buffer; - of_platform_populate(pdev->dev.of_node, NULL, NULL, - &pdev->dev); - return ret; - } - msm_rpm_send_buffer = msm_rpm_send_smd_buffer; - - key = "rpm-channel-name"; - ret = of_property_read_string(pdev->dev.of_node, key, - &msm_rpm_data.ch_name); - if (ret) { - pr_err("%s(): Failed to read node: %s, key=%s\n", __func__, - pdev->dev.of_node->full_name, key); + irq = of_irq_get(p, 0); + if (!irq) { + pr_err("Unable to get rpm-smd interrupt number\n"); + probe_status = -ENODEV; goto fail; } - key = "rpm-channel-type"; - ret = of_property_read_u32(pdev->dev.of_node, key, - &msm_rpm_data.ch_type); - if (ret) { - pr_err("%s(): Failed to read node: %s, key=%s\n", __func__, - pdev->dev.of_node->full_name, key); + rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); + if (!rpm) { + probe_status = -ENOMEM; goto fail; } - ret = smd_named_open_on_edge(msm_rpm_data.ch_name, - msm_rpm_data.ch_type, - &msm_rpm_data.ch_info, - &msm_rpm_data, - msm_rpm_notify); - if (ret) { - if (ret != -EPROBE_DEFER) { - pr_err("%s: Cannot open RPM channel %s %d\n", - __func__, msm_rpm_data.ch_name, - msm_rpm_data.ch_type); - } - goto fail; - } + rpm->dev = &rpdev->dev; + rpm->rpm_channel = rpdev->ept; + dev_set_drvdata(&rpdev->dev, rpm); + priv_rpm = *rpm; + rpm->irq = irq; + mutex_init(&rpm->lock); + init_completion(&rpm->ack); spin_lock_init(&msm_rpm_data.smd_lock_write); spin_lock_init(&msm_rpm_data.smd_lock_read); - tasklet_init(&data_tasklet, data_fn_tasklet, 0); - - wait_for_completion(&msm_rpm_data.smd_open); - - smd_disable_read_intr(msm_rpm_data.ch_info); - - msm_rpm_smd_wq = alloc_workqueue("rpm-smd", - WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1); - if (!msm_rpm_smd_wq) { - pr_err("%s: Unable to alloc rpm-smd workqueue\n", __func__); - ret = -EINVAL; - goto fail; - } - queue_work(msm_rpm_smd_wq, &msm_rpm_data.work); - probe_status 
= ret; skip_init: - of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + probe_status = of_platform_populate(p, NULL, NULL, &rpdev->dev); if (standalone) pr_info("RPM running in standalone mode\n"); @@ -2136,30 +1616,35 @@ static int msm_rpm_dev_probe(struct platform_device *pdev) return probe_status; } -static const struct of_device_id msm_rpm_match_table[] = { - {.compatible = "qcom,rpm-smd"}, - {.compatible = "qcom,rpm-glink"}, - {}, +static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) +{ + of_platform_depopulate(&rpdev->dev); +} + +static struct rpmsg_device_id rpmsg_driver_rpm_id_table[] = { + { .name = "rpm_requests" }, + { }, }; -static struct platform_driver msm_rpm_device_driver = { - .probe = msm_rpm_dev_probe, - .driver = { - .name = "rpm-smd", +static struct rpmsg_driver qcom_smd_rpm_driver = { + .probe = qcom_smd_rpm_probe, + .remove = qcom_smd_rpm_remove, + .callback = qcom_smd_rpm_callback, + .id_table = rpmsg_driver_rpm_id_table, + .drv = { + .name = "qcom_rpm_smd", .owner = THIS_MODULE, - .of_match_table = msm_rpm_match_table, }, }; int __init msm_rpm_driver_init(void) { - static bool registered; + unsigned int ret = 0; - if (registered) - return 0; - registered = true; + ret = register_rpmsg_driver(&qcom_smd_rpm_driver); + if (ret) + pr_err("register_rpmsg_driver: failed with err %d\n", ret); - return platform_driver_register(&msm_rpm_device_driver); + return ret; } -EXPORT_SYMBOL(msm_rpm_driver_init); -arch_initcall(msm_rpm_driver_init); +postcore_initcall(msm_rpm_driver_init); diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index e0996fce3963969316fc1889a1c69e857c884c5a..6a5b5b16145e3c546a73aa658728e632a9fad579 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void) unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); } module_exit(rpmsg_chrdev_exit); + +MODULE_ALIAS("rpmsg:rpmsg_chrdev"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 3d2216ccd860c6fef173882e3e24a92e2309eca2..8eb2b6dd36fea45cde68e10b93188ef05c08307d 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -74,7 +74,7 @@ #define SUN6I_ALARM_CONFIG_WAKEUP BIT(0) #define SUN6I_LOSC_OUT_GATING 0x0060 -#define SUN6I_LOSC_OUT_GATING_EN BIT(0) +#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0 /* * Get date values @@ -253,7 +253,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node) &clkout_name); rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name, 0, rtc->base + SUN6I_LOSC_OUT_GATING, - SUN6I_LOSC_OUT_GATING_EN, 0, + SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0, &rtc->lock); if (IS_ERR(rtc->ext_losc)) { pr_crit("Couldn't register the LOSC external gate\n"); diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index a851d34c642b5d26866fafdde925eb48ddf61003..04674ce961f1d7a639b948847526658e6fde7641 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -189,7 +189,7 @@ static struct device_driver smsg_driver = { static void __exit smsg_exit(void) { - cpcmd("SET SMSG IUCV", NULL, 0, NULL); + cpcmd("SET SMSG OFF", NULL, 0, NULL); device_unregister(smsg_dev); iucv_unregister(&smsg_handler, 1); driver_unregister(&smsg_driver); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 18c4f933e8b9a82c51fa20e113b6f6ca20566311..b415ba42ca73a7430d387d75f578cbac00761563 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -664,6 +664,46 @@ 
void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, spin_unlock_irqrestore(&dbf->scsi_lock, flags); } +/** + * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks. + * @tag: Identifier for event. + * @adapter: Pointer to zfcp adapter as context for this event. + * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF). + * @ret: Return value of calling function. + * + * This SCSI trace variant does not depend on any of: + * scsi_cmnd, zfcp_fsf_req, scsi_device. + */ +void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter, + unsigned int scsi_id, int ret) +{ + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; + unsigned long flags; + static int const level = 1; + + if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level))) + return; + + spin_lock_irqsave(&dbf->scsi_lock, flags); + memset(rec, 0, sizeof(*rec)); + + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_SCSI_CMND; + rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */ + rec->scsi_retries = ~0; + rec->scsi_allowed = ~0; + rec->fcp_rsp_info = ~0; + rec->scsi_id = scsi_id; + rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN; + rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32); + rec->host_scribble = ~0; + memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE); + + debug_event(dbf->scsi, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); +} + static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) { struct debug_info *d; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index cbb8156bf5e0c4979896b3f1edd8004c93837cfb..7aa243a6cdbfa8c04f6293eb9a07cc0c2c2165cd 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -35,11 +35,28 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_LUN_OPENING = 0x2000, }; +/** + * enum zfcp_erp_act_type - Type of ERP action object. + * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery. + * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery. + * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery. + * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery. + * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with + * either of the first four enum values. + * Used to indicate that an ERP action could not be + * set up despite a detected need for some recovery. + * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with + * either of the first four enum values. + * Used to indicate that ERP not needed because + * the object has ZFCP_STATUS_COMMON_ERP_FAILED. 
+ */ enum zfcp_erp_act_type { ZFCP_ERP_ACTION_REOPEN_LUN = 1, ZFCP_ERP_ACTION_REOPEN_PORT = 2, ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, + ZFCP_ERP_ACTION_NONE = 0xc0, + ZFCP_ERP_ACTION_FAILED = 0xe0, }; enum zfcp_erp_act_state { @@ -126,6 +143,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) } } +static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter, + struct zfcp_port *port, + struct scsi_device *sdev) +{ + int need = want; + struct zfcp_scsi_dev *zsdev; + + switch (want) { + case ZFCP_ERP_ACTION_REOPEN_LUN: + zsdev = sdev_to_zfcp(sdev); + if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + need = 0; + break; + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + need = 0; + break; + case ZFCP_ERP_ACTION_REOPEN_PORT: + if (atomic_read(&port->status) & + ZFCP_STATUS_COMMON_ERP_FAILED) { + need = 0; + /* ensure propagation of failed status to new devices */ + zfcp_erp_set_port_status( + port, ZFCP_STATUS_COMMON_ERP_FAILED); + } + break; + case ZFCP_ERP_ACTION_REOPEN_ADAPTER: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_COMMON_ERP_FAILED) { + need = 0; + /* ensure propagation of failed status to new devices */ + zfcp_erp_set_adapter_status( + adapter, ZFCP_STATUS_COMMON_ERP_FAILED); + } + break; + default: + need = 0; + break; + } + + return need; +} + static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, struct scsi_device *sdev) @@ -249,16 +309,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, int retval = 1, need; struct zfcp_erp_action *act; - if (!adapter->erp_thread) - return -EIO; + need = zfcp_erp_handle_failed(want, adapter, port, sdev); + if (!need) { + need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */ + goto out; + } + + if (!adapter->erp_thread) { + need = ZFCP_ERP_ACTION_NONE; /* marker for trace */ + retval = -EIO; + goto out; + } need = zfcp_erp_required_act(want, adapter, port, sdev); if (!need) goto out; act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); - if (!act) + if (!act) { + need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */ goto out; + } atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); @@ -269,18 +340,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, return retval; } +void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter, + u64 port_name, u32 port_id) +{ + unsigned long flags; + static /* don't waste stack */ struct zfcp_port tmpport; + + write_lock_irqsave(&adapter->erp_lock, flags); + /* Stand-in zfcp port with fields just good enough for + * zfcp_dbf_rec_trig() and zfcp_dbf_set_common(). + * Under lock because tmpport is static. 
+ */ + atomic_set(&tmpport.status, -1); /* unknown */ + tmpport.wwpn = port_name; + tmpport.d_id = port_id; + zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL, + ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, + ZFCP_ERP_ACTION_NONE); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear_mask, char *id) { zfcp_erp_adapter_block(adapter, clear_mask); zfcp_scsi_schedule_rports_block(adapter); - /* ensure propagation of failed status to new devices */ - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_set_adapter_status(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED); - return -EIO; - } return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, NULL, NULL, id, 0); } @@ -299,12 +384,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id) zfcp_scsi_schedule_rports_block(adapter); write_lock_irqsave(&adapter->erp_lock, flags); - if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - zfcp_erp_set_adapter_status(adapter, - ZFCP_STATUS_COMMON_ERP_FAILED); - else - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, - NULL, NULL, id, 0); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, + NULL, NULL, id, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } @@ -345,9 +426,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, port->adapter, port, NULL, id, 0); } @@ -373,12 +451,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) zfcp_erp_port_block(port, clear); zfcp_scsi_schedule_rport_block(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - /* ensure propagation of failed status to new devices */ - zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); - return -EIO; - } - return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, port->adapter, port, NULL, id, 0); } @@ -418,9 +490,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, zfcp_erp_lun_block(sdev, clear); - if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, zfcp_sdev->port, sdev, id, act_status); } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index b1cbb14fb2ae531b6f9ca52860421304c9a03214..c1092a11e728163733cc764db924bb1f2eb20471 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, struct zfcp_fsf_req *); +extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter, + unsigned int scsi_id, int ret); /* zfcp_erp.c */ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); +extern void zfcp_erp_port_forced_no_port_dbf(char *id, + struct zfcp_adapter *adapter, + u64 port_name, u32 port_id); extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); diff 
--git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 22f9562f415cbb09a098a83318818c49217a8237..0b6f51424745bc0f2e021c28411dbd3dc12b9ce7 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (abrt_req) break; + zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL); zfcp_erp_wait(adapter); ret = fc_block_scsi_eh(scpnt); if (ret) { @@ -277,6 +278,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (fsf_req) break; + zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL); zfcp_erp_wait(adapter); ret = fc_block_scsi_eh(scpnt); if (ret) { @@ -323,15 +325,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; - int ret; + int ret = SUCCESS, fc_ret; zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); zfcp_erp_wait(adapter); - ret = fc_block_scsi_eh(scpnt); - if (ret) - return ret; + fc_ret = fc_block_scsi_eh(scpnt); + if (fc_ret) + ret = fc_ret; - return SUCCESS; + zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret); + return ret; } struct scsi_transport_template *zfcp_scsi_transport_template; @@ -602,6 +605,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) if (port) { zfcp_erp_port_forced_reopen(port, 0, "sctrpi1"); put_device(&port->dev); + } else { + zfcp_erp_port_forced_no_port_dbf( + "sctrpin", adapter, + rport->port_name /* zfcp_scsi_rport_register */, + rport->port_id /* zfcp_scsi_rport_register */); } } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 5fbaf13781b6c0803c4c2fa962a9492f92d982b4..604a39dba5d0c7d0a0a54ed4267efeb09ae88b0e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8638,7 +8638,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) kfree(options); } -static void hpsa_shutdown(struct pci_dev *pdev) +static void __hpsa_shutdown(struct pci_dev *pdev) { struct ctlr_info *h; @@ -8653,6 +8653,12 @@ static void hpsa_shutdown(struct pci_dev *pdev) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } +static void hpsa_shutdown(struct pci_dev *pdev) +{ + __hpsa_shutdown(pdev); + pci_disable_device(pdev); +} + static void hpsa_free_device_info(struct ctlr_info *h) { int i; @@ -8696,7 +8702,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) scsi_remove_host(h->scsi_host); /* init_one 8 */ /* includes hpsa_free_irqs - init_one 4 */ /* includes hpsa_disable_interrupt_mode - pci_init 2 */ - hpsa_shutdown(pdev); + __hpsa_shutdown(pdev); hpsa_free_device_info(h); /* scan */ diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index ac879745ef8007ab2a4973aff26e3c950e8f1dba..18a409bb9e0ca4955eee05c2ef07eb7020427707 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, * Note: We have not moved the current phy_index so we will actually * compare the startting phy with itself. * This is expected and required to add the phy to the port. 
*/ - while (phy_index < SCI_MAX_PHYS) { + for (; phy_index < SCI_MAX_PHYS; phy_index++) { if ((phy_mask & (1 << phy_index)) == 0) continue; sci_phy_get_sas_address(&ihost->phys[phy_index], @@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); - phy_index++; } } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 4bf406df051bbf410047188a4adec492fb9776d2..72a919179d06268ba8d123d21273f3074b1e6154 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -903,7 +903,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) goto fail_fw_init; } - ret = 0; + return 0; fail_fw_init: megasas_return_cmd(instance, cmd); @@ -913,8 +913,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) IOCInitMessage, ioc_init_handle); fail_get_cmd: dev_err(&instance->pdev->dev, - "Init cmd return status %s for SCSI host %d\n", - ret ? "FAILED" : "SUCCESS", instance->host->host_no); + "Init cmd return status FAILED for SCSI host %d\n", + instance->host->host_no); return ret; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e24f57946a17249c9857b7a8c46fdf486c887d83..bcde6130f12149026b29314b68a811af94905810 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -4627,7 +4627,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) return; if (fcport->fp_speed == PORT_SPEED_UNKNOWN || - fcport->fp_speed > ha->link_data_rate) + fcport->fp_speed > ha->link_data_rate || + !ha->flags.gpsc_supported) return; rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed, diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 13a00a42b3ca637737b280de4939cf4af1e6a0e7..e073eb16f8a4aa24242b909b60f0562ab20e5334 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2454,8 +2454,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ox_id = le16_to_cpu(sts24->ox_id); par_sense_len = sizeof(sts24->data); /* Valid values of the retry delay timer are 0x1-0xffef */ - if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) - retry_delay = sts24->retry_delay; + if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) { + retry_delay = sts24->retry_delay & 0x3fff; + ql_dbg(ql_dbg_io, sp->vha, 0x3033, + "%s: scope=%#x retry_delay=%#x\n", __func__, + sts24->retry_delay >> 14, retry_delay); + } } else { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le16_to_cpu(sts->req_sense_length); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 7404d26895f5b7de916f65e86c549790cf444d96..f6542c159ed637504ff3e5d2c157ebfed2dac196 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) return nlmsg_multicast(nls, skb, 0, group, gfp); } +static int +iscsi_unicast_skb(struct sk_buff *skb, u32 portid) +{ + return nlmsg_unicast(nls, skb, portid); +} + int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { @@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); static int -iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, - void *payload, 
int size) +iscsi_if_send_reply(u32 portid, int type, void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; int len = nlmsg_total_size(size); - int flags = multi ? NLM_F_MULTI : 0; - int t = done ? NLMSG_DONE : type; skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { @@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, return -ENOMEM; } - nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); - nlh->nlmsg_flags = flags; + nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0); memcpy(nlmsg_data(nlh), payload, size); - return iscsi_multicast_skb(skb, group, GFP_ATOMIC); + return iscsi_unicast_skb(skb, portid); } static int @@ -3470,6 +3472,7 @@ static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; + u32 portid; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) if (!try_module_get(transport->owner)) return -EINVAL; + portid = NETLINK_CB(skb).portid; + switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, - NETLINK_CB(skb).portid, + portid, ev->u.c_session.initial_cmdsn, ev->u.c_session.cmds_max, ev->u.c_session.queue_depth); @@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) } err = iscsi_if_create_session(priv, ep, ev, - NETLINK_CB(skb).portid, + portid, ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); @@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) static void iscsi_if_rx(struct sk_buff *skb) { + u32 portid = NETLINK_CB(skb).portid; + mutex_lock(&rx_queue_mutex); while (skb->len >= NLMSG_HDRLEN) { int err; @@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; - err = iscsi_if_send_reply(group, nlh->nlmsg_seq, - nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); + err = iscsi_if_send_reply(portid, nlh->nlmsg_type, + ev, sizeof(*ev)); } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 2eb61d54bbb48fd73426767c4c4f7c5ebde0504f..ea9e1e0ed5b8502be262940459d3cad7d4ec0392 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -423,9 +423,18 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, #define SD_ZBC_BUF_SIZE 131072 -static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) +/** + * sd_zbc_check_zone_size - Check the device zone sizes + * @sdkp: Target disk + * + * Check that all zones of the device are equal. The last zone can however + * be smaller. The zone size must also be a power of two number of LBAs. + * + * Returns the zone size in bytes upon success or an error code upon failure. 
+ */ +static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp) { - u64 zone_blocks; + u64 zone_blocks = 0; sector_t block = 0; unsigned char *buf; unsigned char *rec; @@ -434,8 +443,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) int ret; u8 same; - sdkp->zone_blocks = 0; - /* Get a buffer */ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL); if (!buf) @@ -443,10 +450,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) /* Do a report zone to get the same field */ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); - if (ret) { - zone_blocks = 0; - goto out; - } + if (ret) + goto out_free; same = buf[4] & 0x0f; if (same > 0) { @@ -472,16 +477,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) /* Parse zone descriptors */ while (rec < buf + buf_len) { - zone_blocks = get_unaligned_be64(&rec[8]); - if (sdkp->zone_blocks == 0) { - sdkp->zone_blocks = zone_blocks; - } else if (zone_blocks != sdkp->zone_blocks && - (block + zone_blocks < sdkp->capacity - || zone_blocks > sdkp->zone_blocks)) { + u64 this_zone_blocks = get_unaligned_be64(&rec[8]); + + if (zone_blocks == 0) { + zone_blocks = this_zone_blocks; + } else if (this_zone_blocks != zone_blocks && + (block + this_zone_blocks < sdkp->capacity + || this_zone_blocks > zone_blocks)) { zone_blocks = 0; goto out; } - block += zone_blocks; + block += this_zone_blocks; rec += 64; } @@ -489,61 +495,77 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, block); if (ret) - return ret; + goto out_free; } } while (block < sdkp->capacity); - zone_blocks = sdkp->zone_blocks; - out: - kfree(buf); - if (!zone_blocks) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Devices with non constant zone " "size are not supported\n"); - return -ENODEV; - } - - if (!is_power_of_2(zone_blocks)) { + ret = -ENODEV; + } else if (!is_power_of_2(zone_blocks)) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Devices with non power of 2 zone " "size are not supported\n"); - return -ENODEV; - } - - if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { + ret = -ENODEV; + } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Zone size too large\n"); - return -ENODEV; + ret = -ENODEV; + } else { + ret = zone_blocks; } - sdkp->zone_blocks = zone_blocks; +out_free: + kfree(buf); - return 0; + return ret; } -static int sd_zbc_setup(struct scsi_disk *sdkp) +static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks) { + struct request_queue *q = sdkp->disk->queue; + u32 zone_shift = ilog2(zone_blocks); + u32 nr_zones; /* chunk_sectors indicates the zone size */ - blk_queue_chunk_sectors(sdkp->disk->queue, - logical_to_sectors(sdkp->device, sdkp->zone_blocks)); - sdkp->zone_shift = ilog2(sdkp->zone_blocks); - sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift; - if (sdkp->capacity & (sdkp->zone_blocks - 1)) - sdkp->nr_zones++; - - if (!sdkp->zones_wlock) { - sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones), - sizeof(unsigned long), - GFP_KERNEL); - if (!sdkp->zones_wlock) - return -ENOMEM; + blk_queue_chunk_sectors(q, + logical_to_sectors(sdkp->device, zone_blocks)); + nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift; + + /* + * Initialize the disk zone write lock bitmap if the number + * of zones changed. 
+ */ + if (nr_zones != sdkp->nr_zones) { + unsigned long *zones_wlock = NULL; + + if (nr_zones) { + zones_wlock = kcalloc(BITS_TO_LONGS(nr_zones), + sizeof(unsigned long), + GFP_KERNEL); + if (!zones_wlock) + return -ENOMEM; + } + + blk_mq_freeze_queue(q); + sdkp->zone_blocks = zone_blocks; + sdkp->zone_shift = zone_shift; + sdkp->nr_zones = nr_zones; + swap(sdkp->zones_wlock, zones_wlock); + blk_mq_unfreeze_queue(q); + + kfree(zones_wlock); + + /* READ16/WRITE16 is mandatory for ZBC disks */ + sdkp->device->use_16_for_rw = 1; + sdkp->device->use_10_for_rw = 0; } return 0; @@ -552,6 +574,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp) int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) { + int64_t zone_blocks; int ret; if (!sd_is_zoned(sdkp)) @@ -589,19 +612,19 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, * Check zone size: only devices with a constant zone size (except * an eventual last runt zone) that is a power of 2 are supported. */ - ret = sd_zbc_check_zone_size(sdkp); - if (ret) + zone_blocks = sd_zbc_check_zone_size(sdkp); + ret = -EFBIG; + if (zone_blocks != (u32)zone_blocks) + goto err; + ret = zone_blocks; + if (ret < 0) goto err; /* The drive satisfies the kernel restrictions: set it up */ - ret = sd_zbc_setup(sdkp); + ret = sd_zbc_setup(sdkp, zone_blocks); if (ret) goto err; - /* READ16/WRITE16 is mandatory for ZBC disks */ - sdkp->device->use_16_for_rw = 1; - sdkp->device->use_10_for_rw = 0; - return 0; err: @@ -614,6 +637,7 @@ void sd_zbc_remove(struct scsi_disk *sdkp) { kfree(sdkp->zones_wlock); sdkp->zones_wlock = NULL; + sdkp->nr_zones = 0; } void sd_zbc_print_zones(struct scsi_disk *sdkp) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index d374b16c9a6112877c9c4ce359e58d7ec647ae22..7ad17aca9b8292b10231cd10906569a0cc3d4440 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -498,7 +498,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) old_hdr->result = EIO; break; case DID_ERROR: - old_hdr->result = (srp->sense_b[0] == 0 && + old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 
0 : EIO; break; default: @@ -891,8 +891,10 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -ENXIO; if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) return -EFAULT; + mutex_lock(&sfp->parentdp->open_rel_lock); result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); + mutex_unlock(&sfp->parentdp->open_rel_lock); if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, @@ -993,8 +995,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) result = get_user(val, ip); if (result) return result; - if (val < 0) - return -EINVAL; + if (val < 0) + return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); mutex_lock(&sfp->f_mutex); @@ -1004,9 +1006,10 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) mutex_unlock(&sfp->f_mutex); return -EBUSY; } - + mutex_lock(&sfp->parentdp->open_rel_lock); sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); + mutex_unlock(&sfp->parentdp->open_rel_lock); } mutex_unlock(&sfp->f_mutex); return 0; @@ -1132,14 +1135,14 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon return -ENXIO; sdev = sdp->device; - if (sdev->host->hostt->compat_ioctl) { + if (sdev->host->hostt->compat_ioctl) { int ret; ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); return ret; } - + return -ENOIOCTLCMD; } #endif @@ -1635,7 +1638,7 @@ init_sg(void) else def_reserved_size = sg_big_buff; - rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), + rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; @@ -2295,7 +2298,7 @@ static const struct file_operations adio_fops = { }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); -static ssize_t sg_proc_write_dressz(struct file *filp, +static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); static const struct file_operations dressz_fops = { .owner = THIS_MODULE, @@ -2435,7 +2438,7 @@ static int sg_proc_single_open_adio(struct inode *inode, struct file *file) return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } -static ssize_t +static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { @@ -2456,7 +2459,7 @@ static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) return single_open(file, sg_proc_seq_show_int, &sg_big_buff); } -static ssize_t +static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index c44de0b4a995a9d5b546ecfac4f435f8b448680e..beb585ddc07dcd4594d5e290e32e1fe47980bace 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1725,11 +1725,14 @@ static int storvsc_probe(struct hv_device *device, max_targets = STORVSC_MAX_TARGETS; max_channels = STORVSC_MAX_CHANNELS; /* - * On Windows8 and above, we support sub-channels for storage. + * On Windows8 and above, we support sub-channels for storage + * on SCSI and FC controllers. * The number of sub-channels offerred is based on the number of * VCPUs in the guest. 
*/ - max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); + if (!dev_is_ide) + max_sub_channels = + (num_cpus - 1) / storvsc_vcpus_per_sub_channel; } scsi_driver.can_queue = (max_outstanding_req_per_channel * diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index d759ddc272e43bd805005d2db5a40644f4dd17a7..fb42b567a1b791a2f2cf34f9e2c49338a3a22e23 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017, Linux Foundation. All rights reserved. + * Copyright (c) 2013-2018, Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -834,8 +834,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) */ if (!ufs_qcom_is_link_active(hba)) { ufs_qcom_disable_lane_clks(host); - phy_power_off(phy); - + if (host->is_phy_pwr_on) { + phy_power_off(phy); + host->is_phy_pwr_on = false; + } if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba)) ret = ufs_qcom_disable_vreg(hba->dev, host->vddp_ref_clk); @@ -859,13 +861,15 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) struct phy *phy = host->generic_phy; int err; - err = phy_power_on(phy); - if (err) { - dev_err(hba->dev, "%s: failed enabling regs, err = %d\n", - __func__, err); - goto out; + if (!host->is_phy_pwr_on) { + err = phy_power_on(phy); + if (err) { + dev_err(hba->dev, "%s: failed enabling regs, err = %d\n", + __func__, err); + goto out; + } + host->is_phy_pwr_on = true; } - if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 || hba->spm_lvl > UFS_PM_LVL_3)) ufs_qcom_enable_vreg(hba->dev, @@ -1590,8 +1594,10 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, return 0; if (on && (status == POST_CHANGE)) { - phy_power_on(host->generic_phy); - + if (!host->is_phy_pwr_on) { + phy_power_on(host->generic_phy); + host->is_phy_pwr_on = true; + } /* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); @@ -1614,7 +1620,10 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, ufs_qcom_dev_ref_clk_ctrl(host, false); /* powering off PHY during aggressive clk gating */ - phy_power_off(host->generic_phy); + if (host->is_phy_pwr_on) { + phy_power_off(host->generic_phy); + host->is_phy_pwr_on = false; + } } } @@ -2077,8 +2086,22 @@ static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name, dev_err(dev, "%s: %s get failed, err=%d\n", __func__, vreg->name, ret); } - vreg->min_uV = VDDP_REF_CLK_MIN_UV; - vreg->max_uV = VDDP_REF_CLK_MAX_UV; + + snprintf(prop_name, MAX_PROP_SIZE, "%s-min-uV", name); + ret = of_property_read_u32(np, prop_name, &vreg->min_uV); + if (ret) { + dev_dbg(dev, "%s: unable to find %s err %d, using default\n", + __func__, prop_name, ret); + vreg->min_uV = VDDP_REF_CLK_MIN_UV; + } + + snprintf(prop_name, MAX_PROP_SIZE, "%s-max-uV", name); + ret = of_property_read_u32(np, prop_name, &vreg->max_uV); + if (ret) { + dev_dbg(dev, "%s: unable to find %s err %d, using default\n", + __func__, prop_name, ret); + vreg->max_uV = VDDP_REF_CLK_MAX_UV; + } out: if (!ret) @@ -2221,15 +2244,13 @@ static int ufs_qcom_init(struct ufs_hba *hba) err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk", &host->vddp_ref_clk); phy_init(host->generic_phy); - err = phy_power_on(host->generic_phy); - if (err) - goto out_unregister_bus; + if (host->vddp_ref_clk) { err = ufs_qcom_enable_vreg(dev, 
host->vddp_ref_clk); if (err) { dev_err(dev, "%s: failed enabling ref clk supply: %d\n", __func__, err); - goto out_disable_phy; + goto out_unregister_bus; } } @@ -2262,8 +2283,6 @@ static int ufs_qcom_init(struct ufs_hba *hba) out_disable_vddp: if (host->vddp_ref_clk) ufs_qcom_disable_vreg(dev, host->vddp_ref_clk); -out_disable_phy: - phy_power_off(host->generic_phy); out_unregister_bus: phy_exit(host->generic_phy); msm_bus_scale_unregister_client(host->bus_vote.client_handle); @@ -2280,7 +2299,10 @@ static void ufs_qcom_exit(struct ufs_hba *hba) msm_bus_scale_unregister_client(host->bus_vote.client_handle); ufs_qcom_disable_lane_clks(host); - phy_power_off(host->generic_phy); + if (host->is_phy_pwr_on) { + phy_power_off(host->generic_phy); + host->is_phy_pwr_on = false; + } phy_exit(host->generic_phy); ufs_qcom_pm_qos_remove(host); } diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 84f5cbd8b4772428056b17e9f29bf1bedfd6ce54..f3f60c2a4dbb9beae7fb4548f4bc6c316cfe0fc5 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -383,6 +383,7 @@ struct ufs_qcom_host { struct request *req_pending; struct ufs_vreg *vddp_ref_clk; bool work_pending; + bool is_phy_pwr_on; }; static inline u32 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9dbc1d97339aabf537c239509b32968a8a5978bf..00fc4bf8aead563e0ecb414b396e0f9525d310b4 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7074,8 +7074,8 @@ static void ufshcd_rls_handler(struct work_struct *work) u32 mode; hba = container_of(work, struct ufs_hba, rls_work); - ufshcd_scsi_block_requests(hba); pm_runtime_get_sync(hba->dev); + ufshcd_scsi_block_requests(hba); down_write(&hba->lock); ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX); if (ret) { diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index c374e3b5c678d215bfa9e7ed33e2d033e5d4bfb3..777e5f1e52d10968d5f23e0e316db05b8209511d 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, break; case BTSTAT_ABORTQUEUE: - cmd->result = (DID_ABORT << 16); + cmd->result = (DID_BUS_BUSY << 16); break; case BTSTAT_SCSIPARITY: diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c index b7ef0ae7581448b47e907d889ad0002e986ec1f4..c44c851f3fd654732265a4dfbf6ec6808a79ea4f 100644 --- a/drivers/slimbus/slimbus.c +++ b/drivers/slimbus/slimbus.c @@ -333,6 +333,20 @@ static void slim_report(struct work_struct *work) } } +static void slim_device_reset(struct work_struct *work) +{ + struct slim_driver *sbdrv; + struct slim_device *sbdev = + container_of(work, struct slim_device, device_reset); + + if (!sbdev->dev.driver) + return; + + sbdrv = to_slim_driver(sbdev->dev.driver); + if (sbdrv && sbdrv->reset_device) + sbdrv->reset_device(sbdev); +} + /* * slim_add_device: Add a new device without register board info. * @ctrl: Controller to which this device is to be added to. 
@@ -353,6 +367,7 @@ int slim_add_device(struct slim_controller *ctrl, struct slim_device *sbdev) INIT_LIST_HEAD(&sbdev->mark_suspend); INIT_LIST_HEAD(&sbdev->mark_removal); INIT_WORK(&sbdev->wd, slim_report); + INIT_WORK(&sbdev->device_reset, slim_device_reset); mutex_lock(&ctrl->m_ctrl); list_add_tail(&sbdev->dev_list, &ctrl->devs); mutex_unlock(&ctrl->m_ctrl); @@ -684,16 +699,9 @@ void slim_framer_booted(struct slim_controller *ctrl) mutex_unlock(&ctrl->sched.m_reconf); mutex_lock(&ctrl->m_ctrl); list_for_each_safe(pos, next, &ctrl->devs) { - struct slim_driver *sbdrv; - sbdev = list_entry(pos, struct slim_device, dev_list); - mutex_unlock(&ctrl->m_ctrl); - if (sbdev && sbdev->dev.driver) { - sbdrv = to_slim_driver(sbdev->dev.driver); - if (sbdrv->reset_device) - sbdrv->reset_device(sbdev); - } - mutex_lock(&ctrl->m_ctrl); + if (sbdev) + queue_work(ctrl->wq, &sbdev->device_reset); } mutex_unlock(&ctrl->m_ctrl); } diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c index fe96a8b956fbd54424bea059af5b0be1240c91b3..f7ed1187518b9d2b47bedd29ae6b5a1e3ab566fd 100644 --- a/drivers/soc/bcm/raspberrypi-power.c +++ b/drivers/soc/bcm/raspberrypi-power.c @@ -45,7 +45,7 @@ struct rpi_power_domains { struct rpi_power_domain_packet { u32 domain; u32 on; -} __packet; +}; /* * Asks the firmware to enable or disable power on a specific power diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 4ddf289ab1699640e0555d6189f64d6c0b0236ad..25dbb32e2dae3a5bf39a44e974ef3403dc03812b 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -479,14 +479,6 @@ config MINIDUMP_MAX_ENTRIES help This defines maximum number of entries to be allocated for application subsytem in Minidump table. -config MSM_RPM_SMD - bool "RPM driver using SMD protocol" - help - RPM is the dedicated hardware engine for managing shared SoC - resources. This config adds driver support for using SMD as a - transport layer communication with RPM hardware. It also selects - the MSM_MPM config that programs the MPM module to monitor interrupts - during sleep modes. config QCOM_BUS_SCALING bool "Bus scaling driver" @@ -768,4 +760,13 @@ config QCOM_SMP2P_SLEEPSTATE When this option is enabled, notifications are sent to remote procs for the power state changes on the local processor. The notifications are sent through the smp2p framework. + +config QCOM_CDSP_RM + bool "CDSP request manager" + depends on QCOM_GLINK + help + This driver serves CDSP requests for CPU L3 clock and CPU QoS thus + improving CDSP performance. Using this driver, CDSP can set appropriate + CPU L3 clock for improving IO-Coherent throughput and opt for QoS mode + to improve RPC latency. 
endmenu diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 4a8af4747d08e637b43f99af9cf342ddbdbc63c7..d26ad6184979ff4b203d665fe23fed7dd99dcf8a 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -64,6 +64,7 @@ ifdef CONFIG_MSM_SUBSYSTEM_RESTART obj-y += subsystem_notif.o obj-y += subsystem_restart.o obj-y += ramdump.o + obj-y += microdump_collector.o endif obj-$(CONFIG_QCOM_EUD) += eud.o obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o @@ -84,9 +85,9 @@ ifdef CONFIG_MSM_RPM_SMD endif obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o obj-$(CONFIG_MSM_PERFORMANCE) += msm_performance.o -obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o ifdef CONFIG_DEBUG_FS obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd-debug.o endif obj-$(CONFIG_QCOM_SMP2P_SLEEPSTATE) += smp2p_sleepstate.o obj-$(CONFIG_QCOM_MEM_OFFLINE) += mem-offline.o +obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o diff --git a/drivers/soc/qcom/cdsprm.c b/drivers/soc/qcom/cdsprm.c new file mode 100644 index 0000000000000000000000000000000000000000..d23b3be2f9eab35f74576eb11e076a6450402935 --- /dev/null +++ b/drivers/soc/qcom/cdsprm.c @@ -0,0 +1,487 @@ +/* + * CDSP Request Manager + * + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* This module uses rpmsg to communicate with CDSP and receive requests + * for CPU L3 frequency and QoS. 
+ */ + +#define pr_fmt(fmt) "cdsprm: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SYSMON_CDSP_FEATURE_L3 1 +#define SYSMON_CDSP_FEATURE_RM 2 +#define SYSMON_CDSP_QOS_FLAG_IGNORE 0 +#define SYSMON_CDSP_QOS_FLAG_ENABLE 1 +#define SYSMON_CDSP_QOS_FLAG_DISABLE 2 +#define QOS_LATENCY_DISABLE_VALUE -1 +#define SYS_CLK_TICKS_PER_MS 19200 +#define CDSPRM_MSG_QUEUE_DEPTH 10 + +struct sysmon_l3_msg { + unsigned int l3_clock_khz; +}; + +struct sysmon_rm_msg { + unsigned int b_qos_flag; + unsigned int timetick_low; + unsigned int timetick_high; +}; + +struct sysmon_msg { + unsigned int feature_id; + union { + struct sysmon_l3_msg l3_struct; + struct sysmon_rm_msg rm_struct; + } feature_struct; + unsigned int size; +}; + +enum delay_state { + CDSP_DELAY_THREAD_NOT_STARTED = 0, + CDSP_DELAY_THREAD_STARTED = 1, + CDSP_DELAY_THREAD_BEFORE_SLEEP = 2, + CDSP_DELAY_THREAD_AFTER_SLEEP = 3, + CDSP_DELAY_THREAD_EXITING = 4, +}; + +struct cdsprm_request { + struct list_head node; + struct sysmon_msg msg; + bool busy; +}; + +struct cdsprm { + unsigned int event; + struct completion msg_avail; + struct cdsprm_request msg_queue[CDSPRM_MSG_QUEUE_DEPTH]; + unsigned int msg_queue_idx; + struct workqueue_struct *work_queue; + struct workqueue_struct *delay_work_queue; + struct work_struct cdsprm_work; + struct work_struct cdsprm_delay_work; + struct mutex rm_lock; + spinlock_t l3_lock; + spinlock_t list_lock; + struct rpmsg_device *rpmsgdev; + enum delay_state dt_state; + enum delay_state work_state; + unsigned long long timestamp; + struct pm_qos_request pm_qos_req; + unsigned int qos_latency_us; + unsigned int qos_max_ms; + bool qos_request; + bool b_rpmsg_register; + bool b_qosinitdone; + int latency_request; + int (*set_l3_freq)(unsigned int freq_khz); + int (*set_l3_freq_cached)(unsigned int freq_khz); +}; + +static struct cdsprm gcdsprm; +static LIST_HEAD(cdsprm_list); + +/** + * cdsprm_register_cdspl3gov() - Register a method to set L3 clock + * frequency + * @arg: cdsprm_l3 structure with set L3 clock frequency method + * + * Note: To be called from cdspl3 governor only. Called when the governor is + * started. + */ +void cdsprm_register_cdspl3gov(struct cdsprm_l3 *arg) +{ + if (!arg) + return; + + spin_lock(&gcdsprm.l3_lock); + gcdsprm.set_l3_freq = arg->set_l3_freq; + spin_unlock(&gcdsprm.l3_lock); +} +EXPORT_SYMBOL(cdsprm_register_cdspl3gov); + +/** + * cdsprm_unregister_cdspl3gov() - Unregister the method to set L3 clock + * frequency + * + * Note: To be called from cdspl3 governor only. 
Called when the governor is + * stopped + */ +void cdsprm_unregister_cdspl3gov(void) +{ + spin_lock(&gcdsprm.l3_lock); + gcdsprm.set_l3_freq = NULL; + spin_unlock(&gcdsprm.l3_lock); +} +EXPORT_SYMBOL(cdsprm_unregister_cdspl3gov); + +static void set_qos_latency(int latency) +{ + if (!gcdsprm.qos_request) { + pm_qos_add_request(&gcdsprm.pm_qos_req, + PM_QOS_CPU_DMA_LATENCY, latency); + gcdsprm.qos_request = true; + } else { + pm_qos_update_request(&gcdsprm.pm_qos_req, + latency); + } +} + +static void process_rm_request(struct sysmon_msg *msg) +{ + struct sysmon_rm_msg *rm_msg; + + if (!msg) + return; + + if (msg->feature_id == SYSMON_CDSP_FEATURE_RM) { + mutex_lock(&gcdsprm.rm_lock); + rm_msg = &msg->feature_struct.rm_struct; + if (rm_msg->b_qos_flag == + SYSMON_CDSP_QOS_FLAG_ENABLE) { + if (gcdsprm.latency_request != + gcdsprm.qos_latency_us) { + set_qos_latency(gcdsprm.qos_latency_us); + gcdsprm.latency_request = + gcdsprm.qos_latency_us; + pr_debug("Set qos latency to %d\n", + gcdsprm.latency_request); + } + gcdsprm.timestamp = ((rm_msg->timetick_low) | + ((unsigned long long)rm_msg->timetick_high << 32)); + if (gcdsprm.dt_state >= CDSP_DELAY_THREAD_AFTER_SLEEP) { + flush_workqueue(gcdsprm.delay_work_queue); + if (gcdsprm.dt_state == + CDSP_DELAY_THREAD_EXITING) { + gcdsprm.dt_state = + CDSP_DELAY_THREAD_STARTED; + queue_work(gcdsprm.delay_work_queue, + &gcdsprm.cdsprm_delay_work); + } + } else if (gcdsprm.dt_state == + CDSP_DELAY_THREAD_NOT_STARTED) { + gcdsprm.dt_state = CDSP_DELAY_THREAD_STARTED; + queue_work(gcdsprm.delay_work_queue, + &gcdsprm.cdsprm_delay_work); + } + } else if ((rm_msg->b_qos_flag == + SYSMON_CDSP_QOS_FLAG_DISABLE) && + (gcdsprm.latency_request != + QOS_LATENCY_DISABLE_VALUE)) { + set_qos_latency(QOS_LATENCY_DISABLE_VALUE); + gcdsprm.latency_request = QOS_LATENCY_DISABLE_VALUE; + pr_debug("Set qos latency to %d\n", + gcdsprm.latency_request); + } + mutex_unlock(&gcdsprm.rm_lock); + } else { + pr_err("Received incorrect msg on rm queue: %d\n", + msg->feature_id); + } +} + +static void process_delayed_rm_request(struct work_struct *work) +{ + unsigned long long timestamp, curr_timestamp; + unsigned int time_ms = 0; + + mutex_lock(&gcdsprm.rm_lock); + + timestamp = gcdsprm.timestamp; + curr_timestamp = arch_counter_get_cntvct(); + + while ((gcdsprm.latency_request == + gcdsprm.qos_latency_us) && + (curr_timestamp < timestamp)) { + if ((timestamp - curr_timestamp) < + (gcdsprm.qos_max_ms * SYS_CLK_TICKS_PER_MS)) + time_ms = (timestamp - curr_timestamp) / + SYS_CLK_TICKS_PER_MS; + else + break; + gcdsprm.dt_state = CDSP_DELAY_THREAD_BEFORE_SLEEP; + + mutex_unlock(&gcdsprm.rm_lock); + usleep_range(time_ms * 1000, (time_ms + 2) * 1000); + mutex_lock(&gcdsprm.rm_lock); + + gcdsprm.dt_state = CDSP_DELAY_THREAD_AFTER_SLEEP; + timestamp = gcdsprm.timestamp; + curr_timestamp = arch_counter_get_cntvct(); + } + + set_qos_latency(QOS_LATENCY_DISABLE_VALUE); + gcdsprm.latency_request = QOS_LATENCY_DISABLE_VALUE; + pr_debug("Set qos latency to %d\n", gcdsprm.latency_request); + gcdsprm.dt_state = CDSP_DELAY_THREAD_EXITING; + + mutex_unlock(&gcdsprm.rm_lock); +} + +static void process_cdsp_request(struct work_struct *work) +{ + struct cdsprm_request *req = NULL; + struct sysmon_msg *msg = NULL; + unsigned int l3_clock_khz; + + while (gcdsprm.work_state == + CDSP_DELAY_THREAD_STARTED) { + req = list_first_entry_or_null(&cdsprm_list, + struct cdsprm_request, node); + if (req) { + msg = &req->msg; + if (!msg) { + spin_lock(&gcdsprm.list_lock); + list_del(&req->node); + req->busy = 
false; + spin_unlock(&gcdsprm.list_lock); + continue; + } + if ((msg->feature_id == SYSMON_CDSP_FEATURE_RM) && + gcdsprm.b_qosinitdone) { + process_rm_request(msg); + } else if (msg->feature_id == SYSMON_CDSP_FEATURE_L3) { + l3_clock_khz = + msg->feature_struct.l3_struct.l3_clock_khz; + spin_lock(&gcdsprm.l3_lock); + gcdsprm.set_l3_freq_cached = + gcdsprm.set_l3_freq; + spin_unlock(&gcdsprm.l3_lock); + if (gcdsprm.set_l3_freq_cached) { + gcdsprm.set_l3_freq_cached( + l3_clock_khz); + pr_debug("Set L3 clock %d done\n", + l3_clock_khz); + } + } + spin_lock(&gcdsprm.list_lock); + list_del(&req->node); + req->busy = false; + spin_unlock(&gcdsprm.list_lock); + } else { + wait_for_completion(&gcdsprm.msg_avail); + } + } +} + +static int cdsprm_rpmsg_probe(struct rpmsg_device *dev) +{ + /* Populate child nodes as platform devices */ + of_platform_populate(dev->dev.of_node, NULL, NULL, &dev->dev); + gcdsprm.rpmsgdev = dev; + dev_dbg(&dev->dev, "rpmsg probe called for cdsp\n"); + return 0; +} + +static void cdsprm_rpmsg_remove(struct rpmsg_device *dev) +{ + gcdsprm.rpmsgdev = NULL; +} + +static int cdsprm_rpmsg_callback(struct rpmsg_device *dev, void *data, + int len, void *priv, u32 addr) +{ + struct sysmon_msg *msg = (struct sysmon_msg *)data; + bool b_valid = false; + struct cdsprm_request *req; + + if (!data || (len < sizeof(*msg))) { + dev_err(&dev->dev, + "Invalid message in rpmsg callback, length: %d, expected: %d\n", + len, sizeof(*msg)); + return -EINVAL; + } + + if ((msg->feature_id == SYSMON_CDSP_FEATURE_RM) && + gcdsprm.b_qosinitdone) { + dev_dbg(&dev->dev, "Processing RM request\n"); + b_valid = true; + } else if (msg->feature_id == SYSMON_CDSP_FEATURE_L3) { + dev_dbg(&dev->dev, "Processing L3 request\n"); + spin_lock(&gcdsprm.l3_lock); + gcdsprm.set_l3_freq_cached = gcdsprm.set_l3_freq; + spin_unlock(&gcdsprm.l3_lock); + if (gcdsprm.set_l3_freq_cached) + b_valid = true; + } + + if (b_valid) { + spin_lock(&gcdsprm.list_lock); + if (!gcdsprm.msg_queue[gcdsprm.msg_queue_idx].busy) { + req = &gcdsprm.msg_queue[gcdsprm.msg_queue_idx]; + req->busy = true; + req->msg = *msg; + if (gcdsprm.msg_queue_idx < + (CDSPRM_MSG_QUEUE_DEPTH - 1)) + gcdsprm.msg_queue_idx++; + else + gcdsprm.msg_queue_idx = 0; + } else { + spin_unlock(&gcdsprm.list_lock); + dev_err(&dev->dev, + "Unable to queue cdsp request, no memory\n"); + return -ENOMEM; + } + list_add_tail(&req->node, &cdsprm_list); + spin_unlock(&gcdsprm.list_lock); + if (gcdsprm.work_state == + CDSP_DELAY_THREAD_NOT_STARTED) { + gcdsprm.work_state = + CDSP_DELAY_THREAD_STARTED; + queue_work(gcdsprm.work_queue, + &gcdsprm.cdsprm_work); + } else { + complete(&gcdsprm.msg_avail); + } + } + + return 0; +} + +static int cdsp_rm_driver_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (of_property_read_u32(dev->of_node, + "qcom,qos-latency-us", &gcdsprm.qos_latency_us)) { + return -EINVAL; + } + + if (of_property_read_u32(dev->of_node, + "qcom,qos-maxhold-ms", &gcdsprm.qos_max_ms)) { + return -EINVAL; + } + + dev_info(dev, "CDSP request manager driver probe called\n"); + gcdsprm.b_qosinitdone = true; + + return 0; +} + +static const struct rpmsg_device_id cdsprm_rpmsg_match[] = { + { "cdsprmglink-apps-dsp" }, + { }, +}; + +static const struct of_device_id cdsprm_rpmsg_of_match[] = { + { .compatible = "qcom,msm-cdsprm-rpmsg" }, + { }, +}; +MODULE_DEVICE_TABLE(of, cdsprm_rpmsg_of_match); + +static struct rpmsg_driver cdsprm_rpmsg_client = { + .id_table = cdsprm_rpmsg_match, + .probe = cdsprm_rpmsg_probe, + .remove = 
cdsprm_rpmsg_remove, + .callback = cdsprm_rpmsg_callback, + .drv = { + .name = "qcom,msm_cdsprm_rpmsg", + .of_match_table = cdsprm_rpmsg_of_match, + }, +}; + +static const struct of_device_id cdsp_rm_match_table[] = { + { .compatible = "qcom,msm-cdsp-rm" }, + { }, +}; + +static struct platform_driver cdsp_rm = { + .probe = cdsp_rm_driver_probe, + .driver = { + .name = "msm_cdsp_rm", + .of_match_table = cdsp_rm_match_table, + }, +}; + +static int __init cdsprm_init(void) +{ + int err; + + mutex_init(&gcdsprm.rm_lock); + spin_lock_init(&gcdsprm.l3_lock); + spin_lock_init(&gcdsprm.list_lock); + init_completion(&gcdsprm.msg_avail); + gcdsprm.work_queue = create_singlethread_workqueue("cdsprm-wq"); + if (!gcdsprm.work_queue) { + pr_err("Failed to create rm work queue\n"); + return -ENOMEM; + } + + gcdsprm.delay_work_queue = + create_singlethread_workqueue("cdsprm-wq-delay"); + if (!gcdsprm.delay_work_queue) { + err = -ENOMEM; + pr_err("Failed to create rm delay work queue\n"); + goto err_wq; + } + + INIT_WORK(&gcdsprm.cdsprm_delay_work, process_delayed_rm_request); + INIT_WORK(&gcdsprm.cdsprm_work, process_cdsp_request); + err = platform_driver_register(&cdsp_rm); + if (err) { + pr_err("Failed to register cdsprm platform driver: %d\n", + err); + goto bail; + } + + err = register_rpmsg_driver(&cdsprm_rpmsg_client); + if (err) { + pr_err("Failed registering rpmsg driver with return %d\n", + err); + goto bail; + } + + gcdsprm.b_rpmsg_register = true; + pr_debug("Init successful\n"); + return 0; +bail: + destroy_workqueue(gcdsprm.delay_work_queue); +err_wq: + destroy_workqueue(gcdsprm.work_queue); + return err; +} + +static void __exit cdsprm_exit(void) +{ + if (gcdsprm.b_rpmsg_register) + unregister_rpmsg_driver(&cdsprm_rpmsg_client); + + platform_driver_unregister(&cdsp_rm); + gcdsprm.work_state = CDSP_DELAY_THREAD_NOT_STARTED; + complete(&gcdsprm.msg_avail); + destroy_workqueue(gcdsprm.work_queue); + destroy_workqueue(gcdsprm.delay_work_queue); +} + +module_init(cdsprm_init); +module_exit(cdsprm_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c index 252bd21086815a7ce2cf57bf79fb82f1555076b3..22b44648a4029a16114d807b2f16a751de59e368 100644 --- a/drivers/soc/qcom/cmd-db.c +++ b/drivers/soc/qcom/cmd-db.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -362,6 +362,7 @@ static int cmd_db_dev_probe(struct platform_device *pdev) res.start = readl_relaxed(dict); res.end = res.start + readl_relaxed(dict + 0x4); res.flags = IORESOURCE_MEM; + res.name = NULL; iounmap(dict); start_addr = devm_ioremap_resource(&pdev->dev, &res); diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c index 7c54073afb0a62c952575ab23486cfcdd4c4e307..2ded804ab9f11a9baed78dc93985536bccf68ca2 100644 --- a/drivers/soc/qcom/dcc_v2.c +++ b/drivers/soc/qcom/dcc_v2.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #define TIMEOUT_US (100) @@ -887,7 +889,8 @@ static int dcc_config_add(struct dcc_drvdata *drvdata, unsigned int addr, goto err; } - if (!len) { + /* Check the len to avoid allocate huge memory */ + if (!len || len > (drvdata->ram_size / 8)) { dev_err(drvdata->dev, "DCC: Invalid length\n"); ret = -EINVAL; goto err; @@ -1549,6 +1552,7 @@ static int dcc_probe(struct platform_device *pdev) struct dcc_drvdata *drvdata; struct resource *res; const char *data_sink; + struct clk *pclk; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) @@ -1557,6 +1561,16 @@ static int dcc_probe(struct platform_device *pdev) drvdata->dev = &pdev->dev; platform_set_drvdata(pdev, drvdata); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dcc_clk"); + if (res) { + pclk = devm_clk_get(dev, "dcc_clk"); + if (!IS_ERR(pclk)) { + ret = clk_set_rate(pclk, QDSS_CLK_LEVEL_DYNAMIC); + if (ret) + dev_err(dev, "clk set rate failed\n"); + } + } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dcc-base"); if (!res) return -EINVAL; diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c index dea5f5c07c50129b3335818235973bb0573c073c..920f2e5ee925ad3592b93712bf8697f9e403ec73 100644 --- a/drivers/soc/qcom/dfc_qmi.c +++ b/drivers/soc/qcom/dfc_qmi.c @@ -15,63 +15,23 @@ #include #include #include + #include #include "qmi_rmnet_i.h" +#define CREATE_TRACE_POINTS +#include #define DFC_MAX_BEARERS_V01 16 #define DFC_MAX_QOS_ID_V01 2 #define DEFAULT_FLOW_ID 0 -/* bearer list update result */ -#define NO_BEARER 0 -#define NO_CHANGE 1 -#define UPDATED 2 - -struct qmap_header { - uint8_t pad_len:6; - uint8_t reserved_bit:1; - uint8_t cd_bit:1; - uint8_t mux_id; - __be16 pkt_len; -} __aligned(1); - -struct dfc_ack_cmd { - struct qmap_header header; - uint8_t command_name; - uint8_t cmd_type:2; - uint8_t reserved:6; - uint16_t reserved2; - uint32_t transaction_id; - uint8_t qos_ver:2; - uint8_t reserved3:6; - uint8_t qos_type:2; - uint8_t reserved4:6; - uint16_t dfc_seq; - uint8_t reserved5[3]; - uint8_t bearer_id; -} __aligned(1); - -struct dfc_qos_ids { - uint32_t qos_id_valid; - uint32_t qos_id; -}; - -struct dfc_work { - struct work_struct work; - struct net_device *dev; - uint8_t bearer_id; - uint8_t ack_req; - uint16_t seq; - uint8_t mux_id; -}; - struct dfc_qmi_data { void *rmnet_port; struct workqueue_struct *dfc_wq; struct work_struct svc_arrive; struct qmi_handle handle; struct sockaddr_qrtr ssctl; - int modem; + int index; }; struct dfc_svc_ind { @@ -91,7 +51,6 @@ static void dfc_ind_reg_dereg(struct work_struct *work); static void dfc_svc_init(struct work_struct *work); static void dfc_do_burst_flow_control(struct work_struct *work); -static void dfc_disable_flow(struct work_struct *work); /* **************************************************** */ #define DFC_SERVICE_ID_V01 0x4E 
@@ -112,7 +71,7 @@ static void dfc_disable_flow(struct work_struct *work); #define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 471 struct dfc_bind_client_req_msg_v01 { - uint8_t ep_id_valid; + u8 ep_id_valid; struct data_ep_id_type_v01 ep_id; }; @@ -121,8 +80,8 @@ struct dfc_bind_client_resp_msg_v01 { }; struct dfc_indication_register_req_msg_v01 { - uint8_t report_flow_status_valid; - uint8_t report_flow_status; + u8 report_flow_status_valid; + u8 report_flow_status; }; struct dfc_indication_register_resp_msg_v01 { @@ -137,17 +96,17 @@ enum dfc_ip_type_enum_v01 { }; struct dfc_qos_id_type_v01 { - uint32_t qos_id; + u32 qos_id; enum dfc_ip_type_enum_v01 ip_type; }; struct dfc_flow_status_info_type_v01 { - uint8_t subs_id; - uint8_t mux_id; - uint8_t bearer_id; - uint32_t num_bytes; - uint16_t seq_num; - uint8_t qos_ids_len; + u8 subs_id; + u8 mux_id; + u8 bearer_id; + u32 num_bytes; + u16 seq_num; + u8 qos_ids_len; struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01]; }; @@ -155,7 +114,7 @@ static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(uint32_t), + .elem_size = sizeof(u32), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct dfc_qos_id_type_v01, @@ -183,7 +142,7 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct @@ -194,7 +153,7 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct @@ -205,7 +164,7 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct @@ -216,7 +175,7 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(uint32_t), + .elem_size = sizeof(u32), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct @@ -227,7 +186,7 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, - .elem_size = sizeof(uint16_t), + .elem_size = sizeof(u16), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct @@ -238,7 +197,7 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct @@ -269,18 +228,18 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { * that have registered for this event reporting. 
*/ struct dfc_flow_status_ind_msg_v01 { - uint8_t flow_status_valid; - uint8_t flow_status_len; + u8 flow_status_valid; + u8 flow_status_len; struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01]; - uint8_t eod_ack_reqd_valid; - uint8_t eod_ack_reqd; + u8 eod_ack_reqd_valid; + u8 eod_ack_reqd; }; static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct dfc_bind_client_req_msg_v01, @@ -326,7 +285,7 @@ static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct @@ -337,7 +296,7 @@ static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct @@ -375,7 +334,7 @@ static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct @@ -386,7 +345,7 @@ static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct @@ -409,7 +368,7 @@ static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct @@ -420,7 +379,7 @@ static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, - .elem_size = sizeof(uint8_t), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct @@ -492,7 +451,7 @@ dfc_bind_client_req(struct qmi_handle *dfc_handle, static int dfc_indication_register_req(struct qmi_handle *dfc_handle, - struct sockaddr_qrtr *ssctl, uint8_t reg) + struct sockaddr_qrtr *ssctl, u8 reg) { struct dfc_indication_register_resp_msg_v01 *resp; struct dfc_indication_register_req_msg_v01 *req; @@ -550,114 +509,94 @@ static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi) int rc; rc = dfc_bind_client_req(&data->handle, &data->ssctl, - &qmi->fc_info[data->modem].svc); + &qmi->fc_info[data->index].svc); if (rc < 0) return rc; return dfc_indication_register_req(&data->handle, &data->ssctl, 1); } -static int dfc_disable_bearer_flows(struct net_device *dev, uint8_t bearer_id) +static int dfc_bearer_flow_ctl(struct net_device *dev, struct qos_info *qos, + u8 bearer_id, u32 grant_size, int enable) { - struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); struct list_head *p; struct rmnet_flow_map *itm; - int rc = 0; - - if (!qos) - return 0; + int rc = 0, qlen; list_for_each(p, &qos->flow_head) { itm = list_entry(p, struct rmnet_flow_map, list); - if (unlikely(!itm)) - return 0; - if (itm->bearer_id == bearer_id) { - rtnl_lock(); - tc_qdisc_flow_control(dev, itm->tcm_handle, 0); - rtnl_unlock(); + qlen = tc_qdisc_flow_control(dev, itm->tcm_handle, + enable); + trace_dfc_qmi_tc(itm->bearer_id, itm->flow_id, + grant_size, qlen, itm->tcm_handle, + enable); rc++; } 
} return rc; } -static int dfc_update_fc_map(struct qos_info *qos, uint8_t ack_req, - struct dfc_flow_status_info_type_v01 *fc_info) +static int dfc_all_bearer_flow_ctl(struct net_device *dev, + struct qos_info *qos, u8 ack_req, + struct dfc_flow_status_info_type_v01 *fc_info) { - struct rmnet_bearer_map *itm = NULL; - unsigned long flags; - int rc = NO_BEARER; + struct list_head *p; + struct rmnet_flow_map *flow_itm; + struct rmnet_bearer_map *bearer_itm; + int enable; + int rc = 0, len; - write_lock_irqsave(&qos->flow_map_lock, flags); - itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id); - if (itm) { - if ((itm->grant_size == fc_info->num_bytes) && - (itm->counter > 0)) { - /*flow is enabled and grant_size is the same*/ - rc = NO_CHANGE; - } else { - itm->grant_size = fc_info->num_bytes; - itm->seq = fc_info->seq_num; - itm->ack_req = ack_req; - rc = UPDATED; - } - itm->counter = 0; + list_for_each(p, &qos->bearer_head) { + bearer_itm = list_entry(p, struct rmnet_bearer_map, list); + + bearer_itm->grant_size = fc_info->num_bytes; + bearer_itm->seq = fc_info->seq_num; + bearer_itm->ack_req = ack_req; } - write_unlock_irqrestore(&qos->flow_map_lock, flags); - return rc; -} -static int dfc_do_fc(struct net_device *dev, uint32_t flow_id, - int ip_type, int enable) -{ - struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); - struct rmnet_flow_map *itm = NULL; - int len = 0; + enable = fc_info->num_bytes > 0 ? 1 : 0; - if (!qos) - return 0; + list_for_each(p, &qos->flow_head) { + flow_itm = list_entry(p, struct rmnet_flow_map, list); - itm = qmi_rmnet_get_flow_map(qos, flow_id, ip_type); - if (itm) { - rtnl_lock(); - len = tc_qdisc_flow_control(dev, itm->tcm_handle, enable); - rtnl_unlock(); + len = tc_qdisc_flow_control(dev, flow_itm->tcm_handle, enable); + trace_dfc_qmi_tc(flow_itm->bearer_id, flow_itm->flow_id, + fc_info->num_bytes, len, + flow_itm->tcm_handle, enable); + rc++; } - return len; + return rc; } -static void dfc_disable_flow(struct work_struct *work) +static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, + u8 ack_req, + struct dfc_flow_status_info_type_v01 *fc_info) { - struct dfc_work *data = (struct dfc_work *)work; - int rc = dfc_disable_bearer_flows(data->dev, data->bearer_id); - - pr_debug("%s() %d flows disabled\n", __func__, rc); - kfree(data); -} + struct rmnet_bearer_map *itm = NULL; + int rc = 0; + int action = -1; -static void dfc_do_flow_controls(struct net_device *dev, - struct dfc_flow_status_info_type_v01 *flow) -{ - int i; - int enable = (flow->num_bytes > 0) ? 
1 : 0; - int qdisc_len; - - for (i = 0; i < flow->qos_ids_len; i++) { - /* do flow control per specified flow */ - if (flow->qos_ids[i].ip_type == DFC_IPV4_TYPE_V01) { - qdisc_len = dfc_do_fc(dev, flow->qos_ids[i].qos_id, - AF_INET, enable); - pr_debug("%s() qdisc_len=%d\n", __func__, qdisc_len); - } else if (flow->qos_ids[i].ip_type == DFC_IPV6_TYPE_V01) { - qdisc_len = dfc_do_fc(dev, flow->qos_ids[i].qos_id, - AF_INET6, enable); - } else { - pr_err("%s() ip type[%d] not supported\n", - __func__, flow->qos_ids[i].ip_type); - } + itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id); + if (itm) { + if (itm->grant_size == 0 && fc_info->num_bytes > 0) + action = 1; + else if (itm->grant_size > 0 && fc_info->num_bytes == 0) + action = 0; + + itm->grant_size = fc_info->num_bytes; + itm->seq = fc_info->seq_num; + itm->ack_req = ack_req; + + if (action != -1) + rc = dfc_bearer_flow_ctl(dev, qos, fc_info->bearer_id, + itm->grant_size, action); + } else { + pr_debug("grant %u before flow activate", fc_info->num_bytes); + qos->default_grant = fc_info->num_bytes; } + return rc; } static void dfc_do_burst_flow_control(struct work_struct *work) @@ -668,11 +607,34 @@ static void dfc_do_burst_flow_control(struct work_struct *work) struct net_device *dev; struct qos_info *qos; struct dfc_flow_status_info_type_v01 *flow_status; - uint8_t ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0; + u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0; int i, rc; + if (!svc_ind->data->rmnet_port) { + kfree(ind); + kfree(svc_ind); + return; + } + + /* This will drop some messages but that is + * unavoidable for now since the notifier callback is + * protected by rtnl_lock() and destroy_workqueue() + * will dead lock with this. + */ + if (!rtnl_trylock()) { + kfree(ind); + kfree(svc_ind); + return; + } + for (i = 0; i < ind->flow_status_len; i++) { flow_status = &ind->flow_status[i]; + trace_dfc_flow_ind(svc_ind->data->index, + i, flow_status->mux_id, + flow_status->bearer_id, + flow_status->num_bytes, + flow_status->seq_num, + ack_req); dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port, flow_status->mux_id); if (!dev) @@ -682,27 +644,17 @@ static void dfc_do_burst_flow_control(struct work_struct *work) if (!qos) continue; - rc = dfc_update_fc_map(qos, ack_req, flow_status); - if (rc == NO_BEARER) { - pr_debug("%s: num_bytes[%u]\n", - __func__, flow_status->num_bytes); - qos->default_grant = flow_status->num_bytes; - continue; - } else if (rc == NO_CHANGE) { - continue; - } else { - if ((flow_status->num_bytes > 0) || - (flow_status->bearer_id != 0xFF)) - dfc_do_flow_controls(dev, flow_status); - else - netif_stop_queue(dev); - - } + if (unlikely(flow_status->bearer_id == 0xFF)) + rc = dfc_all_bearer_flow_ctl( + dev, qos, ack_req, flow_status); + else + rc = dfc_update_fc_map(dev, qos, ack_req, flow_status); } clean_out: kfree(ind); kfree(svc_ind); + rtnl_unlock(); } static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, @@ -713,16 +665,17 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, struct dfc_flow_status_ind_msg_v01 *ind_msg; struct dfc_svc_ind *svc_ind; - if (qmi != &dfc->handle) { - pr_err("Wrong client\n"); + if (!dfc->rmnet_port) + return; + + if (qmi != &dfc->handle) return; - } ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data; if (ind_msg->flow_status_valid) { if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) { - pr_err("Invalid fc info len: %d\n", - ind_msg->flow_status_len); + pr_err("%s() Invalid fc info len: %d\n", + 
__func__, ind_msg->flow_status_len); return; } @@ -764,8 +717,11 @@ static void dfc_svc_init(struct work_struct *work) return; } - qmi->fc_info[data->modem].dfc_client = (void *)data; - pr_debug("Connection established with the DFC Service\n"); + qmi->fc_info[data->index].dfc_client = (void *)data; + trace_dfc_client_state_up(data->index, + qmi->fc_info[data->index].svc.instance, + qmi->fc_info[data->index].svc.ep_type, + qmi->fc_info[data->index].svc.iface_id); } static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc) @@ -787,14 +743,14 @@ static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc) struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data, handle); struct qmi_info *qmi_pt; - int modem; + int client; - pr_debug("Connection with DFC service lost\n"); + trace_dfc_client_state_down(data->index, 1); qmi_pt = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port); if (qmi_pt) { - for (modem = 0; modem < 2; modem++) { - if (qmi_pt->fc_info[modem].dfc_client == (void *)data) - qmi_pt->fc_info[modem].dfc_client = NULL; + for (client = 0; client < MAX_CLIENT_NUM; client++) { + if (qmi_pt->fc_info[client].dfc_client == (void *)data) + qmi_pt->fc_info[client].dfc_client = NULL; break; } } @@ -818,48 +774,49 @@ static struct qmi_msg_handler qmi_indication_handler[] = { {}, }; -/* **************************************************** */ -int dfc_qmi_client_init(void *port, int modem) +int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi) { - struct qmi_info *qmi = rmnet_get_qmi_pt(port); struct dfc_qmi_data *data; - int rc = 0; + int rc = -ENOMEM; - if (!qmi) - return -EINVAL; - - data = kmalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL); + data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL); if (!data) return -ENOMEM; - memset(data, 0, sizeof(struct dfc_qmi_data)); data->rmnet_port = port; - data->modem = modem; + data->index = index; data->dfc_wq = create_singlethread_workqueue("dfc_wq"); if (!data->dfc_wq) { pr_err("%s Could not create workqueue\n", __func__); - kfree(data); - return -ENOMEM; + goto err0; } + INIT_WORK(&data->svc_arrive, dfc_svc_init); rc = qmi_handle_init(&data->handle, QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN, &server_ops, qmi_indication_handler); if (rc < 0) { pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc); - kfree(data); - return rc; + goto err1; } rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01, DFC_SERVICE_VERS_V01, - qmi->fc_info[modem].svc.instance); + qmi->fc_info[index].svc.instance); if (rc < 0) { pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc); - qmi_handle_release(&data->handle); + goto err2; } + return 0; + +err2: + qmi_handle_release(&data->handle); +err1: + destroy_workqueue(data->dfc_wq); +err0: + kfree(data); return rc; } @@ -867,58 +824,61 @@ void dfc_qmi_client_exit(void *dfc_data) { struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; - if (!data) - return; - - qmi_handle_release(&data->handle); + /* Skip this call for now due to error in qmi layer + * qmi_handle_release(&data->handle); + */ + trace_dfc_client_state_down(data->index, 0); + drain_workqueue(data->dfc_wq); + destroy_workqueue(data->dfc_wq); + kfree(data); } void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, struct sk_buff *skb) { - struct dfc_work *svc_check; struct rmnet_bearer_map *bearer; struct rmnet_flow_map *itm; - unsigned long flags; int ip_type; - if (!qos || !skb) + if (!qos) return; + if (!rtnl_trylock()) + return; ip_type = (ip_hdr(skb)->version == 
IP_VER_6) ? AF_INET6 : AF_INET; - write_lock_irqsave(&qos->flow_map_lock, flags); - itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type); - if (!itm) { - write_unlock_irqrestore(&qos->flow_map_lock, flags); - } else { + itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type); + if (itm) { bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id); if (unlikely(!bearer)) { - write_unlock_irqrestore(&qos->flow_map_lock, flags); + rtnl_unlock(); return; } - bearer->counter += skb->len; - if (bearer->counter < bearer->grant_size) { - write_unlock_irqrestore(&qos->flow_map_lock, flags); + + trace_dfc_flow_check(bearer->bearer_id, + skb->len, bearer->grant_size); + + if (skb->len >= bearer->grant_size) { + bearer->grant_size = 0; + dfc_bearer_flow_ctl(dev, qos, bearer->bearer_id, + bearer->grant_size, 0); } else { - bearer->counter = 0; - write_unlock_irqrestore(&qos->flow_map_lock, flags); - - svc_check = kmalloc(sizeof(struct dfc_work), - GFP_ATOMIC); - if (!svc_check) - return; - - INIT_WORK((struct work_struct *)svc_check, - dfc_disable_flow); - svc_check->dev = dev; - svc_check->bearer_id = bearer->bearer_id; - svc_check->ack_req = bearer->ack_req; - svc_check->seq = bearer->seq; - svc_check->mux_id = qos->mux_id; - schedule_work((struct work_struct *)svc_check); + bearer->grant_size -= skb->len; } } + + rtnl_unlock(); +} + +void dfc_reset_port_pt(void *dfc_data) +{ + struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; + + if (data) { + data->rmnet_port = NULL; + dfc_indication_register_req(&data->handle, &data->ssctl, 0); + destroy_workqueue(data->dfc_wq); + } } #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index c8abef8630700f0575b3e89188b1147b5d905477..f631192d1aa7ad408519379f1fd6f5e45b8f13d2 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -1205,6 +1205,11 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb, if (code != SUBSYS_BEFORE_SHUTDOWN) return NOTIFY_OK; + if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed) { + if (wlfw_send_modem_shutdown_msg(priv)) + icnss_pr_dbg("Fail to send modem shutdown Indication\n"); + } + if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) { set_bit(ICNSS_FW_DOWN, &priv->state); icnss_ignore_fw_timeout(true); @@ -1238,7 +1243,11 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb, event_data->crashed = notif->crashed; fw_down_data.crashed = !!notif->crashed; - icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data); + if (test_bit(ICNSS_FW_READY, &priv->state) && + !test_bit(ICNSS_DRIVER_UNLOADING, &priv->state)) + icnss_call_driver_uevent(priv, + ICNSS_UEVENT_FW_DOWN, + &fw_down_data); icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN, ICNSS_EVENT_SYNC, event_data); diff --git a/drivers/soc/qcom/icnss_qmi.c b/drivers/soc/qcom/icnss_qmi.c index d3815ab251f5bc460c6b0a832a39dd424b90c3fa..e9ef164e3e31f539452ed03805ac6b76fe4abfb8 100644 --- a/drivers/soc/qcom/icnss_qmi.c +++ b/drivers/soc/qcom/icnss_qmi.c @@ -552,6 +552,69 @@ int wlfw_wlan_cfg_send_sync_msg(struct icnss_priv *priv, return ret; } +int wlfw_send_modem_shutdown_msg(struct icnss_priv *priv) +{ + int ret; + struct wlfw_shutdown_req_msg_v01 *req; + struct wlfw_shutdown_resp_msg_v01 *resp; + struct qmi_txn txn; + + if (!priv) + return -ENODEV; + + icnss_pr_dbg("Sending modem shutdown request, state: 0x%lx\n", + priv->state); + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) { + 
kfree(req); + return -ENOMEM; + } + + req->shutdown_valid = 1; + req->shutdown = 1; + + ret = qmi_txn_init(&priv->qmi, &txn, + wlfw_shutdown_resp_msg_v01_ei, resp); + + if (ret < 0) { + icnss_pr_err("Fail to init txn for shutdown resp %d\n", + ret); + goto out; + } + + ret = qmi_send_request(&priv->qmi, NULL, &txn, + QMI_WLFW_SHUTDOWN_REQ_V01, + WLFW_SHUTDOWN_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_shutdown_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + icnss_pr_err("Fail to send Shutdown req %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, WLFW_TIMEOUT); + if (ret < 0) { + icnss_pr_err("Shutdown resp wait failed with ret %d\n", + ret); + goto out; + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + icnss_pr_err("QMI modem shutdown request rejected result:%d error:%d\n", + resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + goto out; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + int wlfw_ini_send_sync_msg(struct icnss_priv *priv, uint8_t fw_log_mode) { int ret; @@ -1128,7 +1191,15 @@ static int wlfw_new_server(struct qmi_handle *qmi, static void wlfw_del_server(struct qmi_handle *qmi, struct qmi_service *service) { + struct icnss_priv *priv = container_of(qmi, struct icnss_priv, qmi); + icnss_pr_dbg("WLFW server delete\n"); + + if (priv) { + set_bit(ICNSS_FW_DOWN, &priv->state); + icnss_ignore_fw_timeout(true); + } + icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_EXIT, 0, NULL); } diff --git a/drivers/soc/qcom/icnss_qmi.h b/drivers/soc/qcom/icnss_qmi.h index 488c9aad1221505e13a773aebafd8e9c97b6d467..a94c35dfbca9884d96b99d4ad57e1564f369420a 100644 --- a/drivers/soc/qcom/icnss_qmi.h +++ b/drivers/soc/qcom/icnss_qmi.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -50,7 +50,10 @@ static inline int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv) return 0; } static inline void icnss_ignore_fw_timeout(bool ignore) {} - +static int wlfw_send_modem_shutdown_msg(struct icnss_priv *priv) +{ + return 0; +} static inline int wlfw_ini_send_sync_msg(struct icnss_priv *priv, uint8_t fw_log_mode) { @@ -103,6 +106,7 @@ int wlfw_dynamic_feature_mask_send_sync_msg(struct icnss_priv *priv, int icnss_clear_server(struct icnss_priv *priv); int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv); void icnss_ignore_fw_timeout(bool ignore); +int wlfw_send_modem_shutdown_msg(struct icnss_priv *priv); int wlfw_ini_send_sync_msg(struct icnss_priv *priv, uint8_t fw_log_mode); int wlfw_athdiag_read_send_sync_msg(struct icnss_priv *priv, uint32_t offset, uint32_t mem_type, diff --git a/drivers/soc/qcom/llcc-sdmmagpie.c b/drivers/soc/qcom/llcc-sdmmagpie.c index 7e772cf7151cab4453632cf1ccb9b5d5b017b2ea..4f7397e839574dee821cff0b4539868e3f62e016 100644 --- a/drivers/soc/qcom/llcc-sdmmagpie.c +++ b/drivers/soc/qcom/llcc-sdmmagpie.c @@ -60,7 +60,8 @@ static struct llcc_slice_config sdmmagpie_data[] = { SCT_ENTRY("cpuss", 1, 1, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 1), SCT_ENTRY("modem", 8, 8, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), SCT_ENTRY("modemhw", 9, 9, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 1, 0), - SCT_ENTRY("mmuhwt", 13, 13, 512, 1, 0, 0xF, 0x0, 0, 0, 0, 0, 1), + SCT_ENTRY("gpuhtw", 11, 11, 128, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0), + SCT_ENTRY("gpu", 12, 12, 384, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0), }; static int sdmmagpie_qcom_llcc_probe(struct platform_device *pdev) diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.c b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c index ceb9eebd164a3bbeb28085de63f9b915aee6236a..c2d0ea7b9091b6ebc821eef124551669f0583764 100644 --- a/drivers/soc/qcom/memshare/heap_mem_ext_v01.c +++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c @@ -11,133 +11,10 @@ * */ -#include -#include +#include #include "heap_mem_ext_v01.h" -struct elem_info mem_alloc_req_msg_data_v01_ei[] = { - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct mem_alloc_req_msg_v01, - num_bytes), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct mem_alloc_req_msg_v01, - block_alignment_valid), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct mem_alloc_req_msg_v01, - block_alignment), - }, - { - .data_type = QMI_EOTI, - .is_array = NO_ARRAY, - .tlv_type = QMI_COMMON_TLV_TYPE, - }, -}; - -struct elem_info mem_alloc_resp_msg_data_v01_ei[] = { - { - .data_type = QMI_SIGNED_2_BYTE_ENUM, - .elem_len = 1, - .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct mem_alloc_resp_msg_v01, - resp), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct mem_alloc_resp_msg_v01, - handle_valid), - }, - { - .data_type = QMI_UNSIGNED_8_BYTE, - .elem_len = 1, - .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, - .tlv_type = 0x10, - .offset = offsetof(struct mem_alloc_resp_msg_v01, 
- handle), - }, - { - .data_type = QMI_OPT_FLAG, - .elem_len = 1, - .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, - .tlv_type = 0x11, - .offset = offsetof(struct mem_alloc_resp_msg_v01, - num_bytes_valid), - }, - { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, - .tlv_type = 0x11, - .offset = offsetof(struct mem_alloc_resp_msg_v01, - num_bytes), - }, - { - .data_type = QMI_EOTI, - .is_array = NO_ARRAY, - .tlv_type = QMI_COMMON_TLV_TYPE, - }, -}; - -struct elem_info mem_free_req_msg_data_v01_ei[] = { - { - .data_type = QMI_UNSIGNED_8_BYTE, - .elem_len = 1, - .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct mem_free_req_msg_v01, - handle), - }, - { - .data_type = QMI_EOTI, - .is_array = NO_ARRAY, - .tlv_type = QMI_COMMON_TLV_TYPE, - }, -}; - -struct elem_info mem_free_resp_msg_data_v01_ei[] = { - { - .data_type = QMI_SIGNED_2_BYTE_ENUM, - .elem_len = 1, - .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, - .tlv_type = 0x01, - .offset = offsetof(struct mem_free_resp_msg_v01, - resp), - }, - { - .data_type = QMI_EOTI, - .is_array = NO_ARRAY, - .tlv_type = QMI_COMMON_TLV_TYPE, - }, -}; - -struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = { +struct qmi_elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, @@ -165,7 +42,7 @@ struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = { }, }; -struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = { +struct qmi_elem_info mem_alloc_generic_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -245,7 +122,7 @@ struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = { }, }; -struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = { +struct qmi_elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -255,7 +132,7 @@ struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = { .offset = offsetof(struct mem_alloc_generic_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, @@ -316,7 +193,7 @@ struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = { }, }; -struct elem_info mem_free_generic_req_msg_data_v01_ei[] = { +struct qmi_elem_info mem_free_generic_req_msg_data_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, @@ -380,7 +257,7 @@ struct elem_info mem_free_generic_req_msg_data_v01_ei[] = { }, }; -struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = { +struct qmi_elem_info mem_free_generic_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -390,7 +267,7 @@ struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = { .offset = offsetof(struct mem_free_generic_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -399,7 +276,7 @@ struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = { }, }; -struct elem_info mem_query_size_req_msg_data_v01_ei[] = { +struct qmi_elem_info mem_query_size_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -434,7 +311,7 @@ struct elem_info mem_query_size_req_msg_data_v01_ei[] = { }, }; -struct elem_info mem_query_size_resp_msg_data_v01_ei[] = { +struct qmi_elem_info mem_query_size_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -444,7 +321,7 @@ struct elem_info mem_query_size_resp_msg_data_v01_ei[] = 
{ .offset = offsetof(struct mem_query_size_rsp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.h b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h index 56ba9f87abbbe592c36c29a48be00533d3a13500..2c2924f17a30e6f53e84e1cc24ee58a2f1b70a1b 100644 --- a/drivers/soc/qcom/memshare/heap_mem_ext_v01.h +++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h @@ -14,10 +14,11 @@ #ifndef HEAP_MEM_EXT_SERVICE_01_H #define HEAP_MEM_EXT_SERVICE_01_H -#include +#include #define MEM_ALLOC_REQ_MAX_MSG_LEN_V01 255 #define MEM_FREE_REQ_MAX_MSG_LEN_V01 255 +#define MEM_QUERY_MAX_MSG_LEN_V01 255 #define MAX_ARR_CNT_V01 64 struct dhms_mem_alloc_addr_info_type_v01 { @@ -82,83 +83,6 @@ enum dhms_mem_block_align_enum_v01 { */ }; -/* Request Message; This command is used for getting - * the multiple physically contiguous - * memory blocks from the server memory subsystem - */ -struct mem_alloc_req_msg_v01 { - - /* Mandatory */ - /*requested size*/ - uint32_t num_bytes; - - /* Optional */ - /* Must be set to true if block_alignment - * is being passed - */ - uint8_t block_alignment_valid; - /* The block alignment for the memory block to be allocated - */ - enum dhms_mem_block_align_enum_v01 block_alignment; -}; /* Message */ - -/* Response Message; This command is used for getting - * the multiple physically contiguous memory blocks - * from the server memory subsystem - */ -struct mem_alloc_resp_msg_v01 { - - /* Mandatory */ - /* Result Code */ - /* The result of the requested memory operation - */ - enum qmi_result_type_v01 resp; - /* Optional */ - /* Memory Block Handle - */ - /* Must be set to true if handle is being passed - */ - uint8_t handle_valid; - /* The physical address of the memory allocated on the HLOS - */ - uint64_t handle; - /* Optional */ - /* Memory block size */ - /* Must be set to true if num_bytes is being passed - */ - uint8_t num_bytes_valid; - /* The number of bytes actually allocated for the request. - * This value can be smaller than the size requested in - * QMI_DHMS_MEM_ALLOC_REQ_MSG. 
- */ - uint32_t num_bytes; -}; /* Message */ - -/* Request Message; This command is used for releasing - * the multiple physically contiguous - * memory blocks to the server memory subsystem - */ -struct mem_free_req_msg_v01 { - - /* Mandatory */ - /* Physical address of memory to be freed - */ - uint32_t handle; -}; /* Message */ - -/* Response Message; This command is used for releasing - * the multiple physically contiguous - * memory blocks to the server memory subsystem - */ -struct mem_free_resp_msg_v01 { - - /* Mandatory */ - /* Result of the requested memory operation, todo, - * need to check the async operation for free - */ - enum qmi_result_type_v01 resp; -}; /* Message */ - /* Request Message; This command is used for getting * the multiple physically contiguous * memory blocks from the server memory subsystem @@ -330,22 +254,14 @@ struct mem_query_size_rsp_msg_v01 { }; /* Message */ -extern struct elem_info mem_alloc_req_msg_data_v01_ei[]; -extern struct elem_info mem_alloc_resp_msg_data_v01_ei[]; -extern struct elem_info mem_free_req_msg_data_v01_ei[]; -extern struct elem_info mem_free_resp_msg_data_v01_ei[]; -extern struct elem_info mem_alloc_generic_req_msg_data_v01_ei[]; -extern struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[]; -extern struct elem_info mem_free_generic_req_msg_data_v01_ei[]; -extern struct elem_info mem_free_generic_resp_msg_data_v01_ei[]; -extern struct elem_info mem_query_size_req_msg_data_v01_ei[]; -extern struct elem_info mem_query_size_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info mem_alloc_generic_req_msg_data_v01_ei[]; +extern struct qmi_elem_info mem_alloc_generic_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info mem_free_generic_req_msg_data_v01_ei[]; +extern struct qmi_elem_info mem_free_generic_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info mem_query_size_req_msg_data_v01_ei[]; +extern struct qmi_elem_info mem_query_size_resp_msg_data_v01_ei[]; /*Service Message Definition*/ -#define MEM_ALLOC_REQ_MSG_V01 0x0020 -#define MEM_ALLOC_RESP_MSG_V01 0x0020 -#define MEM_FREE_REQ_MSG_V01 0x0021 -#define MEM_FREE_RESP_MSG_V01 0x0021 #define MEM_ALLOC_GENERIC_REQ_MSG_V01 0x0022 #define MEM_ALLOC_GENERIC_RESP_MSG_V01 0x0022 #define MEM_FREE_GENERIC_REQ_MSG_V01 0x0023 diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c index 696c043d8de4d4ca9c5d8a80565abda035f8f678..4b0e0882ef82454cb4979551771e8c6f6bb6e34d 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.c +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -18,9 +18,9 @@ #include #include #include +#include #include #include -#include #include #include "msm_memshare.h" #include "heap_mem_ext_v01.h" @@ -34,8 +34,6 @@ static unsigned long(attrs); static struct qmi_handle *mem_share_svc_handle; -static void mem_share_svc_recv_msg(struct work_struct *work); -static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg); static struct workqueue_struct *mem_share_svc_workqueue; static uint64_t bootup_request; static bool ramdump_event; @@ -58,65 +56,6 @@ static struct memshare_driver *memsh_drv; static struct memshare_child *memsh_child; static struct mem_blocks memblock[MAX_CLIENTS]; static uint32_t num_clients; -static struct msg_desc mem_share_svc_alloc_req_desc = { - .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_ALLOC_REQ_MSG_V01, - .ei_array = mem_alloc_req_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_alloc_resp_desc = { - .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, - .msg_id = 
MEM_ALLOC_RESP_MSG_V01, - .ei_array = mem_alloc_resp_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_free_req_desc = { - .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_FREE_REQ_MSG_V01, - .ei_array = mem_free_req_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_free_resp_desc = { - .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_FREE_RESP_MSG_V01, - .ei_array = mem_free_resp_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_alloc_generic_req_desc = { - .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01, - .ei_array = mem_alloc_generic_req_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_alloc_generic_resp_desc = { - .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01, - .ei_array = mem_alloc_generic_resp_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_free_generic_req_desc = { - .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_FREE_GENERIC_REQ_MSG_V01, - .ei_array = mem_free_generic_req_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_free_generic_resp_desc = { - .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_FREE_GENERIC_RESP_MSG_V01, - .ei_array = mem_free_generic_resp_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_size_query_req_desc = { - .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_QUERY_SIZE_REQ_MSG_V01, - .ei_array = mem_query_size_req_msg_data_v01_ei, -}; - -static struct msg_desc mem_share_svc_size_query_resp_desc = { - .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, - .msg_id = MEM_QUERY_SIZE_RESP_MSG_V01, - .ei_array = mem_query_size_resp_msg_data_v01_ei, -}; /* * This API creates ramdump dev handlers @@ -406,6 +345,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code, memblock[i].peripheral == DHMS_MEM_PROC_MPSS_V01 && !memblock[i].guarantee && + !memblock[i].client_request && memblock[i].allotted && !memblock[i].alloc_request) { pr_debug("memshare: hypervisor unmapping for client id: %d\n", @@ -488,49 +428,8 @@ static void shared_hyp_mapping(int client_id) memblock[client_id].hyp_mapping = 1; } -static int handle_alloc_req(void *req_h, void *req, void *conn_h) -{ - struct mem_alloc_req_msg_v01 *alloc_req; - struct mem_alloc_resp_msg_v01 alloc_resp; - int rc = 0; - - mutex_lock(&memsh_drv->mem_share); - alloc_req = (struct mem_alloc_req_msg_v01 *)req; - pr_debug("memshare: %s: Received Alloc Request: alloc_req->num_bytes = %d\n", - __func__, alloc_req->num_bytes); - if (!memblock[GPS].size) { - memset(&alloc_resp, 0, sizeof(alloc_resp)); - alloc_resp.resp = QMI_RESULT_FAILURE_V01; - rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, - &memblock[GPS]); - } - alloc_resp.num_bytes_valid = 1; - alloc_resp.num_bytes = alloc_req->num_bytes; - alloc_resp.handle_valid = 1; - alloc_resp.handle = memblock[GPS].phy_addr; - if (rc) { - alloc_resp.resp = QMI_RESULT_FAILURE_V01; - memblock[GPS].size = 0; - } else { - alloc_resp.resp = QMI_RESULT_SUCCESS_V01; - } - - mutex_unlock(&memsh_drv->mem_share); - - pr_debug("memshare: %s, alloc_resp.num_bytes :%d, alloc_resp.resp :%lx\n", - __func__, alloc_resp.num_bytes, - (unsigned long int)alloc_resp.resp); - rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, - &mem_share_svc_alloc_resp_desc, &alloc_resp, - sizeof(alloc_resp)); - if (rc < 0) - pr_err("memshare: %s, Error sending the alloc request: %d\n", - __func__, rc); - - return rc; -} - -static int 
handle_alloc_generic_req(void *req_h, void *req, void *conn_h) +static void handle_alloc_generic_req(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded_msg) { struct mem_alloc_generic_req_msg_v01 *alloc_req; struct mem_alloc_generic_resp_msg_v01 *alloc_resp; @@ -539,14 +438,14 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) uint32_t size = 0; mutex_lock(&memsh_drv->mem_share); - alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req; + alloc_req = (struct mem_alloc_generic_req_msg_v01 *)decoded_msg; pr_debug("memshare: alloc request client id: %d proc _id: %d\n", alloc_req->client_id, alloc_req->proc_id); alloc_resp = kzalloc(sizeof(*alloc_resp), GFP_KERNEL); if (!alloc_resp) { mutex_unlock(&memsh_drv->mem_share); - return -ENOMEM; + return; } alloc_resp->resp.result = QMI_RESULT_FAILURE_V01; alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01; @@ -560,7 +459,7 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) kfree(alloc_resp); alloc_resp = NULL; mutex_unlock(&memsh_drv->mem_share); - return -EINVAL; + return; } if (!memblock[client_id].allotted) { @@ -601,47 +500,22 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n", alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes, (unsigned long int)alloc_resp->resp.result); - rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, - &mem_share_svc_alloc_generic_resp_desc, alloc_resp, - sizeof(alloc_resp)); + rc = qmi_send_response(mem_share_svc_handle, sq, txn, + MEM_ALLOC_GENERIC_RESP_MSG_V01, + sizeof(struct mem_alloc_generic_resp_msg_v01), + mem_alloc_generic_resp_msg_data_v01_ei, alloc_resp); if (rc < 0) - pr_err("memshare: %s, Error sending the alloc request: %d\n", + pr_err("memshare: %s, Error sending the alloc response: %d\n", __func__, rc); kfree(alloc_resp); alloc_resp = NULL; - return rc; -} - -static int handle_free_req(void *req_h, void *req, void *conn_h) -{ - struct mem_free_req_msg_v01 *free_req; - struct mem_free_resp_msg_v01 free_resp; - int rc; - - mutex_lock(&memsh_drv->mem_free); - if (!memblock[GPS].guarantee) { - free_req = (struct mem_free_req_msg_v01 *)req; - pr_debug("memshare: %s: Received Free Request\n", __func__); - memset(&free_resp, 0, sizeof(free_resp)); - dma_free_coherent(memsh_drv->dev, memblock[GPS].size, - memblock[GPS].virtual_addr, - free_req->handle); - } - free_resp.resp = QMI_RESULT_SUCCESS_V01; - mutex_unlock(&memsh_drv->mem_free); - rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, - &mem_share_svc_free_resp_desc, &free_resp, - sizeof(free_resp)); - if (rc < 0) - pr_err("memshare: %s, Error sending the free request: %d\n", - __func__, rc); - - return rc; + return; } -static int handle_free_generic_req(void *req_h, void *req, void *conn_h) +static void handle_free_generic_req(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded_msg) { struct mem_free_generic_req_msg_v01 *free_req; struct mem_free_generic_resp_msg_v01 free_resp; @@ -652,7 +526,7 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC}; mutex_lock(&memsh_drv->mem_free); - free_req = (struct mem_free_generic_req_msg_v01 *)req; + free_req = (struct mem_free_generic_req_msg_v01 *)decoded_msg; pr_debug("memshare: %s: Received Free Request\n", __func__); memset(&free_resp, 0, sizeof(free_resp)); free_resp.resp.error 
= QMI_ERR_INTERNAL_V01; @@ -665,9 +539,10 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) __func__); flag = 1; } else if (!memblock[client_id].guarantee && - memblock[client_id].allotted) { - pr_debug("memshare: %s: size: %d", - __func__, memblock[client_id].size); + !memblock[client_id].client_request && + memblock[client_id].allotted) { + pr_debug("memshare: %s:client_id:%d - size: %d", + __func__, client_id, memblock[client_id].size); ret = hyp_assign_phys(memblock[client_id].phy_addr, memblock[client_id].size, source_vmlist, 1, dest_vmids, dest_perms, 1); @@ -676,8 +551,8 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) * This is an error case as hyp mapping was successful * earlier but during unmap it lead to failure. */ - pr_err("memshare: %s, failed to unmap the region\n", - __func__); + pr_err("memshare: %s, failed to unmap the region for client id:%d\n", + __func__, client_id); } size = memblock[client_id].size; if (memblock[client_id].client_id == 1) { @@ -696,8 +571,8 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) attrs); free_client(client_id); } else { - pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n", - __func__); + pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n", + __func__, client_id); } if (flag) { @@ -709,30 +584,31 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h) } mutex_unlock(&memsh_drv->mem_free); - rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, - &mem_share_svc_free_generic_resp_desc, &free_resp, - sizeof(free_resp)); - + rc = qmi_send_response(mem_share_svc_handle, sq, txn, + MEM_FREE_GENERIC_RESP_MSG_V01, + sizeof(struct mem_free_generic_resp_msg_v01), + mem_free_generic_resp_msg_data_v01_ei, &free_resp); if (rc < 0) - pr_err("memshare: %s, Error sending the free request: %d\n", + pr_err("memshare: %s, Error sending the free response: %d\n", __func__, rc); - return rc; + return; } -static int handle_query_size_req(void *req_h, void *req, void *conn_h) +static void handle_query_size_req(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded_msg) { int rc, client_id; struct mem_query_size_req_msg_v01 *query_req; struct mem_query_size_rsp_msg_v01 *query_resp; mutex_lock(&memsh_drv->mem_share); - query_req = (struct mem_query_size_req_msg_v01 *)req; + query_req = (struct mem_query_size_req_msg_v01 *)decoded_msg; query_resp = kzalloc(sizeof(*query_resp), GFP_KERNEL); if (!query_resp) { mutex_unlock(&memsh_drv->mem_share); - return -ENOMEM; + return; } pr_debug("memshare: query request client id: %d proc _id: %d\n", query_req->client_id, query_req->proc_id); @@ -746,7 +622,7 @@ static int handle_query_size_req(void *req_h, void *req, void *conn_h) kfree(query_resp); query_resp = NULL; mutex_unlock(&memsh_drv->mem_share); - return -EINVAL; + return; } if (memblock[client_id].size) { @@ -763,148 +639,51 @@ static int handle_query_size_req(void *req_h, void *req, void *conn_h) pr_debug("memshare: query_resp.size :%d, query_resp.resp.result :%lx\n", query_resp->size, (unsigned long int)query_resp->resp.result); - rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, - &mem_share_svc_size_query_resp_desc, query_resp, - sizeof(query_resp)); - + rc = qmi_send_response(mem_share_svc_handle, sq, txn, + MEM_QUERY_SIZE_RESP_MSG_V01, + MEM_QUERY_MAX_MSG_LEN_V01, + mem_query_size_resp_msg_data_v01_ei, query_resp); 
if (rc < 0) - pr_err("memshare: %s, Error sending the query request: %d\n", + pr_err("memshare: %s, Error sending the query response: %d\n", __func__, rc); kfree(query_resp); query_resp = NULL; - return rc; -} - -static int mem_share_svc_connect_cb(struct qmi_handle *handle, - void *conn_h) -{ - if (mem_share_svc_handle != handle || !conn_h) - return -EINVAL; - - return 0; + return; } -static int mem_share_svc_disconnect_cb(struct qmi_handle *handle, - void *conn_h) +static void mem_share_svc_disconnect_cb(struct qmi_handle *qmi, + unsigned int node, unsigned int port) { - if (mem_share_svc_handle != handle || !conn_h) - return -EINVAL; - - return 0; + pr_debug("memshare: Received QMI client disconnect event\n"); } -static int mem_share_svc_req_desc_cb(unsigned int msg_id, - struct msg_desc **req_desc) -{ - int rc; - - pr_debug("memshare: %s\n", __func__); - switch (msg_id) { - case MEM_ALLOC_REQ_MSG_V01: - *req_desc = &mem_share_svc_alloc_req_desc; - rc = sizeof(struct mem_alloc_req_msg_v01); - break; - - case MEM_FREE_REQ_MSG_V01: - *req_desc = &mem_share_svc_free_req_desc; - rc = sizeof(struct mem_free_req_msg_v01); - break; - - case MEM_ALLOC_GENERIC_REQ_MSG_V01: - *req_desc = &mem_share_svc_alloc_generic_req_desc; - rc = sizeof(struct mem_alloc_generic_req_msg_v01); - break; - - case MEM_FREE_GENERIC_REQ_MSG_V01: - *req_desc = &mem_share_svc_free_generic_req_desc; - rc = sizeof(struct mem_free_generic_req_msg_v01); - break; - - case MEM_QUERY_SIZE_REQ_MSG_V01: - *req_desc = &mem_share_svc_size_query_req_desc; - rc = sizeof(struct mem_query_size_req_msg_v01); - break; - - default: - rc = -ENOTSUPP; - break; - } - return rc; -} - -static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h, - void *req_h, unsigned int msg_id, void *req) -{ - int rc; - - pr_debug("memshare: %s\n", __func__); - if (mem_share_svc_handle != handle || !conn_h) - return -EINVAL; - - switch (msg_id) { - case MEM_ALLOC_REQ_MSG_V01: - rc = handle_alloc_req(req_h, req, conn_h); - break; - - case MEM_FREE_REQ_MSG_V01: - rc = handle_free_req(req_h, req, conn_h); - break; - - case MEM_ALLOC_GENERIC_REQ_MSG_V01: - rc = handle_alloc_generic_req(req_h, req, conn_h); - break; - - case MEM_FREE_GENERIC_REQ_MSG_V01: - rc = handle_free_generic_req(req_h, req, conn_h); - break; - - case MEM_QUERY_SIZE_REQ_MSG_V01: - rc = handle_query_size_req(req_h, req, conn_h); - break; - - default: - rc = -ENOTSUPP; - break; - } - return rc; -} - -static void mem_share_svc_recv_msg(struct work_struct *work) -{ - int rc; - - pr_debug("memshare: %s\n", __func__); - do { - rc = qmi_recv_msg(mem_share_svc_handle); - pr_debug("memshare: %s: Notified about a Receive Event", - __func__); - } while (!rc); - - if (rc != -ENOMSG) - pr_err("memshare: %s: Error = %d while receiving message\n", - __func__, rc); -} - -static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle, - enum qmi_event_type event, void *priv) -{ - pr_debug("memshare: %s\n", __func__); - - if (event == QMI_RECV_MSG) - queue_delayed_work(mem_share_svc_workqueue, - &work_recv_msg, 0); -} +static struct qmi_ops server_ops = { + .del_client = mem_share_svc_disconnect_cb, +}; -static struct qmi_svc_ops_options mem_share_svc_ops_options = { - .version = 1, - .service_id = MEM_SHARE_SERVICE_SVC_ID, - .service_vers = MEM_SHARE_SERVICE_VERS, - .service_ins = MEM_SHARE_SERVICE_INS_ID, - .connect_cb = mem_share_svc_connect_cb, - .disconnect_cb = mem_share_svc_disconnect_cb, - .req_desc_cb = mem_share_svc_req_desc_cb, - .req_cb = mem_share_svc_req_cb, +static struct 
qmi_msg_handler qmi_memshare_handlers[] = { + { + .type = QMI_REQUEST, + .msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01, + .ei = mem_alloc_generic_req_msg_data_v01_ei, + .decoded_size = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .fn = handle_alloc_generic_req, + }, + { + .type = QMI_REQUEST, + .msg_id = MEM_FREE_GENERIC_REQ_MSG_V01, + .ei = mem_free_generic_req_msg_data_v01_ei, + .decoded_size = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .fn = handle_free_generic_req, + }, + { + .type = QMI_REQUEST, + .msg_id = MEM_QUERY_SIZE_REQ_MSG_V01, + .ei = mem_query_size_req_msg_data_v01_ei, + .decoded_size = MEM_QUERY_MAX_MSG_LEN_V01, + .fn = handle_query_size_req, + }, }; int memshare_alloc(struct device *dev, @@ -937,18 +716,30 @@ static void memshare_init_worker(struct work_struct *work) if (!mem_share_svc_workqueue) return; - mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL); + mem_share_svc_handle = kzalloc(sizeof(struct qmi_handle), + GFP_KERNEL); if (!mem_share_svc_handle) { + destroy_workqueue(mem_share_svc_workqueue); + return; + } + + rc = qmi_handle_init(mem_share_svc_handle, + sizeof(struct qmi_elem_info), + &server_ops, qmi_memshare_handlers); + if (rc < 0) { pr_err("memshare: %s: Creating mem_share_svc qmi handle failed\n", __func__); + kfree(mem_share_svc_handle); destroy_workqueue(mem_share_svc_workqueue); return; } - rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options); + rc = qmi_add_server(mem_share_svc_handle, MEM_SHARE_SERVICE_SVC_ID, + MEM_SHARE_SERVICE_VERS, MEM_SHARE_SERVICE_INS_ID); if (rc < 0) { pr_err("memshare: %s: Registering mem share svc failed %d\n", __func__, rc); - qmi_handle_destroy(mem_share_svc_handle); + qmi_handle_release(mem_share_svc_handle); + kfree(mem_share_svc_handle); destroy_workqueue(mem_share_svc_workqueue); return; } @@ -992,6 +783,10 @@ static int memshare_child_probe(struct platform_device *pdev) pdev->dev.of_node, "qcom,allocate-boot-time"); + memblock[num_clients].client_request = of_property_read_bool( + pdev->dev.of_node, + "qcom,allocate-on-request"); + rc = of_property_read_string(pdev->dev.of_node, "label", &name); if (rc) { @@ -1094,11 +889,10 @@ static int memshare_remove(struct platform_device *pdev) if (!memsh_drv) return 0; - qmi_svc_unregister(mem_share_svc_handle); flush_workqueue(mem_share_svc_workqueue); - qmi_handle_destroy(mem_share_svc_handle); + qmi_handle_release(mem_share_svc_handle); + kfree(mem_share_svc_handle); destroy_workqueue(mem_share_svc_workqueue); - return 0; } diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h index 6b546528404c23ecc2e4a64ac3609539f05e035a..908f091c86ebfee9202d22c435b6ab1e2bad4085 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.h +++ b/drivers/soc/qcom/memshare/msm_memshare.h @@ -41,6 +41,8 @@ struct mem_blocks { uint32_t allotted; /* Memory allocation request received or not */ uint32_t alloc_request; + /* Allocation on request from a client*/ + uint32_t client_request; /* Size required for client */ uint32_t size; /* diff --git a/drivers/soc/qcom/microdump_collector.c b/drivers/soc/qcom/microdump_collector.c new file mode 100644 index 0000000000000000000000000000000000000000..d55f3d177cdc128e9a6613fcfd6f1b178555c5ff --- /dev/null +++ b/drivers/soc/qcom/microdump_collector.c @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#define SMEM_SSR_REASON_MSS0 421 +#define SMEM_SSR_DATA_MSS0 611 +#define SMEM_MODEM 1 + +/* + * This program collects the data from SMEM regions whenever the modem crashes + * and stores it in /dev/ramdump_microdump_modem so as to expose it to + * user space. + */ + +struct microdump_data { + struct ramdump_device *microdump_dev; + void *microdump_modem_notify_handler; + struct notifier_block microdump_modem_ssr_nb; +}; + +static struct microdump_data *drv; + +static int microdump_modem_notifier_nb(struct notifier_block *nb, + unsigned long code, void *data) +{ + int ret = 0; + size_t size_reason = 0, size_data = 0; + char *crash_reason = NULL; + char *crash_data = NULL; + struct ramdump_segment segment[2]; + + if (SUBSYS_RAMDUMP_NOTIFICATION != code && SUBSYS_SOC_RESET != code) + return NOTIFY_OK; + + memset(segment, 0, sizeof(segment)); + + crash_reason = qcom_smem_get(QCOM_SMEM_HOST_ANY + , SMEM_SSR_REASON_MSS0, &size_reason); + + if (IS_ERR_OR_NULL(crash_reason)) { + pr_info("%s: smem %d not available\n", + __func__, SMEM_SSR_REASON_MSS0); + goto out; + } + + segment[0].v_address = crash_reason; + segment[0].size = size_reason; + + crash_data = qcom_smem_get(SMEM_MODEM + , SMEM_SSR_DATA_MSS0, &size_data); + + if (IS_ERR_OR_NULL(crash_data)) { + pr_info("%s: smem %d not available\n", + __func__, SMEM_SSR_DATA_MSS0); + goto out; + } + + segment[1].v_address = crash_data; + segment[1].size = size_data; + + ret = do_ramdump(drv->microdump_dev, segment, 2); + if (ret) + pr_info("%s: do_ramdump() failed\n", __func__); + +out: + return NOTIFY_OK; +} + +static int microdump_modem_ssr_register_notifier(struct microdump_data *drv) +{ + int ret = 0; + + drv->microdump_modem_ssr_nb.notifier_call = microdump_modem_notifier_nb; + + drv->microdump_modem_notify_handler = + subsys_notif_register_notifier("modem", + &drv->microdump_modem_ssr_nb); + + if (IS_ERR(drv->microdump_modem_notify_handler)) { + pr_err("Modem register notifier failed: %ld\n", + PTR_ERR(drv->microdump_modem_notify_handler)); + ret = -EINVAL; + } + + return ret; +} + +static void microdump_modem_ssr_unregister_notifier(struct microdump_data *drv) +{ + subsys_notif_unregister_notifier(drv->microdump_modem_notify_handler, + &drv->microdump_modem_ssr_nb); + drv->microdump_modem_notify_handler = NULL; +} + +/* + * microdump_init() - Registers kernel module for microdump collector + * + * Creates device file /dev/ramdump_microdump_modem and registers handler for + * modem SSR events. 
+ * + * Returns 0 on success and negative error code in case of errors + */ +static int __init microdump_init(void) +{ + int ret = -ENOMEM; + + drv = kzalloc(sizeof(struct microdump_data), GFP_KERNEL); + if (!drv) + goto out; + + drv->microdump_dev = create_ramdump_device("microdump_modem", NULL); + if (!drv->microdump_dev) { + pr_err("%s: Unable to create a microdump_modem ramdump device\n" + , __func__); + ret = -ENODEV; + goto out_kfree; + } + + ret = microdump_modem_ssr_register_notifier(drv); + if (ret) { + destroy_ramdump_device(drv->microdump_dev); + goto out_kfree; + } + return ret; + +out_kfree: + pr_err("%s: Failed to register microdump collector\n", __func__); + kfree(drv); + drv = NULL; +out: + return ret; +} + +static void __exit microdump_exit(void) +{ + microdump_modem_ssr_unregister_notifier(drv); + destroy_ramdump_device(drv->microdump_dev); + kfree(drv); +} + +module_init(microdump_init); +module_exit(microdump_exit); + +MODULE_DESCRIPTION("Microdump Collector"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c index abaa1aef247985b542371a1ac5340744d5a81946..cad99d7ce355301970d06e0aa061b9fe64a520f6 100644 --- a/drivers/soc/qcom/msm-spm.c +++ b/drivers/soc/qcom/msm-spm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -189,21 +189,12 @@ static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev) } static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev, - uint32_t vlevel) + uint32_t vlevel, uint32_t vctl_port) { unsigned int pmic_data = 0; - /** - * VCTL_PORT has to be 0, for PMIC_STS register to be updated. - * Ensure that vctl_port is always set to 0. - */ - if (dev->vctl_port) { - __WARN(); - return; - } - pmic_data |= vlevel; - pmic_data |= (dev->vctl_port & 0x7) << 16; + pmic_data |= (vctl_port & 0x7) << 16; dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF; dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data; @@ -512,10 +503,46 @@ static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev, } #endif +static inline int msm_spm_drv_validate_data(struct msm_spm_driver_data *dev, + unsigned int vlevel, int vctl_port) +{ + int timeout_us = dev->vctl_timeout_us; + uint32_t new_level; + + /* Confirm the voltage we set was what hardware sent and + * FSM is idle. + */ + do { + udelay(1); + new_level = msm_spm_drv_get_sts_curr_pmic_data(dev); + + /** + * VCTL_PORT has to be 0, for vlevel to be updated. + * If port is not 0, check for PMIC_STATE only. 
+ */ + + if (((new_level & 0x30000) == MSM_SPM_PMIC_STATE_IDLE) && + (vctl_port || ((new_level & 0xFF) == vlevel))) + break; + } while (--timeout_us); + + if (!timeout_us) { + pr_err("Wrong level %#x\n", new_level); + return -EIO; + } + + if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) + pr_info("%s: done, remaining timeout %u us\n", + __func__, timeout_us); + + return 0; +} + int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel) { - uint32_t timeout_us, new_level; + uint32_t vlevel_set = vlevel; bool avs_enabled; + int ret = 0; if (!dev) return -EINVAL; @@ -531,45 +558,63 @@ int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel) if (avs_enabled) msm_spm_drv_disable_avs(dev); + if (dev->vctl_port_ub >= 0) { + /** + * VCTL can send 8bit voltage level at once. + * Send lower 8bit first, vlevel change happens + * when upper 8bit is sent. + */ + vlevel = vlevel_set & 0xFF; + } + /* Kick the state machine back to idle */ dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1; msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST); - msm_spm_drv_set_vctl2(dev, vlevel); + msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port); - timeout_us = dev->vctl_timeout_us; - /* Confirm the voltage we set was what hardware sent */ - do { - udelay(1); - new_level = msm_spm_drv_get_sts_curr_pmic_data(dev); - /* FSM is idle */ - if (((new_level & 0x30000) == 0) && - ((new_level & 0xFF) == vlevel)) - break; - } while (--timeout_us); - if (!timeout_us) { - pr_info("Wrong level %#x\n", new_level); + ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port); + if (ret) goto set_vdd_bail; - } - if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) - pr_info("%s: done, remaining timeout %u us\n", - __func__, timeout_us); + if (dev->vctl_port_ub >= 0) { + /* Send upper 8bit of voltage level */ + vlevel = (vlevel_set >> 8) & 0xFF; + + /* Kick the state machine back to idle */ + dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST); + + /* + * Steps for sending for vctl port other than '0' + * Write VCTL register with pmic data and address index + * Perform system barrier + * Wait for 1us + * Read PMIC_STS register to make sure operation is complete + */ + msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port_ub); + + mb(); /* To make sure data is sent before checking status */ + + ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port_ub); + if (ret) + goto set_vdd_bail; + } /* Set AVS min/max */ if (avs_enabled) { - msm_spm_drv_set_avs_vlevel(dev, vlevel); + msm_spm_drv_set_avs_vlevel(dev, vlevel_set); msm_spm_drv_enable_avs(dev); } - return 0; + return ret; set_vdd_bail: if (avs_enabled) msm_spm_drv_enable_avs(dev); - pr_err("%s: failed %#x, remaining timeout %uus, vlevel %#x\n", - __func__, vlevel, timeout_us, new_level); + pr_err("%s: failed %#x vlevel setting in timeout %uus\n", + __func__, vlevel_set, dev->vctl_timeout_us); return -EIO; } @@ -699,6 +744,7 @@ int msm_spm_drv_init(struct msm_spm_driver_data *dev, return -ENODEV; dev->vctl_port = data->vctl_port; + dev->vctl_port_ub = data->vctl_port_ub; dev->phase_port = data->phase_port; dev->pfm_port = data->pfm_port; dev->reg_base_addr = data->reg_base_addr; diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c index 61bfba938d62e21cbb3e53a03cd55ec3eac4a26a..471a9f8dac25e0b50273b2d1766c6e5badd3af79 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c @@ -1273,4 +1273,4 @@ int __init 
msm_bus_device_init_driver(void) } return platform_driver_register(&msm_bus_rules_driver); } -subsys_initcall(msm_bus_device_init_driver); +fs_initcall(msm_bus_device_init_driver); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index f16c2bb938b0de07e1fc3426d02781b7e6e76ffc..f00a8c09b32468152ddb09ca5aba8496b97db2d5 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "msm_bus_core.h" #include "msm_bus_rpmh.h" #include "msm_bus_noc.h" @@ -621,12 +622,12 @@ int msm_bus_commit_data(struct list_head *clist) MSM_BUS_ERR("%s: Error invalidating mbox: %d\n", __func__, ret); - if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE) { - ret = rpmh_write(cur_mbox, cur_rsc->rscdev->req_state, - cmdlist_active, cnt_active); + if (cur_rsc->node_info->id == MSM_BUS_RSC_DISP) { + ret = rpmh_write_batch(cur_mbox, cur_rsc->rscdev->req_state, + cmdlist_active, n_active); /* - * Ignore -EBUSY from rpmh_write if it's an AWAKE_STATE - * request since AWAKE requests are invalid when + * Ignore -EBUSY from rpmh_write if it's an AMC + * request to Display RSC which are invalid when * the display RSC is in solver mode and the bus driver * does not know the current state of the display RSC. */ diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c index f336cfbdca490105c49f49c77473c31eab9f6d18..79921d2259d366fb9e32cf2c29607fb680366a58 100644 --- a/drivers/soc/qcom/qdss_bridge.c +++ b/drivers/soc/qcom/qdss_bridge.c @@ -421,7 +421,7 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev, static const struct mhi_device_id qdss_mhi_match_table[] = { { .chan = "QDSS" }, - { NULL }, + {}, }; static struct mhi_driver qdss_mhi_driver = { diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c index a6a7b2506538a2f9396ec20203459fa417ea7041..0d7e86e3a6b5a16b75cc8ac3bbd67ee12424a6dd 100644 --- a/drivers/soc/qcom/qmi_rmnet.c +++ b/drivers/soc/qcom/qmi_rmnet.c @@ -14,12 +14,15 @@ #include #include #include +#include #include #include "qmi_rmnet_i.h" +#include -#define MODEM_0_INSTANCE 0 -#define MODEM_0 0 -#define MODEM_1 1 +#define NLMSG_FLOW_ACTIVATE 1 +#define NLMSG_FLOW_DEACTIVATE 2 +#define NLMSG_CLIENT_SETUP 4 +#define NLMSG_CLIENT_DELETE 5 struct qmi_elem_info data_ep_id_type_v01_ei[] = { { @@ -35,7 +38,7 @@ struct qmi_elem_info data_ep_id_type_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(uint32_t), + .elem_size = sizeof(u32), .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct data_ep_id_type_v01, @@ -54,19 +57,41 @@ struct qmi_elem_info data_ep_id_type_v01_ei[] = { }; EXPORT_SYMBOL(data_ep_id_type_v01_ei); -static void *qmi_rmnet_qmi_init(void) +static struct qmi_info *qmi_rmnet_qmi_init(void) { struct qmi_info *qmi_info; + int i; qmi_info = kzalloc(sizeof(*qmi_info), GFP_KERNEL); if (!qmi_info) return NULL; - return (void *)qmi_info; + for (i = 0; i < MAX_CLIENT_NUM; i++) + qmi_info->fc_info[i].dfc_client = NULL; + + return qmi_info; +} + +static void qmi_rmnet_clean_flow_list(struct qos_info *qos) +{ + struct rmnet_bearer_map *bearer, *br_tmp; + struct rmnet_flow_map *itm, *fl_tmp; + + ASSERT_RTNL(); + + list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) { + list_del(&itm->list); + kfree(itm); + } + + list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) { + list_del(&bearer->list); + kfree(bearer); + } } 
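/*
 * Illustrative aside, not part of this patch: the memshare hunks above replace
 * the legacy qmi_svc_register()/msg_desc callbacks with the in-kernel QMI
 * interface, where a service registers a qmi_msg_handler table and answers
 * each decoded request with qmi_send_response(); the qmi_rmnet conversion in
 * this file relies on the same qmi_elem_info encoding tables. A minimal,
 * self-contained sketch of that pattern follows. Every "example_*" identifier
 * is hypothetical; only the qmi_* API and the struct fields mirror what the
 * patch itself uses.
 */
#include <linux/soc/qcom/qmi.h>
#include <linux/stddef.h>
#include <linux/types.h>

#define EXAMPLE_SVC_ID			0x42	/* hypothetical service id */
#define EXAMPLE_SVC_VERS		1
#define EXAMPLE_SVC_INS			0
#define EXAMPLE_PING_REQ_MSG_ID		0x20
#define EXAMPLE_PING_RESP_MSG_ID	0x20
#define EXAMPLE_MAX_MSG_LEN		64

/* one-byte payload reused for both request and response */
struct example_ping_msg {
	u8 val;
};

static struct qmi_elem_info example_ping_msg_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.is_array	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct example_ping_msg, val),
	},
	{
		.data_type	= QMI_EOTI,
	},
};

/* called with the request already decoded through example_ping_msg_ei */
static void example_ping_req_handler(struct qmi_handle *qmi,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn, const void *decoded)
{
	const struct example_ping_msg *req = decoded;
	struct example_ping_msg resp = { .val = req->val };

	qmi_send_response(qmi, sq, txn, EXAMPLE_PING_RESP_MSG_ID,
			  EXAMPLE_MAX_MSG_LEN, example_ping_msg_ei, &resp);
}

static struct qmi_msg_handler example_handlers[] = {
	{
		.type		= QMI_REQUEST,
		.msg_id		= EXAMPLE_PING_REQ_MSG_ID,
		.ei		= example_ping_msg_ei,
		.decoded_size	= sizeof(struct example_ping_msg),
		.fn		= example_ping_req_handler,
	},
	{},
};

static int example_qmi_server_start(struct qmi_handle *qmi)
{
	int rc;

	rc = qmi_handle_init(qmi, EXAMPLE_MAX_MSG_LEN, NULL, example_handlers);
	if (rc < 0)
		return rc;

	/* advertise the service on the QRTR bus so clients can connect */
	return qmi_add_server(qmi, EXAMPLE_SVC_ID, EXAMPLE_SVC_VERS,
			      EXAMPLE_SVC_INS);
}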
struct rmnet_flow_map * -qmi_rmnet_get_flow_map(struct qos_info *qos, uint32_t flow_id, int ip_type) +qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type) { struct rmnet_flow_map *itm; @@ -74,9 +99,6 @@ qmi_rmnet_get_flow_map(struct qos_info *qos, uint32_t flow_id, int ip_type) return NULL; list_for_each_entry(itm, &qos->flow_head, list) { - if (unlikely(!itm)) - return NULL; - if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type)) return itm; } @@ -92,9 +114,6 @@ qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id) return NULL; list_for_each_entry(itm, &qos->bearer_head, list) { - if (unlikely(!itm)) - return NULL; - if (itm->bearer_id == bearer_id) return itm; } @@ -110,84 +129,94 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm, itm->tcm_handle = new_map->tcm_handle; } -static int qmi_rmnet_add_flow(struct net_device *dev, struct qmi_info *qmi, - struct rmnet_flow_map *new_map) +static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm) { struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); - struct rmnet_flow_map *itm; + struct rmnet_flow_map new_map, *itm; struct rmnet_bearer_map *bearer; - unsigned long flags; if (!qos_info) return -EINVAL; - pr_debug("%s() bearer[%u], flow[%u], ip[%u]\n", __func__, - new_map->bearer_id, new_map->flow_id, new_map->ip_type); + ASSERT_RTNL(); - write_lock_irqsave(&qos_info->flow_map_lock, flags); - itm = qmi_rmnet_get_flow_map(qos_info, new_map->flow_id, - new_map->ip_type); + /* flow activate + * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, + * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle + */ + + new_map.bearer_id = tcm->tcm__pad1; + new_map.flow_id = tcm->tcm_parent; + new_map.ip_type = tcm->tcm_ifindex; + new_map.tcm_handle = tcm->tcm_handle; + trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id, + new_map.ip_type, new_map.tcm_handle, 1); + + itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id, + new_map.ip_type); if (itm) { - qmi_rmnet_update_flow_map(itm, new_map); + qmi_rmnet_update_flow_map(itm, &new_map); } else { - write_unlock_irqrestore(&qos_info->flow_map_lock, flags); itm = kzalloc(sizeof(*itm), GFP_KERNEL); if (!itm) return -ENOMEM; - qmi_rmnet_update_flow_map(itm, new_map); - write_lock_irqsave(&qos_info->flow_map_lock, flags); + qmi_rmnet_update_flow_map(itm, &new_map); list_add(&itm->list, &qos_info->flow_head); } - bearer = qmi_rmnet_get_bearer_map(qos_info, new_map->bearer_id); + bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id); if (bearer) { bearer->flow_ref++; } else { - write_unlock_irqrestore(&qos_info->flow_map_lock, flags); bearer = kzalloc(sizeof(*bearer), GFP_KERNEL); if (!bearer) return -ENOMEM; - bearer->bearer_id = new_map->bearer_id; + bearer->bearer_id = new_map.bearer_id; bearer->flow_ref = 1; bearer->grant_size = qos_info->default_grant; - write_lock_irqsave(&qos_info->flow_map_lock, flags); list_add(&bearer->list, &qos_info->bearer_head); } - write_unlock_irqrestore(&qos_info->flow_map_lock, flags); + return 0; } static int -qmi_rmnet_del_flow(struct net_device *dev, struct rmnet_flow_map *new_map) +qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm) { struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); - struct rmnet_flow_map *itm; + struct rmnet_flow_map new_map, *itm; struct rmnet_bearer_map *bearer; - unsigned long flags; int bearer_removed = 0; - if (!qos_info) { - pr_err("%s() NULL qos info\n", __func__); + if (!qos_info) return -EINVAL; - } - pr_debug("%s() 
bearer[%u], flow[%u], ip[%u]\n", __func__, - new_map->bearer_id, new_map->flow_id, new_map->ip_type); - write_lock_irqsave(&qos_info->flow_map_lock, flags); - itm = qmi_rmnet_get_flow_map(qos_info, new_map->flow_id, - new_map->ip_type); - if (itm) + ASSERT_RTNL(); + + /* flow deactivate + * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, + * tcm->tcm_ifindex - ip_type + */ + + new_map.bearer_id = tcm->tcm__pad1; + new_map.flow_id = tcm->tcm_parent; + new_map.ip_type = tcm->tcm_ifindex; + itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id, + new_map.ip_type); + if (itm) { + trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id, + new_map.ip_type, itm->tcm_handle, 0); list_del(&itm->list); + } /*clear bearer map*/ - bearer = qmi_rmnet_get_bearer_map(qos_info, new_map->bearer_id); + bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id); if (bearer && --bearer->flow_ref == 0) { list_del(&bearer->list); bearer_removed = 1; } - write_unlock_irqrestore(&qos_info->flow_map_lock, flags); kfree(itm); if (bearer_removed) @@ -239,111 +268,110 @@ int qmi_rmnet_reg_dereg_fc_ind(void *port, int reg) EXPORT_SYMBOL(qmi_rmnet_reg_dereg_fc_ind); #endif -void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) +static int +qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm) { - struct tcmsg *tcm = (struct tcmsg *)tcm_pt; - struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); - struct rmnet_flow_map new_map; int idx; - if (!dev || !port || !tcm_pt) - return; + ASSERT_RTNL(); - switch (tcm->tcm_family) { - case 1: - /* - * flow activate - * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, - * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle - */ + /* client setup + * tcm->tcm_handle - instance, tcm->tcm_info - ep_type, + * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags + */ + idx = (tcm->tcm_handle == 0) ? 0 : 1; + + if (!qmi) { + qmi = qmi_rmnet_qmi_init(); if (!qmi) - return; + return -ENOMEM; - new_map.bearer_id = tcm->tcm__pad1; - new_map.flow_id = tcm->tcm_parent; - new_map.ip_type = tcm->tcm_ifindex; - new_map.tcm_handle = tcm->tcm_handle; - qmi_rmnet_add_flow(dev, qmi, &new_map); - break; - case 2: - /* - * flow deactivate - * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, - * tcm->tcm_ifindex - ip_type + rmnet_init_qmi_pt(port, qmi); + } + + if (!qmi->fc_info[idx].dfc_client) { + qmi->client_count++; + + /* we may receive multiple client setup events if userspace + * creates a new dfc client. */ - if (!qmi) + qmi->flag = tcm->tcm_ifindex; + + qmi->fc_info[idx].svc.instance = tcm->tcm_handle; + qmi->fc_info[idx].svc.ep_type = tcm->tcm_info; + qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent; + + return dfc_qmi_client_init(port, idx, qmi); + } + + return 0; +} + +static int +__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx) +{ + + ASSERT_RTNL(); + + /* dfc_client can be deleted by service request before + * client delete event arrival. Decrease client_count here always + */ + + if (qmi->fc_info[idx].dfc_client) { + qmi->client_count--; + dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client); + qmi->fc_info[idx].dfc_client = NULL; + + if (qmi->client_count == 0) { + rmnet_reset_qmi_pt(port); + kfree(qmi); + return 0; + } + } + + return 1; +} + +static void +qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm) +{ + int idx; + + /* client delete: tcm->tcm_handle - instance*/ + idx = (tcm->tcm_handle == 0) ? 
0 : 1; + + __qmi_rmnet_delete_client(port, qmi, idx); +} + +void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) +{ + struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + struct tcmsg *tcm = (struct tcmsg *)tcm_pt; + + switch (tcm->tcm_family) { + case NLMSG_FLOW_ACTIVATE: + if (!qmi || !(qmi->flag & 0x01)) return; - new_map.bearer_id = tcm->tcm__pad1; - new_map.flow_id = tcm->tcm_parent; - new_map.ip_type = tcm->tcm_ifindex; - qmi_rmnet_del_flow(dev, &new_map); + qmi_rmnet_add_flow(dev, tcm); break; - case 4: - /* - * modem up - * tcm->tcm_handle - instance, tcm->tcm_info - ep_type, - * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags - */ - pr_debug("%s() instance[%u], ep_type[%u], iface[%u]\n", - __func__, tcm->tcm_handle, tcm->tcm_info, - tcm->tcm_parent); + case NLMSG_FLOW_DEACTIVATE: + if (!qmi || !(qmi->flag & 0x01)) + return; - if (tcm->tcm_ifindex != 1) + qmi_rmnet_del_flow(dev, tcm); + break; + case NLMSG_CLIENT_SETUP: + if (!(tcm->tcm_ifindex & 0x01)) return; - if (tcm->tcm_handle == MODEM_0_INSTANCE) - idx = MODEM_0; - else - idx = MODEM_1; - - if (!qmi) { - qmi = (struct qmi_info *)qmi_rmnet_qmi_init(); - if (!qmi) - return; - qmi->modem_count = 1; - rmnet_init_qmi_pt(port, qmi); - } else if (!qmi->fc_info[idx].dfc_client) { - /* - * dfc_client is per modem, we may receive multiple - * modem up events due to netmagrd restarts so only - * increase modem_count when we need to create a new - * dfc client. - */ - qmi->modem_count++; - } - if (qmi->fc_info[idx].dfc_client == NULL) { - qmi->fc_info[idx].svc.instance = tcm->tcm_handle; - qmi->fc_info[idx].svc.ep_type = tcm->tcm_info; - qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent; - if (dfc_qmi_client_init(port, idx) < 0) - pr_err("%s failed[%d]\n", __func__, idx); - } + qmi_rmnet_setup_client(port, qmi, tcm); break; - case 5: - /* modem down: tcm->tcm_handle - instance*/ - pr_debug("%s() instance[%u]\n", __func__, tcm->tcm_handle); + case NLMSG_CLIENT_DELETE: if (!qmi) return; - if (tcm->tcm_handle == MODEM_0_INSTANCE) - idx = MODEM_0; - else - idx = MODEM_1; - - /* - * dfc_client can be deleted by service request before - * modem down event arrival. 
Decrease modem_count here always - */ - qmi->modem_count--; - if (qmi->fc_info[idx].dfc_client) { - dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client); - qmi->fc_info[idx].dfc_client = NULL; - } - if (qmi->modem_count == 0) { - kfree(qmi); - rmnet_reset_qmi_pt(port); - } + qmi_rmnet_delete_client(port, qmi, tcm); break; default: pr_debug("%s(): No handler\n", __func__); @@ -352,23 +380,22 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) } EXPORT_SYMBOL(qmi_rmnet_change_link); -void *qmi_rmnet_qos_init(struct net_device *real_dev, uint8_t mux_id) +void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) { - struct qos_info *qos_info; + struct qos_info *qos; - qos_info = kmalloc(sizeof(struct qos_info), GFP_KERNEL); - if (!qos_info) + qos = kmalloc(sizeof(*qos), GFP_KERNEL); + if (!qos) return NULL; - qos_info->mux_id = mux_id; - qos_info->real_dev = real_dev; - qos_info->default_grant = 10240; - qos_info->tran_num = 0; - rwlock_init(&qos_info->flow_map_lock); - INIT_LIST_HEAD(&qos_info->flow_head); - INIT_LIST_HEAD(&qos_info->bearer_head); + qos->mux_id = mux_id; + qos->real_dev = real_dev; + qos->default_grant = 10240; + qos->tran_num = 0; + INIT_LIST_HEAD(&qos->flow_head); + INIT_LIST_HEAD(&qos->bearer_head); - return (void *)qos_info; + return qos; } EXPORT_SYMBOL(qmi_rmnet_qos_init); @@ -376,6 +403,22 @@ void qmi_rmnet_qos_exit(struct net_device *dev) { struct qos_info *qos = (struct qos_info *)rmnet_get_qos_pt(dev); + qmi_rmnet_clean_flow_list(qos); kfree(qos); } EXPORT_SYMBOL(qmi_rmnet_qos_exit); + +void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) +{ + struct qmi_info *qmi = (struct qmi_info *)qmi_pt; + int i; + + if (!qmi) + return; + + for (i = 0; i < MAX_CLIENT_NUM; i++) { + if (!__qmi_rmnet_delete_client(port, qmi, i)) + return; + } +} +EXPORT_SYMBOL(qmi_rmnet_qmi_exit); diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h index d2047763b6dd468f3a9341309a9c24e890d74f61..151b8345bc6090db94ba8ac06cbd177765c87c26 100644 --- a/drivers/soc/qcom/qmi_rmnet_i.h +++ b/drivers/soc/qcom/qmi_rmnet_i.h @@ -20,7 +20,7 @@ #define IP_VER_4 4 #define IP_VER_6 6 -#define MAX_MODEM_NUM 2 +#define MAX_CLIENT_NUM 2 struct rmnet_flow_map { struct list_head list; @@ -35,7 +35,6 @@ struct rmnet_bearer_map { u8 bearer_id; int flow_ref; u32 grant_size; - u32 counter; u16 seq; u8 ack_req; }; @@ -52,18 +51,18 @@ struct fc_info { }; struct qos_info { - uint8_t mux_id; + u8 mux_id; struct net_device *real_dev; - rwlock_t flow_map_lock; struct list_head flow_head; struct list_head bearer_head; - uint32_t default_grant; - uint32_t tran_num; + u32 default_grant; + u32 tran_num; }; struct qmi_info { - int modem_count; - struct fc_info fc_info[MAX_MODEM_NUM]; + int client_count; + int flag; + struct fc_info fc_info[MAX_CLIENT_NUM]; }; enum data_ep_type_enum_v01 { @@ -79,22 +78,24 @@ enum data_ep_type_enum_v01 { struct data_ep_id_type_v01 { enum data_ep_type_enum_v01 ep_type; - uint32_t iface_id; + u32 iface_id; }; extern struct qmi_elem_info data_ep_id_type_v01_ei[]; struct rmnet_flow_map * qmi_rmnet_get_flow_map(struct qos_info *qos_info, - uint32_t flow_id, int ip_type); + u32 flow_id, int ip_type); struct rmnet_bearer_map * -qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id); +qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id); -int dfc_qmi_client_init(void *port, int modem); +int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi); void dfc_qmi_client_exit(void *dfc_data); +void dfc_reset_port_pt(void 
*dfc_data); + void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, struct sk_buff *skb); diff --git a/drivers/soc/qcom/qsee_ipc_irq.c b/drivers/soc/qcom/qsee_ipc_irq.c index 99a29c61f560afff336976707512d629d1d07602..0e397261488bf360e4560b84f23894e8b768b541 100644 --- a/drivers/soc/qcom/qsee_ipc_irq.c +++ b/drivers/soc/qcom/qsee_ipc_irq.c @@ -100,7 +100,8 @@ static irqreturn_t qsee_intr(int irq, void *data) irq_pin = irq_find_mapping(qirq->domain, to_hwirq(i, j)); desc = irq_to_desc(irq_pin); - handle_simple_irq(desc); + if (desc) + handle_simple_irq(desc); regmap_write(qirq->regmap, bank->data->clear, BIT(j)); } diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c index df3dbeb9553d3a3765d83d6d6ac338cd79fdaf09..e7d9fac5e589d824a48a21af62e47638756fea90 100644 --- a/drivers/soc/qcom/rpm_stats.c +++ b/drivers/soc/qcom/rpm_stats.c @@ -11,6 +11,8 @@ * */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + #include #include #include @@ -36,6 +38,7 @@ struct msm_rpmstats_record { struct msm_rpmstats_platform_data { phys_addr_t phys_addr_base; u32 phys_size; + u32 num_records; }; struct msm_rpmstats_private_data { @@ -43,7 +46,7 @@ struct msm_rpmstats_private_data { u32 num_records; u32 read_idx; u32 len; - char buf[320]; + char buf[480]; struct msm_rpmstats_platform_data *platform_data; }; @@ -61,6 +64,7 @@ struct msm_rpm_stats_data { }; struct msm_rpmstats_kobj_attr { + struct kobject *kobj; struct kobj_attribute ka; struct msm_rpmstats_platform_data *pd; }; @@ -175,29 +179,32 @@ static ssize_t rpmstats_show(struct kobject *kobj, { struct msm_rpmstats_private_data prvdata; struct msm_rpmstats_platform_data *pdata = NULL; + ssize_t length; pdata = GET_PDATA_OF_ATTR(attr); prvdata.reg_base = ioremap_nocache(pdata->phys_addr_base, pdata->phys_size); if (!prvdata.reg_base) { - pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n", - __func__, &pdata->phys_addr_base, - pdata->phys_size); + pr_err("ERROR could not ioremap start=%pa, len=%u\n", + &pdata->phys_addr_base, pdata->phys_size); return -EBUSY; } prvdata.read_idx = prvdata.len = 0; prvdata.platform_data = pdata; - prvdata.num_records = RPM_STATS_NUM_REC; + prvdata.num_records = pdata->num_records; if (prvdata.read_idx < prvdata.num_records) prvdata.len = msm_rpmstats_copy_stats(&prvdata); - return snprintf(buf, prvdata.len, "%s", prvdata.buf); + length = scnprintf(buf, prvdata.len, "%s", prvdata.buf); + iounmap(prvdata.reg_base); + return length; } -static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd) +static int msm_rpmstats_create_sysfs(struct platform_device *pdev, + struct msm_rpmstats_platform_data *pd) { struct kobject *rpmstats_kobj = NULL; struct msm_rpmstats_kobj_attr *rpms_ka = NULL; @@ -205,7 +212,7 @@ static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd) rpmstats_kobj = kobject_create_and_add("system_sleep", power_kobj); if (!rpmstats_kobj) { - pr_err("%s: Cannot create rpmstats kobject\n", __func__); + pr_err("Cannot create rpmstats kobject\n"); ret = -ENOMEM; goto fail; } @@ -217,6 +224,8 @@ static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd) goto fail; } + rpms_ka->kobj = rpmstats_kobj; + sysfs_attr_init(&rpms_ka->ka.attr); rpms_ka->pd = pd; rpms_ka->ka.attr.mode = 0444; @@ -225,6 +234,7 @@ static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd) rpms_ka->ka.store = NULL; ret = sysfs_create_file(rpmstats_kobj, &rpms_ka->ka.attr); + platform_set_drvdata(pdev, rpms_ka); fail: return ret; @@ -233,28 +243,27 @@ 
static int msm_rpmstats_create_sysfs(struct msm_rpmstats_platform_data *pd) static int msm_rpmstats_probe(struct platform_device *pdev) { struct msm_rpmstats_platform_data *pdata; - struct msm_rpmstats_platform_data *pd; struct resource *res = NULL, *offset = NULL; u32 offset_addr = 0; void __iomem *phys_ptr = NULL; + char *key; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "phys_addr_base"); + key = "phys_addr_base"; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, key); if (!res) return -EINVAL; - offset = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "offset_addr"); + key = "offset_addr"; + offset = platform_get_resource_byname(pdev, IORESOURCE_MEM, key); if (offset) { /* Remap the rpm-stats pointer */ phys_ptr = ioremap_nocache(offset->start, SZ_4); if (!phys_ptr) { - pr_err("%s: Failed to ioremap address: %x\n", - __func__, offset_addr); + pr_err("Failed to ioremap offset address\n"); return -ENODEV; } offset_addr = readl_relaxed(phys_ptr); @@ -264,14 +273,33 @@ static int msm_rpmstats_probe(struct platform_device *pdev) pdata->phys_addr_base = res->start + offset_addr; pdata->phys_size = resource_size(res); - if (pdev->dev.platform_data) - pd = pdev->dev.platform_data; + key = "qcom,num-records"; + if (of_property_read_u32(pdev->dev.of_node, key, &pdata->num_records)) + pdata->num_records = RPM_STATS_NUM_REC; - msm_rpmstats_create_sysfs(pdata); + msm_rpmstats_create_sysfs(pdev, pdata); return 0; } +static int msm_rpmstats_remove(struct platform_device *pdev) +{ + struct msm_rpmstats_kobj_attr *rpms_ka; + + if (!pdev) + return -EINVAL; + + rpms_ka = (struct msm_rpmstats_kobj_attr *) + platform_get_drvdata(pdev); + + sysfs_remove_file(rpms_ka->kobj, &rpms_ka->ka.attr); + kobject_put(rpms_ka->kobj); + platform_set_drvdata(pdev, NULL); + + return 0; +} + + static const struct of_device_id rpm_stats_table[] = { { .compatible = "qcom,rpm-stats" }, { }, @@ -279,6 +307,7 @@ static const struct of_device_id rpm_stats_table[] = { static struct platform_driver msm_rpmstats_driver = { .probe = msm_rpmstats_probe, + .remove = msm_rpmstats_remove, .driver = { .name = "msm_rpm_stat", .owner = THIS_MODULE, diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index aaad9caae0c2d2419ad1d75b1d3415f19ca98615..7fbe88a81bd7a6742d954988a8b117a2c82f8106 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -257,21 +257,28 @@ static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc, switch (state) { case RPMH_ACTIVE_ONLY_STATE: case RPMH_AWAKE_STATE: - if (req->sleep_val != UINT_MAX) + if (req->sleep_val != UINT_MAX) { req->wake_val = cmd->data; + rpm->dirty = true; + } break; case RPMH_WAKE_ONLY_STATE: - req->wake_val = cmd->data; + if (req->wake_val != cmd->data) { + req->wake_val = cmd->data; + rpm->dirty = true; + } break; case RPMH_SLEEP_STATE: - req->sleep_val = cmd->data; + if (req->sleep_val != cmd->data) { + req->sleep_val = cmd->data; + rpm->dirty = true; + } break; default: break; }; unlock: - rpm->dirty = true; spin_unlock_irqrestore(&rpm->lock, flags); return req; diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c index 283e367cf439a58729ae07b9a94288137e62249b..22704b807700b39d4d3cdc4f4ec98fe408fb529d 100644 --- a/drivers/soc/qcom/rpmh_master_stat.c +++ b/drivers/soc/qcom/rpmh_master_stat.c @@ -265,6 +265,7 @@ static int msm_rpmh_master_stats_remove(struct platform_device *pdev) kobject_put(prvdata->kobj); 
platform_set_drvdata(pdev, NULL); iounmap(rpmh_unit_base); + rpmh_unit_base = NULL; return 0; } diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c index 28bb85bd3870621218004458d4470777f26e3653..c54b9a8e507b0e94af2739d5bf45e7af85401d90 100644 --- a/drivers/soc/qcom/smcinvoke.c +++ b/drivers/soc/qcom/smcinvoke.c @@ -15,13 +15,17 @@ #include #include +#include #include #include #include #include +#include #include #include #include +#include +#include #include #include @@ -30,25 +34,117 @@ #include "smcinvoke_object.h" #include "../../misc/qseecom_kernel.h" -#define SMCINVOKE_DEV "smcinvoke" -#define SMCINVOKE_TZ_PARAM_ID 0x224 -#define SMCINVOKE_TZ_CMD 0x32000600 -#define SMCINVOKE_TZ_ROOT_OBJ 1 -#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096 -#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t)) -#define SMCINVOKE_TZ_OBJ_NULL 0 - -#define FOR_ARGS(ndxvar, counts, section) \ - for (ndxvar = object_counts_index_##section(counts); \ - ndxvar < (object_counts_index_##section(counts) \ - + object_counts_num_##section(counts)); \ +#define SMCINVOKE_DEV "smcinvoke" +#define SMCINVOKE_TZ_ROOT_OBJ 1 +#define SMCINVOKE_TZ_OBJ_NULL 0 +#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096 +#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t)) +#define SMCINVOKE_NEXT_AVAILABLE_TXN 0 +#define SMCINVOKE_REQ_PLACED 1 +#define SMCINVOKE_REQ_PROCESSING 2 +#define SMCINVOKE_REQ_PROCESSED 3 +#define SMCINVOKE_INCREMENT 1 +#define SMCINVOKE_DECREMENT 0 +#define SMCINVOKE_OBJ_TYPE_TZ_OBJ 0 +#define SMCINVOKE_OBJ_TYPE_SERVER 1 +#define SMCINVOKE_MEM_MAP_OBJ 0 +#define SMCINVOKE_MEM_RGN_OBJ 1 +#define SMCINVOKE_MEM_PERM_RW 6 + +/* TZ defined values - Start */ +#define SMCINVOKE_INVOKE_PARAM_ID 0x224 +#define SMCINVOKE_CB_RSP_PARAM_ID 0x22 +#define SMCINVOKE_INVOKE_CMD 0x32000600 +#define SMCINVOKE_CB_RSP_CMD 0x32000601 +#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED 3 +/* TZ defined values - End */ + +/* + * This is the state when server FD has been closed but + * TZ still has refs of CBOBjs served by this server + */ +#define SMCINVOKE_SERVER_STATE_DEFUNCT 1 + +#define FOR_ARGS(ndxvar, counts, section) \ + for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \ + ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \ + + OBJECT_COUNTS_NUM_##section(counts)); \ ++ndxvar) +#define TZCB_BUF_OFFSET(tzcb_req) (sizeof(tzcb_req->result) + \ + sizeof(struct smcinvoke_msg_hdr) + \ + sizeof(union smcinvoke_tz_args) * \ + OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts)) + +/* + * +ve uhandle : either remote obj or mem obj, decided by f_ops + * -ve uhandle : either Obj NULL or CBObj + * - -1: OBJ NULL + * - < -1: CBObj + */ +#define UHANDLE_IS_FD(h) ((h) >= 0) +#define UHANDLE_IS_NULL(h) ((h) == SMCINVOKE_USERSPACE_OBJ_NULL) +#define UHANDLE_IS_CB_OBJ(h) (h < SMCINVOKE_USERSPACE_OBJ_NULL) +#define UHANDLE_NULL (SMCINVOKE_USERSPACE_OBJ_NULL) +/* + * MAKE => create handle for other domain i.e. TZ or userspace + * GET => retrieve obj from incoming handle + */ +#define UHANDLE_GET_CB_OBJ(h) (-2-(h)) +#define UHANDLE_MAKE_CB_OBJ(o) (-2-(o)) +#define UHANDLE_GET_FD(h) (h) + +/* + * +ve tzhandle : remote object i.e. owned by TZ + * -ve tzhandle : local object i.e. 
owned by linux + * -------------------------------------------------- + *| 1 (1 bit) | Obj Id (15 bits) | srvr id (16 bits) | + * --------------------------------------------------- + * Server ids are defined below for various local objects + * server id 0 : Kernel Obj + * server id 1 : Memory region Obj + * server id 2 : Memory map Obj + * server id 3-15: Reserverd + * server id 16 & up: Callback Objs + */ +#define KRNL_SRVR_ID 0 +#define MEM_RGN_SRVR_ID 1 +#define MEM_MAP_SRVR_ID 2 +#define CBOBJ_SERVER_ID_START 0x10 +/* CBOBJs will be served by server id 0x10 onwards */ +#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF)) +#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF) +#define TZHANDLE_MAKE_LOCAL(s, o) (((0x8000 | (o)) << 16) | s) + +#define TZHANDLE_IS_NULL(h) ((h) == SMCINVOKE_TZ_OBJ_NULL) +#define TZHANDLE_IS_LOCAL(h) ((h) & 0x80000000) +#define TZHANDLE_IS_REMOTE(h) (!TZHANDLE_IS_NULL(h) && !TZHANDLE_IS_LOCAL(h)) + +#define TZHANDLE_IS_KERNEL_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) == KRNL_SRVR_ID) +#define TZHANDLE_IS_MEM_RGN_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID) +#define TZHANDLE_IS_MEM_MAP_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) == MEM_MAP_SRVR_ID) +#define TZHANDLE_IS_MEM_OBJ(h) (TZHANDLE_IS_MEM_RGN_OBJ(h) || \ + TZHANDLE_IS_MEM_MAP_OBJ(h)) +#define TZHANDLE_IS_CB_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) >= CBOBJ_SERVER_ID_START) + +#define FILE_IS_REMOTE_OBJ(f) ((f)->f_op && (f)->f_op == &g_smcinvoke_fops) + +static DEFINE_MUTEX(g_smcinvoke_lock); +static DEFINE_HASHTABLE(g_cb_servers, 8); +static LIST_HEAD(g_mem_objs); +static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START; +static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id; +static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE; + static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long); static int smcinvoke_open(struct inode *, struct file *); static int smcinvoke_release(struct inode *, struct file *); -static const struct file_operations smcinvoke_fops = { +static const struct file_operations g_smcinvoke_fops = { .owner = THIS_MODULE, .unlocked_ioctl = smcinvoke_ioctl, .compat_ioctl = smcinvoke_ioctl, @@ -56,6 +152,12 @@ static const struct file_operations smcinvoke_fops = { .release = smcinvoke_release, }; +static dev_t smcinvoke_device_no; +static struct cdev smcinvoke_cdev; +static struct class *driver_class; +static struct device *class_dev; +static struct platform_device *smcinvoke_pdev; + struct smcinvoke_buf_hdr { uint32_t offset; uint32_t size; @@ -63,22 +165,320 @@ struct smcinvoke_buf_hdr { union smcinvoke_tz_args { struct smcinvoke_buf_hdr b; - uint32_t tzhandle; + int32_t handle; }; + struct smcinvoke_msg_hdr { - uint32_t tzhandle; - uint32_t op; - uint32_t counts; + uint32_t tzhandle; + uint32_t op; + uint32_t counts; }; -struct smcinvoke_tzobj_context { - uint32_t tzhandle; +/* Inbound reqs from TZ */ +struct smcinvoke_tzcb_req { + int32_t result; + struct smcinvoke_msg_hdr hdr; + union smcinvoke_tz_args args[0]; }; -static dev_t smcinvoke_device_no; -struct cdev smcinvoke_cdev; -struct class *driver_class; -struct device *class_dev; +struct smcinvoke_file_data { + uint32_t context_type; + union { + uint32_t tzhandle; + uint16_t server_id; + }; +}; + +struct smcinvoke_piggyback_msg { + uint32_t version; + uint32_t op; + uint32_t counts; + int32_t objs[0]; +}; + +/* Data structure to hold request coming from TZ */ +struct smcinvoke_cb_txn { + uint32_t txn_id; + int32_t 
state; + struct smcinvoke_tzcb_req *cb_req; + size_t cb_req_bytes; + struct file **filp_to_release; + struct hlist_node hash; +}; + +struct smcinvoke_server_info { + uint16_t server_id; + uint16_t state; + uint32_t txn_id; + wait_queue_head_t req_wait_q; + wait_queue_head_t rsp_wait_q; + size_t cb_buf_size; + DECLARE_HASHTABLE(reqs_table, 4); + DECLARE_HASHTABLE(responses_table, 4); + struct hlist_node hash; + struct list_head pending_cbobjs; +}; + +struct smcinvoke_cbobj { + uint16_t cbobj_id; + struct kref ref_cnt; + struct smcinvoke_server_info *server; + struct list_head list; +}; + +/* + * We require couple of objects, one for mem region & another + * for mapped mem_obj once mem region has been mapped. It is + * possible that TZ can release either independent of other. + */ +struct smcinvoke_mem_obj { + /* these ids are objid part of tzhandle */ + uint16_t mem_region_id; + uint16_t mem_map_obj_id; + struct dma_buf *dma_buf; + struct dma_buf_attachment *buf_attach; + struct sg_table *sgt; + struct kref mem_regn_ref_cnt; + struct kref mem_map_obj_ref_cnt; + uint64_t p_addr; + size_t p_addr_len; + struct list_head list; +}; + +static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id) +{ + struct smcinvoke_server_info *data = NULL; + + hash_for_each_possible(g_cb_servers, data, hash, server_id) { + if (data->server_id == server_id) + return data; + } + return NULL; +} + +static uint16_t next_cb_server_id_locked(void) +{ + while (find_cb_server_locked(++g_last_cb_server_id)); + + return g_last_cb_server_id; +} + +static inline void release_filp(struct file **filp_to_release, size_t arr_len) +{ + size_t i = 0; + + for (i = 0; i < arr_len; i++) { + if (filp_to_release[i]) { + fput(filp_to_release[i]); + filp_to_release[i] = NULL; + } + } +} + +static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id, + bool is_mem_rgn_obj) +{ + struct smcinvoke_mem_obj *mem_obj = NULL; + + if (list_empty(&g_mem_objs)) { + pr_err("%s: mem obj %d not found\n", __func__, mem_obj_id); + return NULL; + } + + list_for_each_entry(mem_obj, &g_mem_objs, list) { + if ((is_mem_rgn_obj && + (mem_obj->mem_region_id == mem_obj_id)) || + (!is_mem_rgn_obj && + (mem_obj->mem_map_obj_id == mem_obj_id))) + return mem_obj; + } + return NULL; +} + +static uint32_t next_mem_region_obj_id_locked(void) +{ + while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ)); + + return g_last_mem_rgn_id; +} + +static uint32_t next_mem_map_obj_id_locked(void) +{ + while (find_mem_obj_locked(++g_last_mem_map_obj_id, + SMCINVOKE_MEM_MAP_OBJ)); + + return g_last_mem_map_obj_id; +} + +static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) +{ + list_del(&mem_obj->list); + dma_buf_put(mem_obj->dma_buf); + kfree(mem_obj); +} + +static void del_mem_regn_obj_locked(struct kref *kref) +{ + struct smcinvoke_mem_obj *mem_obj = container_of(kref, + struct smcinvoke_mem_obj, mem_regn_ref_cnt); + + /* + * mem_regn obj and mem_map obj are held into mem_obj structure which + * can't be released until both kinds of objs have been released. 
+ * So check whether mem_map iobj has ref 0 and only then release mem_obj + */ + if (kref_read(&mem_obj->mem_map_obj_ref_cnt) == 0) + free_mem_obj_locked(mem_obj); +} + +static void del_mem_map_obj_locked(struct kref *kref) +{ + struct smcinvoke_mem_obj *mem_obj = container_of(kref, + struct smcinvoke_mem_obj, mem_map_obj_ref_cnt); + + mem_obj->p_addr_len = 0; + mem_obj->p_addr = 0; + if (mem_obj->sgt) + dma_buf_unmap_attachment(mem_obj->buf_attach, + mem_obj->sgt, DMA_BIDIRECTIONAL); + if (mem_obj->buf_attach) + dma_buf_detach(mem_obj->dma_buf, mem_obj->buf_attach); + + /* + * mem_regn obj and mem_map obj are held into mem_obj structure which + * can't be released until both kinds of objs have been released. + * So check if mem_regn obj has ref 0 and only then release mem_obj + */ + if (kref_read(&mem_obj->mem_regn_ref_cnt) == 0) + free_mem_obj_locked(mem_obj); +} + +static int release_mem_obj_locked(int32_t tzhandle) +{ + int is_mem_regn_obj = TZHANDLE_IS_MEM_RGN_OBJ(tzhandle); + struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked( + TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj); + + if (!mem_obj) + return OBJECT_ERROR_BADOBJ; + + if (is_mem_regn_obj) + kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked); + else + kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked); + + return OBJECT_OK; +} + +static void free_pending_cbobj_locked(struct kref *kref) +{ + struct smcinvoke_server_info *server = NULL; + struct smcinvoke_cbobj *obj = container_of(kref, + struct smcinvoke_cbobj, ref_cnt); + list_del(&obj->list); + server = obj->server; + kfree(obj); + if ((server->state == SMCINVOKE_SERVER_STATE_DEFUNCT) && + list_empty(&server->pending_cbobjs)) + kfree(server); +} + +static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id) +{ + struct smcinvoke_server_info *server = find_cb_server_locked(srvr_id); + struct list_head *head = NULL; + struct smcinvoke_cbobj *cbobj = NULL; + struct smcinvoke_cbobj *obj = NULL; + + if (!server) + return OBJECT_ERROR_BADOBJ; + + head = &server->pending_cbobjs; + list_for_each_entry(cbobj, head, list) + if (cbobj->cbobj_id == obj_id) { + kref_get(&cbobj->ref_cnt); + return 0; + } + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return OBJECT_ERROR_KMEM; + + obj->cbobj_id = obj_id; + kref_init(&obj->ref_cnt); + obj->server = server; + list_add_tail(&obj->list, head); + + return 0; +} + +static int put_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id) +{ + struct smcinvoke_server_info *srvr_info = + find_cb_server_locked(srvr_id); + struct list_head *head = NULL; + struct smcinvoke_cbobj *cbobj = NULL; + + if (!srvr_info) + return -EINVAL; + + head = &srvr_info->pending_cbobjs; + list_for_each_entry(cbobj, head, list) + if (cbobj->cbobj_id == obj_id) { + kref_put(&cbobj->ref_cnt, free_pending_cbobj_locked); + return 0; + } + return -EINVAL; +} + +static int release_tzhandle_locked(int32_t tzhandle) +{ + if (TZHANDLE_IS_MEM_OBJ(tzhandle)) + return release_mem_obj_locked(tzhandle); + else if (TZHANDLE_IS_CB_OBJ(tzhandle)) + return put_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle), + TZHANDLE_GET_OBJID(tzhandle)); + return OBJECT_ERROR; +} + +static void release_tzhandles(const int32_t *tzhandles, size_t len) +{ + size_t i; + + mutex_lock(&g_smcinvoke_lock); + for (i = 0; i < len; i++) + release_tzhandle_locked(tzhandles[i]); + mutex_unlock(&g_smcinvoke_lock); +} + +static struct smcinvoke_cb_txn *find_cbtxn_locked( + struct smcinvoke_server_info *server, + uint32_t txn_id, int32_t state) +{ + int i = 0; + 
struct smcinvoke_cb_txn *cb_txn = NULL; + + /* + * Since HASH_BITS() does not work on pointers, we can't select hash + * table using state and loop over it. + */ + if (state == SMCINVOKE_REQ_PLACED) { + /* pick up 1st req */ + hash_for_each(server->reqs_table, i, cb_txn, hash) { + hash_del(&cb_txn->hash); + return cb_txn; + } + } else if (state == SMCINVOKE_REQ_PROCESSING) { + hash_for_each_possible( + server->responses_table, cb_txn, hash, txn_id) { + if (cb_txn->txn_id == txn_id) { + hash_del(&cb_txn->hash); + return cb_txn; + } + } + } + return NULL; +} /* * size_add saturates at SIZE_MAX. If integer overflow is detected, @@ -107,75 +507,158 @@ static inline size_t size_align(size_t a, size_t b) return size_add(a, pad_size(a, b)); } +static uint16_t get_server_id(int cb_server_fd) +{ + uint16_t server_id = 0; + struct smcinvoke_file_data *svr_cxt = NULL; + struct file *tmp_filp = fget(cb_server_fd); + + if (!tmp_filp) + return server_id; + + svr_cxt = tmp_filp->private_data; + if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER) + server_id = svr_cxt->server_id; + + if (tmp_filp) + fput(tmp_filp); + + return server_id; +} + +static bool is_dma_fd(int32_t uhandle, struct dma_buf **dma_buf) +{ + *dma_buf = dma_buf_get(uhandle); + return IS_ERR_OR_NULL(*dma_buf) ? false : true; +} + +static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj, + struct file **filp) +{ + bool ret = false; + struct file *tmp_filp = fget(uhandle); + + if (!tmp_filp) + return ret; + + if (FILE_IS_REMOTE_OBJ(tmp_filp)) { + *tzobj = tmp_filp->private_data; + if ((*tzobj)->context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + *filp = tmp_filp; + tmp_filp = NULL; + ret = true; + } + } + + if (tmp_filp) + fput(tmp_filp); + return ret; +} + +static int create_mem_obj(struct dma_buf *dma_buf, int32_t *mem_obj) +{ + struct smcinvoke_mem_obj *t_mem_obj = + kzalloc(sizeof(*t_mem_obj), GFP_KERNEL); + + if (!t_mem_obj) { + dma_buf_put(dma_buf); + return -ENOMEM; + } + + kref_init(&t_mem_obj->mem_regn_ref_cnt); + t_mem_obj->dma_buf = dma_buf; + mutex_lock(&g_smcinvoke_lock); + t_mem_obj->mem_region_id = next_mem_region_obj_id_locked(); + list_add_tail(&t_mem_obj->list, &g_mem_objs); + mutex_unlock(&g_smcinvoke_lock); + *mem_obj = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, + t_mem_obj->mem_region_id); + return 0; +} + /* * This function retrieves file pointer corresponding to FD provided. It stores * retrived file pointer until IOCTL call is concluded. Once call is completed, * all stored file pointers are released. file pointers are stored to prevent * other threads from releasing that FD while IOCTL is in progress. 
*/ -static int get_tzhandle_from_fd(int64_t fd, struct file **filp, - uint32_t *tzhandle) +static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd, + struct file **filp, uint32_t *tzhandle) { int ret = -EBADF; - struct file *tmp_filp = NULL; - struct smcinvoke_tzobj_context *tzobj = NULL; + uint16_t server_id = 0; - if (fd == SMCINVOKE_USERSPACE_OBJ_NULL) { + if (UHANDLE_IS_NULL(uhandle)) { *tzhandle = SMCINVOKE_TZ_OBJ_NULL; ret = 0; - goto out; - } else if (fd < SMCINVOKE_USERSPACE_OBJ_NULL) { - goto out; - } - - tmp_filp = fget(fd); - if (!tmp_filp) - goto out; + } else if (UHANDLE_IS_CB_OBJ(uhandle)) { + server_id = get_server_id(server_fd); + if (server_id < CBOBJ_SERVER_ID_START) + goto out; - /* Verify if filp is smcinvoke device's file pointer */ - if (!tmp_filp->f_op || !tmp_filp->private_data || - (tmp_filp->f_op != &smcinvoke_fops)) { - fput(tmp_filp); - goto out; + mutex_lock(&g_smcinvoke_lock); + ret = get_pending_cbobj_locked(server_id, + UHANDLE_GET_CB_OBJ(uhandle)); + mutex_unlock(&g_smcinvoke_lock); + if (ret) + goto out; + *tzhandle = TZHANDLE_MAKE_LOCAL(server_id, + UHANDLE_GET_CB_OBJ(uhandle)); + ret = 0; + } else if (UHANDLE_IS_FD(uhandle)) { + struct dma_buf *dma_buf = NULL; + struct smcinvoke_file_data *tzobj = NULL; + + if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) { + ret = create_mem_obj(dma_buf, tzhandle); + } else if (is_remote_obj(UHANDLE_GET_FD(uhandle), + &tzobj, filp)) { + *tzhandle = tzobj->tzhandle; + ret = 0; + } } - - tzobj = tmp_filp->private_data; - *tzhandle = tzobj->tzhandle; - *filp = tmp_filp; - ret = 0; out: + if (ret && *filp) { + fput(*filp); + *filp = NULL; + } return ret; } -static int get_fd_from_tzhandle(uint32_t tzhandle, int64_t *fd) +static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int64_t *fd) { - int unused_fd = -1, ret = -1; + int unused_fd = -1, ret = -EINVAL; struct file *f = NULL; - struct smcinvoke_tzobj_context *cxt = NULL; - - if (tzhandle == SMCINVOKE_TZ_OBJ_NULL) { - *fd = SMCINVOKE_USERSPACE_OBJ_NULL; - ret = 0; - goto out; - } + struct smcinvoke_file_data *cxt = NULL; cxt = kzalloc(sizeof(*cxt), GFP_KERNEL); if (!cxt) { ret = -ENOMEM; goto out; } + if (obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + cxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ; + cxt->tzhandle = obj; + } else if (obj_type == SMCINVOKE_OBJ_TYPE_SERVER) { + cxt->context_type = SMCINVOKE_OBJ_TYPE_SERVER; + cxt->server_id = obj; + } else { + goto out; + } + unused_fd = get_unused_fd_flags(O_RDWR); if (unused_fd < 0) goto out; - f = anon_inode_getfile(SMCINVOKE_DEV, &smcinvoke_fops, cxt, O_RDWR); + if (fd == NULL) + goto out; + + f = anon_inode_getfile(SMCINVOKE_DEV, &g_smcinvoke_fops, cxt, O_RDWR); if (IS_ERR(f)) goto out; *fd = unused_fd; fd_install(*fd, f); - ((struct smcinvoke_tzobj_context *) - (f->private_data))->tzhandle = tzhandle; return 0; out: if (unused_fd >= 0) @@ -185,50 +668,237 @@ static int get_fd_from_tzhandle(uint32_t tzhandle, int64_t *fd) return ret; } -static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len, - const uint8_t *out_buf, size_t out_buf_len, - int32_t *smcinvoke_result) +static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id, + int64_t *uhandle) { - int ret = 0; - struct scm_desc desc = {0}; - size_t inbuf_flush_size = (1UL << get_order(in_buf_len)) * PAGE_SIZE; - size_t outbuf_flush_size = (1UL << get_order(out_buf_len)) * PAGE_SIZE; + int ret = -1; - desc.arginfo = SMCINVOKE_TZ_PARAM_ID; - desc.args[0] = (uint64_t)virt_to_phys(in_buf); - desc.args[1] = inbuf_flush_size; - 
desc.args[2] = (uint64_t)virt_to_phys(out_buf); - desc.args[3] = outbuf_flush_size; + if (TZHANDLE_IS_NULL(tzhandle)) { + *uhandle = UHANDLE_NULL; + ret = 0; + } else if (TZHANDLE_IS_CB_OBJ(tzhandle)) { + if (srvr_id != TZHANDLE_GET_SERVER(tzhandle)) + goto out; + *uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle)); + mutex_lock(&g_smcinvoke_lock); + ret = get_pending_cbobj_locked(srvr_id, + TZHANDLE_GET_OBJID(tzhandle)); + mutex_unlock(&g_smcinvoke_lock); + } else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) { + struct smcinvoke_mem_obj *mem_obj = NULL; + + mutex_lock(&g_smcinvoke_lock); + mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle), + SMCINVOKE_MEM_RGN_OBJ); + + if (mem_obj != NULL) { + unsigned long flags = 0; + int fd; + + if (dma_buf_get_flags(mem_obj->dma_buf, &flags)) + goto exit_lock; + fd = dma_buf_fd(mem_obj->dma_buf, flags); + + if (fd < 0) + goto exit_lock; + *uhandle = fd; + ret = 0; + } +exit_lock: + mutex_unlock(&g_smcinvoke_lock); + } else if (TZHANDLE_IS_REMOTE(tzhandle)) { + /* if execution comes here => tzhandle is an unsigned int */ + ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ, + (uint32_t)tzhandle, uhandle); + } +out: + return ret; +} - dmac_flush_range(in_buf, in_buf + inbuf_flush_size); - dmac_flush_range(out_buf, out_buf + outbuf_flush_size); +static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *msg = buf; - ret = scm_call2(SMCINVOKE_TZ_CMD, &desc); + if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) + return OBJECT_ERROR_INVALID; - /* process listener request */ - if (!ret && (desc.ret[0] == QSEOS_RESULT_INCOMPLETE || - desc.ret[0] == QSEOS_RESULT_BLOCKED_ON_LISTENER)) - ret = qseecom_process_listener_from_smcinvoke(&desc); + return release_tzhandle_locked(msg->hdr.tzhandle); +} - *smcinvoke_result = (int32_t)desc.ret[1]; - if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0]) - pr_err("SCM call failed with ret val = %d %d %d %d\n", - ret, (int)desc.ret[0], - (int)desc.ret[1], (int)desc.ret[2]); +static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len) +{ + int ret = OBJECT_OK; + struct smcinvoke_tzcb_req *msg = buf; + struct { + uint64_t p_addr; + uint64_t len; + uint32_t perms; + } *ob = NULL; + int32_t *oo = NULL; + struct smcinvoke_mem_obj *mem_obj = NULL; + struct dma_buf_attachment *buf_attach = NULL; + struct sg_table *sgt = NULL; + + if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) || + (buf_len - msg->args[0].b.offset < msg->args[0].b.size)) + return OBJECT_ERROR_INVALID; + + /* args[0] = BO, args[1] = OI, args[2] = OO */ + ob = buf + msg->args[0].b.offset; + oo = &msg->args[2].handle; + + mutex_lock(&g_smcinvoke_lock); + mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle), + SMCINVOKE_MEM_RGN_OBJ); + if (!mem_obj) { + mutex_unlock(&g_smcinvoke_lock); + return OBJECT_ERROR_BADOBJ; + } - dmac_inv_range(in_buf, in_buf + inbuf_flush_size); - dmac_inv_range(out_buf, out_buf + outbuf_flush_size); + if (!mem_obj->p_addr) { + kref_init(&mem_obj->mem_map_obj_ref_cnt); + buf_attach = dma_buf_attach(mem_obj->dma_buf, + &smcinvoke_pdev->dev); + if (IS_ERR(buf_attach)) { + ret = OBJECT_ERROR_KMEM; + goto out; + } + mem_obj->buf_attach = buf_attach; + + sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + ret = OBJECT_ERROR_KMEM; + goto out; + } + mem_obj->sgt = sgt; + + /* contiguous only => nents=1 */ + if (sgt->nents != 1) { + ret = OBJECT_ERROR_INVALID; + goto out; + } + mem_obj->p_addr = sg_dma_address(sgt->sgl); + 
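/*
 * Illustrative sketch (not part of the patch): the dma-buf mapping sequence
 * used by smcinvoke_map_mem_region() above - attach the buffer to our
 * device, map the attachment to get an sg_table, and insist on a single
 * contiguous segment before the address is handed to TZ. The function name
 * example_map_dmabuf() and its parameters are invented for illustration.
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_map_dmabuf(struct device *dev, struct dma_buf *dmabuf,
			      dma_addr_t *addr, size_t *len)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	if (sgt->nents != 1) {	/* TZ expects one contiguous region */
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, attach);
		return -EINVAL;
	}

	*addr = sg_dma_address(sgt->sgl);
	*len = sg_dma_len(sgt->sgl);
	return 0;
}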
mem_obj->p_addr_len = sgt->sgl->length; + if (!mem_obj->p_addr) { + ret = OBJECT_ERROR_INVALID; + goto out; + } + mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked(); + } else { + kref_get(&mem_obj->mem_map_obj_ref_cnt); + } + ob->p_addr = mem_obj->p_addr; + ob->len = mem_obj->p_addr_len; + ob->perms = SMCINVOKE_MEM_PERM_RW; + *oo = mem_obj->mem_map_obj_id; +out: + if (ret != OBJECT_OK) + kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked); + mutex_unlock(&g_smcinvoke_lock); return ret; } -static int marshal_out(void *buf, uint32_t buf_size, +static void process_kernel_obj(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *cb_req = buf; + + cb_req->result = (cb_req->hdr.op == OBJECT_OP_MAP_REGION) ? + smcinvoke_map_mem_region(buf, buf_len) : + OBJECT_ERROR_INVALID; +} + +static void process_mem_obj(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *cb_req = buf; + + mutex_lock(&g_smcinvoke_lock); + cb_req->result = (cb_req->hdr.op == OBJECT_OP_RELEASE) ? + smcinvoke_release_mem_obj_locked(buf, buf_len) : + OBJECT_ERROR_INVALID; + mutex_unlock(&g_smcinvoke_lock); +} + +/* + * Buf should be aligned to struct smcinvoke_tzcb_req + */ +static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp) +{ + /* ret is going to TZ. Provide values from OBJECT_ERROR_<> */ + int ret = OBJECT_ERROR_DEFUNCT; + struct smcinvoke_cb_txn *cb_txn = NULL; + struct smcinvoke_tzcb_req *cb_req = NULL; + struct smcinvoke_server_info *srvr_info = NULL; + + if (buf_len < sizeof(struct smcinvoke_tzcb_req)) + return; + + cb_req = buf; + /* check whether it is to be served by kernel or userspace */ + if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) { + return process_kernel_obj(buf, buf_len); + } else if (TZHANDLE_IS_MEM_OBJ(cb_req->hdr.tzhandle)) { + return process_mem_obj(buf, buf_len); + } else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) { + cb_req->result = OBJECT_ERROR_INVALID; + return; + } + + cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL); + if (!cb_txn) { + ret = OBJECT_ERROR_KMEM; + goto out; + } + + cb_txn->state = SMCINVOKE_REQ_PLACED; + cb_txn->cb_req = cb_req; + cb_txn->cb_req_bytes = buf_len; + cb_txn->filp_to_release = arr_filp; + + mutex_lock(&g_smcinvoke_lock); + srvr_info = find_cb_server_locked( + TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle)); + if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) { + mutex_unlock(&g_smcinvoke_lock); + goto out; + } + + cb_txn->txn_id = ++srvr_info->txn_id; + hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id); + mutex_unlock(&g_smcinvoke_lock); + wake_up_interruptible(&srvr_info->req_wait_q); + wait_event(srvr_info->rsp_wait_q, + (cb_txn->state == SMCINVOKE_REQ_PROCESSED) || + (srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)); +out: + /* + * If we are here, either req is processed or not + * if processed, result would have been set by txn processor + * if not processed, we should set result with ret which should have + * correct value that TZ/TA can understand + */ + if (!cb_txn || (cb_txn->state != SMCINVOKE_REQ_PROCESSED)) { + cb_req->result = ret; + if (srvr_info && + srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT && + OBJECT_OP_METHODID(cb_req->hdr.op) == OBJECT_OP_RELEASE) { + mutex_lock(&g_smcinvoke_lock); + put_pending_cbobj_locked( + TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle), + TZHANDLE_GET_OBJID(cb_req->hdr.tzhandle)); + mutex_unlock(&g_smcinvoke_lock); + } + } + kfree(cb_txn); +} + +static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size, struct 
smcinvoke_cmd_req *req, union smcinvoke_arg *args_buf) { int ret = -EINVAL, i = 0; union smcinvoke_tz_args *tz_args = NULL; size_t offset = sizeof(struct smcinvoke_msg_hdr) + - object_counts_total(req->counts) * + OBJECT_COUNTS_TOTAL(req->counts) * sizeof(union smcinvoke_tz_args); if (offset > buf_size) @@ -237,7 +907,10 @@ static int marshal_out(void *buf, uint32_t buf_size, tz_args = (union smcinvoke_tz_args *) (buf + sizeof(struct smcinvoke_msg_hdr)); - tz_args += object_counts_num_BI(req->counts); + tz_args += OBJECT_COUNTS_NUM_BI(req->counts); + + if (args_buf == NULL) + return 0; FOR_ARGS(i, req->counts, BO) { args_buf[i].b.size = tz_args->b.size; @@ -254,15 +927,17 @@ static int marshal_out(void *buf, uint32_t buf_size, } tz_args++; } - tz_args += object_counts_num_OI(req->counts); + tz_args += OBJECT_COUNTS_NUM_OI(req->counts); FOR_ARGS(i, req->counts, OO) { /* - * create a new FD and assign to output object's - * context + * create a new FD and assign to output object's context. + * We are passing cb_server_fd from output param in case OO + * is a CBObj. For CBObj, we have to ensure that it is sent + * to server who serves it and that info comes from USpace. */ - ret = get_fd_from_tzhandle(tz_args->tzhandle, - &(args_buf[i].o.fd)); + ret = get_uhandle_from_tzhandle(tz_args->handle, + args_buf[i].o.cb_server_fd, &(args_buf[i].o.fd)); if (ret) goto out; tz_args++; @@ -272,6 +947,93 @@ static int marshal_out(void *buf, uint32_t buf_size, return ret; } +static bool is_inbound_req(int val) +{ + return (val == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED || + val == QSEOS_RESULT_INCOMPLETE || + val == QSEOS_RESULT_BLOCKED_ON_LISTENER); +} + +static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len, + uint8_t *out_buf, size_t out_buf_len, + struct smcinvoke_cmd_req *req, + union smcinvoke_arg *args_buf, + bool *tz_acked) +{ + int ret = 0, cmd; + struct scm_desc desc = {0}; + struct file *arr_filp[OBJECT_COUNTS_MAX_OO] = {NULL}; + + *tz_acked = false; + /* buf size should be page aligned */ + if ((in_buf_len % PAGE_SIZE) != 0 || (out_buf_len % PAGE_SIZE) != 0) + return -EINVAL; + + desc.arginfo = SMCINVOKE_INVOKE_PARAM_ID; + desc.args[0] = (uint64_t)virt_to_phys(in_buf); + desc.args[1] = in_buf_len; + desc.args[2] = (uint64_t)virt_to_phys(out_buf); + desc.args[3] = out_buf_len; + cmd = SMCINVOKE_INVOKE_CMD; + dmac_flush_range(in_buf, in_buf + in_buf_len); + dmac_flush_range(out_buf, out_buf + out_buf_len); + /* + * purpose of lock here is to ensure that any CB obj that may be going + * to user as OO is not released by piggyback message on another invoke + * request. We should not move this lock to process_invoke_req() because + * that will either cause deadlock or prevent any other invoke request + * to come in. 
We release this lock when either + * a) TZ requires HLOS action to complete ongoing invoke operation + * b) Final response to invoke has been marshalled out + */ + while (1) { + mutex_lock(&g_smcinvoke_lock); + ret = scm_call2(cmd, &desc); + if (!ret && !is_inbound_req(desc.ret[0])) { + req->result = (int32_t)desc.ret[1]; + /* dont marshal if Obj returns an error */ + if (!req->result) { + dmac_inv_range(in_buf, in_buf + in_buf_len); + if (args_buf != NULL) + ret = marshal_out_invoke_req(in_buf, + in_buf_len, req, args_buf); + } + *tz_acked = true; + } + mutex_unlock(&g_smcinvoke_lock); + + if (cmd == SMCINVOKE_CB_RSP_CMD) + release_filp(arr_filp, OBJECT_COUNTS_MAX_OO); + + if (ret || !is_inbound_req(desc.ret[0])) + break; + + /* process listener request */ + if (desc.ret[0] == QSEOS_RESULT_INCOMPLETE || + desc.ret[0] == QSEOS_RESULT_BLOCKED_ON_LISTENER) + ret = qseecom_process_listener_from_smcinvoke(&desc); + + /* + * qseecom does not understand smcinvoke's callback object && + * erringly sets ret value as -EINVAL :( We need to handle it. + */ + if (ret && desc.ret[0] != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) + break; + + dmac_inv_range(out_buf, out_buf + out_buf_len); + + if (desc.ret[0] == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) { + process_tzcb_req(out_buf, out_buf_len, arr_filp); + memset(&desc, 0, sizeof(struct scm_desc)); + desc.arginfo = SMCINVOKE_CB_RSP_PARAM_ID; + desc.args[0] = (uint64_t)virt_to_phys(out_buf); + desc.args[1] = out_buf_len; + cmd = SMCINVOKE_CB_RSP_CMD; + dmac_flush_range(out_buf, out_buf + out_buf_len); + } + } + return ret; +} /* * SMC expects arguments in following format * --------------------------------------------------------------------------- @@ -287,38 +1049,42 @@ static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req, uint32_t i = 0; size_t total_size = sizeof(struct smcinvoke_msg_hdr) + - object_counts_total(req->counts) * + OBJECT_COUNTS_TOTAL(req->counts) * sizeof(union smcinvoke_tz_args); /* Computed total_size should be 8 bytes aligned from start of buf */ total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE); /* each buffer has to be 8 bytes aligned */ - while (i < object_counts_num_buffers(req->counts)) + while (i < OBJECT_COUNTS_NUM_buffers(req->counts)) total_size = size_add(total_size, size_align(args_buf[i++].b.size, SMCINVOKE_ARGS_ALIGN_SIZE)); - /* Since we're using get_free_pages, no need for explicit PAGE align */ - return total_size; + return PAGE_ALIGN(total_size); } -static int marshal_in(const struct smcinvoke_cmd_req *req, +static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req, const union smcinvoke_arg *args_buf, uint32_t tzhandle, - uint8_t *buf, size_t buf_size, struct file **arr_filp) + uint8_t *buf, size_t buf_size, struct file **arr_filp, + int32_t *tzhandles_to_release) { - int ret = -EINVAL, i = 0; - union smcinvoke_tz_args *tz_args = NULL; - struct smcinvoke_msg_hdr msg_hdr = {tzhandle, req->op, req->counts}; + int ret = -EINVAL, i = 0, j = 0, k = 0; + const struct smcinvoke_msg_hdr msg_hdr = { + tzhandle, req->op, req->counts}; uint32_t offset = sizeof(struct smcinvoke_msg_hdr) + sizeof(union smcinvoke_tz_args) * - object_counts_total(req->counts); + OBJECT_COUNTS_TOTAL(req->counts); + union smcinvoke_tz_args *tz_args = NULL; if (buf_size < offset) goto out; *(struct smcinvoke_msg_hdr *)buf = msg_hdr; - tz_args = (union smcinvoke_tz_args *) - (buf + sizeof(struct smcinvoke_msg_hdr)); + tz_args = (union smcinvoke_tz_args *)(buf + + sizeof(struct smcinvoke_msg_hdr)); + + if (args_buf == NULL) + 
return 0; FOR_ARGS(i, req->counts, BI) { offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE); @@ -326,11 +1092,10 @@ static int marshal_in(const struct smcinvoke_cmd_req *req, (args_buf[i].b.size > (buf_size - offset))) goto out; - tz_args->b.offset = offset; - tz_args->b.size = args_buf[i].b.size; - tz_args++; + tz_args[i].b.offset = offset; + tz_args[i].b.size = args_buf[i].b.size; - if (copy_from_user(buf+offset, + if (copy_from_user(buf + offset, (void __user *)(uintptr_t)(args_buf[i].b.addr), args_buf[i].b.size)) goto out; @@ -343,167 +1108,590 @@ static int marshal_in(const struct smcinvoke_cmd_req *req, (args_buf[i].b.size > (buf_size - offset))) goto out; - tz_args->b.offset = offset; - tz_args->b.size = args_buf[i].b.size; - tz_args++; - + tz_args[i].b.offset = offset; + tz_args[i].b.size = args_buf[i].b.size; offset += args_buf[i].b.size; } FOR_ARGS(i, req->counts, OI) { - if (get_tzhandle_from_fd(args_buf[i].o.fd, - &arr_filp[i], &(tz_args->tzhandle))) + ret = get_tzhandle_from_uhandle(args_buf[i].o.fd, + args_buf[i].o.cb_server_fd, &arr_filp[j++], + &(tz_args[i].handle)); + if (ret) goto out; - tz_args++; + tzhandles_to_release[k++] = tz_args[i].handle; } ret = 0; out: return ret; } -long smcinvoke_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn, + struct smcinvoke_accept *user_req, int srvr_id) { - int ret = -1, i = 0, nr_args = 0; - struct smcinvoke_cmd_req req = {0}; - void *in_msg = NULL; - size_t inmsg_size = 0; - void *out_msg = NULL; - union smcinvoke_arg *args_buf = NULL; - struct file *filp_to_release[object_counts_max_OO] = {NULL}; - struct smcinvoke_tzobj_context *tzobj = filp->private_data; + int ret = 0, i = 0; + union smcinvoke_arg tmp_arg; + struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req; + union smcinvoke_tz_args *tz_args = tzcb_req->args; + size_t tzcb_req_len = cb_txn->cb_req_bytes; + size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req); + size_t user_req_buf_offset = sizeof(union smcinvoke_arg) * + OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts); + + if (tz_buf_offset > tzcb_req_len) { + ret = -EINVAL; + goto out; + } - switch (cmd) { - case SMCINVOKE_IOCTL_INVOKE_REQ: - if (_IOC_SIZE(cmd) != sizeof(req)) { - ret = -EINVAL; + user_req->txn_id = cb_txn->txn_id; + if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id, + (int64_t *)&user_req->cbobj_id)) { + ret = -EINVAL; + goto out; + } + user_req->op = tzcb_req->hdr.op; + user_req->counts = tzcb_req->hdr.counts; + user_req->argsize = sizeof(union smcinvoke_arg); + + FOR_ARGS(i, tzcb_req->hdr.counts, BI) { + user_req_buf_offset = size_align(user_req_buf_offset, + SMCINVOKE_ARGS_ALIGN_SIZE); + tmp_arg.b.size = tz_args[i].b.size; + if ((tz_args[i].b.offset > tzcb_req_len) || + (tz_args[i].b.size > tzcb_req_len - tz_args[i].b.offset) || + (user_req_buf_offset > user_req->buf_len) || + (tmp_arg.b.size > + user_req->buf_len - user_req_buf_offset)) { + ret = -EINVAL; + pr_err("%s: buffer overflow detected\n", __func__); + goto out; + } + tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset; + + if (copy_to_user(u64_to_user_ptr + (user_req->buf_addr + i * sizeof(tmp_arg)), + &tmp_arg, sizeof(tmp_arg)) || + copy_to_user(u64_to_user_ptr(tmp_arg.b.addr), + (uint8_t *)(tzcb_req) + tz_args[i].b.offset, + tz_args[i].b.size)) { + ret = -EFAULT; goto out; } - ret = copy_from_user(&req, (void __user *)arg, sizeof(req)); + user_req_buf_offset += tmp_arg.b.size; + } + FOR_ARGS(i, tzcb_req->hdr.counts, BO) { + user_req_buf_offset = 
size_align(user_req_buf_offset, + SMCINVOKE_ARGS_ALIGN_SIZE); + + tmp_arg.b.size = tz_args[i].b.size; + if ((user_req_buf_offset > user_req->buf_len) || + (tmp_arg.b.size > + user_req->buf_len - user_req_buf_offset)) { + ret = -EINVAL; + pr_err("%s: buffer overflow detected\n", __func__); + goto out; + } + tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset; + + if (copy_to_user(u64_to_user_ptr + (user_req->buf_addr + i * sizeof(tmp_arg)), + &tmp_arg, sizeof(tmp_arg))) { + ret = -EFAULT; + goto out; + } + user_req_buf_offset += tmp_arg.b.size; + } + FOR_ARGS(i, tzcb_req->hdr.counts, OI) { + /* + * create a new FD and assign to output object's + * context + */ + ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id, + &(tmp_arg.o.fd)); if (ret) { - ret = -EFAULT; + ret = -EINVAL; + goto out; + } + if (copy_to_user(u64_to_user_ptr + (user_req->buf_addr + i * sizeof(tmp_arg)), + &tmp_arg, sizeof(tmp_arg))) { + ret = -EFAULT; + goto out; + } + } +out: + return ret; +} + +static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req, + struct smcinvoke_cb_txn *cb_txn, + struct file **arr_filp) +{ + int ret = -EINVAL, i = 0; + int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0}; + struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req; + union smcinvoke_tz_args *tz_args = tzcb_req->args; + + tzcb_req->result = user_req->result; + FOR_ARGS(i, tzcb_req->hdr.counts, BO) { + union smcinvoke_arg tmp_arg; + + if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr( + user_req->buf_addr + i * sizeof(union smcinvoke_arg)), + sizeof(union smcinvoke_arg))) { + ret = -EFAULT; + goto out; + } + if (tmp_arg.b.size > tz_args[i].b.size) + goto out; + if (copy_from_user((uint8_t *)(tzcb_req) + tz_args[i].b.offset, + u64_to_user_ptr(tmp_arg.b.addr), + tmp_arg.b.size)) { + ret = -EFAULT; goto out; } + } - nr_args = object_counts_num_buffers(req.counts) + - object_counts_num_objects(req.counts); + FOR_ARGS(i, tzcb_req->hdr.counts, OO) { + union smcinvoke_arg tmp_arg; - if (req.argsize != sizeof(union smcinvoke_arg)) { - ret = -EINVAL; + if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr( + user_req->buf_addr + i * sizeof(union smcinvoke_arg)), + sizeof(union smcinvoke_arg))) { + ret = -EFAULT; goto out; } + ret = get_tzhandle_from_uhandle(tmp_arg.o.fd, 0, &arr_filp[i], + &(tz_args[i].handle)); + if (ret) + goto out; + tzhandles_to_release[i] = tz_args[i].handle; + } + ret = 0; +out: + if (ret) + release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO); + return ret; +} - if (nr_args) { +static void process_piggyback_data(void *buf, size_t buf_size) +{ + int i; + struct smcinvoke_tzcb_req req = {0}; + struct smcinvoke_piggyback_msg *msg = buf; + int32_t *objs = msg->objs; + + dmac_flush_range(buf, buf + buf_size); + if (msg->counts) + dmac_inv_range(buf, buf + buf_size); + + for (i = 0; i < msg->counts; i++) { + req.hdr.op = msg->op; + req.hdr.counts = 0; /* release op does not require any args */ + req.hdr.tzhandle = objs[i]; + process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL); + /* cbobjs_in_flight will be adjusted during CB processing */ + } +} - args_buf = kzalloc(nr_args * req.argsize, GFP_KERNEL); - if (!args_buf) { - ret = -ENOMEM; - goto out; - } - ret = copy_from_user(args_buf, - (void __user *)(uintptr_t)(req.args), - nr_args * req.argsize); +static long process_ack_local_obj(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1; + int32_t local_obj = SMCINVOKE_USERSPACE_OBJ_NULL; + struct smcinvoke_file_data *filp_data = 
filp->private_data; + + if (_IOC_SIZE(cmd) != sizeof(int32_t)) + return -EINVAL; + + ret = copy_from_user(&local_obj, (void __user *)(uintptr_t)arg, + sizeof(int32_t)); + if (ret) + return -EFAULT; + + mutex_lock(&g_smcinvoke_lock); + if (UHANDLE_IS_CB_OBJ(local_obj)) + ret = put_pending_cbobj_locked(filp_data->server_id, + UHANDLE_GET_CB_OBJ(local_obj)); + mutex_unlock(&g_smcinvoke_lock); + + return ret; +} + +static long process_server_req(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1; + int64_t server_fd = -1; + struct smcinvoke_server server_req = {0}; + struct smcinvoke_server_info *server_info = NULL; + + if (_IOC_SIZE(cmd) != sizeof(server_req)) + return -EINVAL; + + ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg, + sizeof(server_req)); + if (ret) + return -EFAULT; + + server_info = kzalloc(sizeof(*server_info), GFP_KERNEL); + if (!server_info) + return -ENOMEM; + init_waitqueue_head(&server_info->req_wait_q); + init_waitqueue_head(&server_info->rsp_wait_q); + server_info->cb_buf_size = server_req.cb_buf_size; + hash_init(server_info->reqs_table); + hash_init(server_info->responses_table); + INIT_LIST_HEAD(&server_info->pending_cbobjs); + + mutex_lock(&g_smcinvoke_lock); + + server_info->server_id = next_cb_server_id_locked(); + hash_add(g_cb_servers, &server_info->hash, + server_info->server_id); + if (g_max_cb_buf_size < server_req.cb_buf_size) + g_max_cb_buf_size = server_req.cb_buf_size; + + mutex_unlock(&g_smcinvoke_lock); + ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER, + server_info->server_id, &server_fd); + + if (ret) { + mutex_lock(&g_smcinvoke_lock); + hash_del(&server_info->hash); + mutex_unlock(&g_smcinvoke_lock); + kfree(server_info); + } + return server_fd; +} + +static long process_accept_req(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1; + struct smcinvoke_file_data *server_obj = filp->private_data; + struct smcinvoke_accept user_args = {0}; + struct smcinvoke_cb_txn *cb_txn = NULL; + struct smcinvoke_server_info *server_info = NULL; + + if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) + return -EINVAL; + + if (copy_from_user(&user_args, (void __user *)arg, + sizeof(struct smcinvoke_accept))) + return -EFAULT; + + if (user_args.argsize != sizeof(union smcinvoke_arg)) + return -EINVAL; + + /* ACCEPT is available only on server obj */ + if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) + return -EPERM; + + mutex_lock(&g_smcinvoke_lock); + server_info = find_cb_server_locked(server_obj->server_id); + mutex_unlock(&g_smcinvoke_lock); + if (!server_info) + return -EINVAL; + + /* First check if it has response otherwise wait for req */ + if (user_args.has_resp) { + mutex_lock(&g_smcinvoke_lock); + cb_txn = find_cbtxn_locked(server_info, user_args.txn_id, + SMCINVOKE_REQ_PROCESSING); + mutex_unlock(&g_smcinvoke_lock); + /* cb_txn can be null if userspace provides wrong txn id. 
*/ + if (!cb_txn) { + pr_err("%s: Invalid txn received = %d\n", + __func__, user_args.txn_id); + goto out; + } + ret = marshal_out_tzcb_req(&user_args, cb_txn, + cb_txn->filp_to_release); + /* + * if client did not set error and we get error locally + * we return local error to TA + */ + if (ret && cb_txn->cb_req->result == 0) + cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL; + + if (OBJECT_OP_METHODID(user_args.op) == OBJECT_OP_RELEASE) + put_pending_cbobj_locked( + TZHANDLE_GET_SERVER(cb_txn->cb_req->hdr.tzhandle), + TZHANDLE_GET_OBJID(cb_txn->cb_req->hdr.tzhandle)); + + cb_txn->state = SMCINVOKE_REQ_PROCESSED; + wake_up(&server_info->rsp_wait_q); + /* + * if marshal_out fails, we should let userspace release + * any ref/obj it created for CB processing + */ + if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts)) + goto out; + } + /* + * Once response has been delivered, thread will wait for another + * callback req to process. + */ + do { + ret = wait_event_interruptible(server_info->req_wait_q, + !hash_empty(server_info->reqs_table)); + if (ret) + goto out; + + mutex_lock(&g_smcinvoke_lock); + cb_txn = find_cbtxn_locked(server_info, + SMCINVOKE_NEXT_AVAILABLE_TXN, + SMCINVOKE_REQ_PLACED); + mutex_unlock(&g_smcinvoke_lock); + if (cb_txn) { + cb_txn->state = SMCINVOKE_REQ_PROCESSING; + ret = marshal_in_tzcb_req(cb_txn, &user_args, + server_obj->server_id); if (ret) { - ret = -EFAULT; - goto out; + cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL; + cb_txn->state = SMCINVOKE_REQ_PROCESSED; + wake_up_interruptible(&server_info->rsp_wait_q); + continue; } + mutex_lock(&g_smcinvoke_lock); + hash_add(server_info->responses_table, &cb_txn->hash, + cb_txn->txn_id); + mutex_unlock(&g_smcinvoke_lock); + ret = copy_to_user((void __user *)arg, &user_args, + sizeof(struct smcinvoke_accept)); } + } while (!cb_txn); +out: + return ret; +} - inmsg_size = compute_in_msg_size(&req, args_buf); - in_msg = (void *)__get_free_pages(GFP_KERNEL, - get_order(inmsg_size)); - if (!in_msg) { +static long process_invoke_req(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1, nr_args = 0; + struct smcinvoke_cmd_req req = {0}; + void *in_msg = NULL, *out_msg = NULL; + size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE; + union smcinvoke_arg *args_buf = NULL; + struct smcinvoke_file_data *tzobj = filp->private_data; + /* + * Hold reference to remote object until invoke op is not + * completed. Release once invoke is done. + */ + struct file *filp_to_release[OBJECT_COUNTS_MAX_OO] = {NULL}; + /* + * If anything goes wrong, release alloted tzhandles for + * local objs which could be either CBObj or MemObj. 
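/*
 * Illustrative sketch (not part of the patch): the two wait-queue handshake
 * used above between process_tzcb_req() (TZ-facing producer) and
 * process_accept_req() (userspace server thread). The producer queues a
 * transaction and wakes the accept thread; the accept thread sleeps until
 * work exists, serves it, and wakes the producer. The example_txn type and
 * the single-slot "pending" variable are simplifications invented here; the
 * real code keeps transactions in per-server hash tables.
 */
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_txn {
	int id;
	bool processed;
};

static DEFINE_MUTEX(example_lock);
static DECLARE_WAIT_QUEUE_HEAD(example_req_wq);
static DECLARE_WAIT_QUEUE_HEAD(example_rsp_wq);
static struct example_txn *pending;

/* TZ-facing side: queue one transaction and block until it is served */
static void example_post_and_wait(struct example_txn *txn)
{
	mutex_lock(&example_lock);
	pending = txn;
	mutex_unlock(&example_lock);

	wake_up_interruptible(&example_req_wq);
	wait_event(example_rsp_wq, txn->processed);
}

/* server side: sleep until a transaction exists, then serve and ack it */
static int example_accept_one(void)
{
	struct example_txn *txn;
	int ret;

	ret = wait_event_interruptible(example_req_wq, pending != NULL);
	if (ret)
		return ret;	/* interrupted by a signal */

	mutex_lock(&example_lock);
	txn = pending;
	pending = NULL;
	txn->processed = true;	/* real code marshals request and response */
	mutex_unlock(&example_lock);

	wake_up(&example_rsp_wq);
	return 0;
}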
+ */ + int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0}; + bool tz_acked = false; + + if (_IOC_SIZE(cmd) != sizeof(req)) { + ret = -EINVAL; + goto out; + } + if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + ret = -EPERM; + goto out; + } + ret = copy_from_user(&req, (void __user *)arg, sizeof(req)); + if (ret) { + ret = -EFAULT; + goto out; + } + + nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) + + OBJECT_COUNTS_NUM_objects(req.counts); + + if (req.argsize != sizeof(union smcinvoke_arg)) { + ret = -EINVAL; + goto out; + } + + if (nr_args) { + args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL); + if (!args_buf) { ret = -ENOMEM; goto out; } - out_msg = (void *)__get_free_page(GFP_KERNEL); - if (!out_msg) { - ret = -ENOMEM; + ret = copy_from_user(args_buf, u64_to_user_ptr(req.args), + nr_args * req.argsize); + + if (ret) { + ret = -EFAULT; goto out; } + } - ret = marshal_in(&req, args_buf, tzobj->tzhandle, in_msg, - inmsg_size, filp_to_release); - if (ret) - goto out; + inmsg_size = compute_in_msg_size(&req, args_buf); + in_msg = (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP, + get_order(inmsg_size)); + if (!in_msg) { + ret = -ENOMEM; + goto out; + } + memset(in_msg, 0, inmsg_size); + + mutex_lock(&g_smcinvoke_lock); + outmsg_size = PAGE_ALIGN(g_max_cb_buf_size); + mutex_unlock(&g_smcinvoke_lock); + out_msg = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, + get_order(outmsg_size)); + if (!out_msg) { + ret = -ENOMEM; + goto out; + } + memset(out_msg, 0, outmsg_size); - ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg, - SMCINVOKE_TZ_MIN_BUF_SIZE, &req.result); - if (ret) - goto out; + ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg, + inmsg_size, filp_to_release, tzhandles_to_release); + if (ret) + goto out; + + ret = prepare_send_scm_msg(in_msg, inmsg_size, out_msg, outmsg_size, + &req, args_buf, &tz_acked); + /* + * If scm_call is success, TZ owns responsibility to release + * refs for local objs. + */ + if (tz_acked == false) + goto out; + memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release)); + + /* + * if invoke op results in an err, no need to marshal_out and + * copy args buf to user space + */ + if (!req.result) { /* - * if invoke op results in an err, no need to marshal_out and - * copy args buf to user space + * Dont check ret of marshal_out because there might be a + * FD for OO which userspace must release even if an error + * occurs. Releasing FD from user space is much simpler than + * doing here. ORing of ret is reqd not to miss past error */ - if (!req.result) { - ret = marshal_out(in_msg, inmsg_size, &req, args_buf); + ret |= copy_to_user(u64_to_user_ptr(req.args), args_buf, + nr_args * req.argsize); + } + /* copy result of invoke op */ + ret |= copy_to_user((void __user *)arg, &req, sizeof(req)); + if (ret) + goto out; - ret |= copy_to_user( - (void __user *)(uintptr_t)(req.args), - args_buf, nr_args * req.argsize); - } - ret |= copy_to_user((void __user *)arg, &req, sizeof(req)); - if (ret) - goto out; + /* Outbuf could be carrying local objs to be released. 
*/ + process_piggyback_data(out_msg, outmsg_size); +out: + release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO); + if (ret) + release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO); + free_pages((long)out_msg, get_order(outmsg_size)); + free_pages((long)in_msg, get_order(inmsg_size)); + kfree(args_buf); + return ret; +} + +static long smcinvoke_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + switch (cmd) { + case SMCINVOKE_IOCTL_INVOKE_REQ: + ret = process_invoke_req(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_ACCEPT_REQ: + ret = process_accept_req(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_SERVER_REQ: + ret = process_server_req(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_ACK_LOCAL_OBJ: + ret = process_ack_local_obj(filp, cmd, arg); break; default: ret = -ENOIOCTLCMD; break; } -out: - free_page((long)out_msg); - free_pages((long)in_msg, get_order(inmsg_size)); - kfree(args_buf); - for (i = 0; i < object_counts_max_OO; i++) { - if (filp_to_release[i]) - fput(filp_to_release[i]); - } - return ret; } static int smcinvoke_open(struct inode *nodp, struct file *filp) { - struct smcinvoke_tzobj_context *tzcxt = NULL; + struct smcinvoke_file_data *tzcxt = NULL; tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL); if (!tzcxt) return -ENOMEM; tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ; + tzcxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ; filp->private_data = tzcxt; return 0; } +static int destroy_cb_server(uint16_t server_id) +{ + struct smcinvoke_server_info *server = NULL; + + mutex_lock(&g_smcinvoke_lock); + server = find_cb_server_locked(server_id); + if (server) { + if (!list_empty(&server->pending_cbobjs)) { + server->state = SMCINVOKE_SERVER_STATE_DEFUNCT; + wake_up_interruptible(&server->rsp_wait_q); + /* + * we dont worry about threads waiting on req_wait_q + * because server can't be closed as long as there is + * atleast one accept thread active + */ + } else { + hash_del(&server->hash); + kfree(server); + } + } + mutex_unlock(&g_smcinvoke_lock); + return 0; +} + static int smcinvoke_release(struct inode *nodp, struct file *filp) { - int ret = 0, smcinvoke_result = 0; + int ret = 0; + bool release_handles; uint8_t *in_buf = NULL; uint8_t *out_buf = NULL; struct smcinvoke_msg_hdr hdr = {0}; - struct smcinvoke_tzobj_context *tzobj = filp->private_data; - uint32_t tzhandle = tzobj->tzhandle; + struct smcinvoke_file_data *file_data = filp->private_data; + struct smcinvoke_cmd_req req = {0}; + uint32_t tzhandle = 0; + if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) { + ret = destroy_cb_server(file_data->server_id); + goto out; + } + + tzhandle = file_data->tzhandle; /* Root object is special in sense it is indestructible */ if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ) goto out; - in_buf = (uint8_t *)__get_free_page(GFP_KERNEL); - out_buf = (uint8_t *)__get_free_page(GFP_KERNEL); - if (!in_buf || !out_buf) + in_buf = (uint8_t *)__get_free_page(GFP_KERNEL | __GFP_COMP); + out_buf = (uint8_t *)__get_free_page(GFP_KERNEL | __GFP_COMP); + if (!in_buf || !out_buf) { + ret = -ENOMEM; goto out; + } hdr.tzhandle = tzhandle; - hdr.op = object_op_RELEASE; + hdr.op = OBJECT_OP_RELEASE; hdr.counts = 0; *(struct smcinvoke_msg_hdr *)in_buf = hdr; - ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, - out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, &smcinvoke_result); + ret = prepare_send_scm_msg(in_buf, SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, + SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL, &release_handles); out: kfree(filp->private_data); 
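/*
 * Illustrative sketch (not part of the patch): how the counts word and the
 * invoke input-message size used by process_invoke_req() above are formed.
 * The bit layout (4 bits each for BI, BO, OI and OO) matches the
 * OBJECT_COUNTS_* macros added to smcinvoke_object.h later in this patch;
 * the 8-byte argument alignment and final page alignment mirror
 * compute_in_msg_size(). HDR_SIZE, SLOT_SIZE and PAGE_SIZE_EX are stand-ins
 * for sizeof(struct smcinvoke_msg_hdr), sizeof(union smcinvoke_tz_args) and
 * PAGE_SIZE, whose exact values are not shown in this hunk.
 */
#include <stddef.h>
#include <stdint.h>

#define PACK_COUNTS(bi, bo, oi, oo) \
	((uint32_t)((bi) | ((bo) << 4) | ((oi) << 8) | ((oo) << 12)))
#define NUM_BI(k)	(((k) >> 0) & 0xF)
#define NUM_BO(k)	(((k) >> 4) & 0xF)
#define NUM_OI(k)	(((k) >> 8) & 0xF)
#define NUM_OO(k)	(((k) >> 12) & 0xF)
#define TOTAL(k)	(NUM_BI(k) + NUM_BO(k) + NUM_OI(k) + NUM_OO(k))

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))
#define HDR_SIZE	16	/* stand-in for sizeof(struct smcinvoke_msg_hdr) */
#define SLOT_SIZE	16	/* stand-in for sizeof(union smcinvoke_tz_args) */
#define PAGE_SIZE_EX	4096

static size_t in_msg_size(uint32_t counts, const size_t *buf_sizes,
			  size_t nbufs)
{
	size_t i, total = HDR_SIZE + TOTAL(counts) * SLOT_SIZE;

	total = ALIGN_UP(total, 8);		/* args area is 8-byte aligned */
	for (i = 0; i < nbufs; i++)		/* each buffer 8-byte aligned  */
		total += ALIGN_UP(buf_sizes[i], 8);

	return ALIGN_UP(total, PAGE_SIZE_EX);	/* whole message page aligned */
}

/* e.g. one 100-byte input buffer plus one output object:            */
/*   uint32_t counts = PACK_COUNTS(1, 0, 0, 1);                      */
/*   size_t bufs[] = { 100 };                                        */
/*   size_t need = in_msg_size(counts, bufs, 1);  -> one full page   */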
free_page((long)in_buf); @@ -512,7 +1700,7 @@ static int smcinvoke_release(struct inode *nodp, struct file *filp) return ret; } -static int __init smcinvoke_init(void) +static int smcinvoke_probe(struct platform_device *pdev) { unsigned int baseminor = 0; unsigned int count = 1; @@ -538,7 +1726,7 @@ static int __init smcinvoke_init(void) goto exit_destroy_class; } - cdev_init(&smcinvoke_cdev, &smcinvoke_fops); + cdev_init(&smcinvoke_cdev, &g_smcinvoke_fops); smcinvoke_cdev.owner = THIS_MODULE; rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0), @@ -547,6 +1735,8 @@ static int __init smcinvoke_init(void) pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV); goto exit_destroy_device; } + smcinvoke_pdev = pdev; + return 0; exit_destroy_device: @@ -555,11 +1745,10 @@ static int __init smcinvoke_init(void) class_destroy(driver_class); exit_unreg_chrdev_region: unregister_chrdev_region(smcinvoke_device_no, count); - return rc; } -static void __exit smcinvoke_exit(void) +static int smcinvoke_remove(struct platform_device *pdev) { int count = 1; @@ -567,8 +1756,37 @@ static void __exit smcinvoke_exit(void) device_destroy(driver_class, smcinvoke_device_no); class_destroy(driver_class); unregister_chrdev_region(smcinvoke_device_no, count); + return 0; } -device_initcall(smcinvoke_init); + +static const struct of_device_id smcinvoke_match[] = { + { + .compatible = "qcom,smcinvoke", + }, + {}, +}; + +static struct platform_driver smcinvoke_plat_driver = { + .probe = smcinvoke_probe, + .remove = smcinvoke_remove, + .driver = { + .name = "smcinvoke", + .owner = THIS_MODULE, + .of_match_table = smcinvoke_match, + }, +}; + +static int smcinvoke_init(void) +{ + return platform_driver_register(&smcinvoke_plat_driver); +} + +static void smcinvoke_exit(void) +{ + platform_driver_unregister(&smcinvoke_plat_driver); +} + +module_init(smcinvoke_init); module_exit(smcinvoke_exit); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smcinvoke_object.h b/drivers/soc/qcom/smcinvoke_object.h index 2761f87e14f550dd25d36d3c96562ec34ac59ab7..792a96bb686f316acbaa71e69cc820168604b885 100644 --- a/drivers/soc/qcom/smcinvoke_object.h +++ b/drivers/soc/qcom/smcinvoke_object.h @@ -14,38 +14,76 @@ #include -#define object_op_METHOD_MASK ((uint32_t)0x0000FFFFu) -#define object_op_RELEASE (object_op_METHOD_MASK - 0) -#define object_op_RETAIN (object_op_METHOD_MASK - 1) +/* + * Method bits are not modified by transport layers. These describe the + * method (member function) being requested by the client. 
+ */
+#define OBJECT_OP_METHOD_MASK (0x0000FFFFu)
+#define OBJECT_OP_METHODID(op) ((op) & OBJECT_OP_METHOD_MASK)
+#define OBJECT_OP_RELEASE (OBJECT_OP_METHOD_MASK - 0)
+#define OBJECT_OP_RETAIN (OBJECT_OP_METHOD_MASK - 1)
+#define OBJECT_OP_MAP_REGION 0
-#define object_counts_max_BI 0xF
-#define object_counts_max_BO 0xF
-#define object_counts_max_OI 0xF
-#define object_counts_max_OO 0xF
+#define OBJECT_COUNTS_MAX_BI 0xF
+#define OBJECT_COUNTS_MAX_BO 0xF
+#define OBJECT_COUNTS_MAX_OI 0xF
+#define OBJECT_COUNTS_MAX_OO 0xF
 /* unpack counts */
-#define object_counts_num_BI(k) ((size_t) (((k) >> 0) & object_counts_max_BI))
-#define object_counts_num_BO(k) ((size_t) (((k) >> 4) & object_counts_max_BO))
-#define object_counts_num_OI(k) ((size_t) (((k) >> 8) & object_counts_max_OI))
-#define object_counts_num_OO(k) ((size_t) (((k) >> 12) & object_counts_max_OO))
-#define object_counts_num_buffers(k) \
-	(object_counts_num_BI(k) + object_counts_num_BO(k))
+#define OBJECT_COUNTS_NUM_BI(k) ((size_t) (((k) >> 0) & OBJECT_COUNTS_MAX_BI))
+#define OBJECT_COUNTS_NUM_BO(k) ((size_t) (((k) >> 4) & OBJECT_COUNTS_MAX_BO))
+#define OBJECT_COUNTS_NUM_OI(k) ((size_t) (((k) >> 8) & OBJECT_COUNTS_MAX_OI))
+#define OBJECT_COUNTS_NUM_OO(k) ((size_t) (((k) >> 12) & OBJECT_COUNTS_MAX_OO))
+#define OBJECT_COUNTS_NUM_buffers(k) \
+	(OBJECT_COUNTS_NUM_BI(k) + OBJECT_COUNTS_NUM_BO(k))
-#define object_counts_num_objects(k) \
-	(object_counts_num_OI(k) + object_counts_num_OO(k))
+#define OBJECT_COUNTS_NUM_objects(k) \
+	(OBJECT_COUNTS_NUM_OI(k) + OBJECT_COUNTS_NUM_OO(k))
 /* Indices into args[] */
-#define object_counts_index_BI(k) 0
-#define object_counts_index_BO(k) \
-	(object_counts_index_BI(k) + object_counts_num_BI(k))
-#define object_counts_index_OI(k) \
-	(object_counts_index_BO(k) + object_counts_num_BO(k))
-#define object_counts_index_OO(k) \
-	(object_counts_index_OI(k) + object_counts_num_OI(k))
-#define object_counts_total(k) \
-	(object_counts_index_OO(k) + object_counts_num_OO(k))
+#define OBJECT_COUNTS_INDEX_BI(k) 0
+#define OBJECT_COUNTS_INDEX_BO(k) \
+	(OBJECT_COUNTS_INDEX_BI(k) + OBJECT_COUNTS_NUM_BI(k))
+#define OBJECT_COUNTS_INDEX_OI(k) \
+	(OBJECT_COUNTS_INDEX_BO(k) + OBJECT_COUNTS_NUM_BO(k))
+#define OBJECT_COUNTS_INDEX_OO(k) \
+	(OBJECT_COUNTS_INDEX_OI(k) + OBJECT_COUNTS_NUM_OI(k))
+#define OBJECT_COUNTS_TOTAL(k) \
+	(OBJECT_COUNTS_INDEX_OO(k) + OBJECT_COUNTS_NUM_OO(k))
+
+#define OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \
+	((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \
+	((in_objs) << 8) | ((out_objs) << 12)))
+
+
+/* Object_invoke return codes */
+
+#define OBJECT_isOK(err) ((err) == 0)
+#define OBJECT_isERROR(err) ((err) != 0)
+
+/* Generic error codes */
+
+#define OBJECT_OK 0 /* non-specific success code */
+#define OBJECT_ERROR 1 /* non-specific error */
+#define OBJECT_ERROR_INVALID 2 /* unsupported/unrecognized request */
+#define OBJECT_ERROR_SIZE_IN 3 /* supplied buffer/string too large */
+#define OBJECT_ERROR_SIZE_OUT 4 /* supplied output buffer too small */
+
+#define OBJECT_ERROR_USERBASE 10 /* start of user-defined error range */
+
+/* Transport layer error codes */
+#define OBJECT_ERROR_DEFUNCT -90 /* object no longer exists */
+#define OBJECT_ERROR_ABORT -91 /* calling thread must exit */
+#define OBJECT_ERROR_BADOBJ -92 /* invalid object context */
+#define OBJECT_ERROR_NOSLOTS -93 /* caller's object table full */
+#define OBJECT_ERROR_MAXARGS -94 /* too many args */
+#define OBJECT_ERROR_MAXDATA -95 /* buffers too large */
+#define OBJECT_ERROR_UNAVAIL -96 /* the
request could not be processed */ +#define OBJECT_ERROR_KMEM -97 /* kernel out of memory */ +#define OBJECT_ERROR_REMOTE -98 /* local method sent to remote object */ +#define OBJECT_ERROR_BUSY -99 /* Object is busy */ #endif /* __SMCINVOKE_OBJECT_H */ diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index ac5979cf56d9d45dd24bef4b9e700f1f3a37656d..e6cb274083a50d637fc604ed1a291d0bd99a65a8 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -26,6 +26,8 @@ #include #include +#include + /* * The Shared Memory Point to Point (SMP2P) protocol facilitates communication * of a single 32-bit value between two processors. Each value has a single @@ -47,8 +49,12 @@ #define SMP2P_MAX_ENTRY_NAME 16 #define SMP2P_FEATURE_SSR_ACK 0x1 +#define SMP2P_FLAGS_RESTART_DONE_BIT 0 +#define SMP2P_FLAGS_RESTART_ACK_BIT 1 #define SMP2P_MAGIC 0x504d5324 +#define SMP2P_VERSION 1 +#define SMP2P_FEATURES SMP2P_FEATURE_SSR_ACK /** * struct smp2p_smem_item - in memory communication structure @@ -140,6 +146,10 @@ struct qcom_smp2p { unsigned valid_entries; + bool ssr_ack_enabled; + bool ssr_ack; + bool open; + unsigned local_pid; unsigned remote_pid; @@ -152,6 +162,14 @@ struct qcom_smp2p { struct list_head outbound; }; +static void *ilc; +#define SMP2P_LOG_PAGE_CNT 2 +#define SMP2P_INFO(x, ...) \ +do { \ + if (ilc) \ + ipc_log_string(ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \ +} while (0) + static void qcom_smp2p_kick(struct qcom_smp2p *smp2p) { /* Make sure any updated data is written before the kick */ @@ -159,42 +177,67 @@ static void qcom_smp2p_kick(struct qcom_smp2p *smp2p) regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit)); } -/** - * qcom_smp2p_intr() - interrupt handler for incoming notifications - * @irq: unused - * @data: smp2p driver context - * - * Handle notifications from the remote side to handle newly allocated entries - * or any changes to the state bits of existing entries. 
- */ -static irqreturn_t qcom_smp2p_intr(int irq, void *data) +static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p) { - struct smp2p_smem_item *in; + struct smp2p_smem_item *in = smp2p->in; + bool restart; + + if (!smp2p->ssr_ack_enabled) + return false; + + restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT); + if (restart == smp2p->ssr_ack) + return false; + + SMP2P_INFO("%d: SSR DETECTED\n", smp2p->remote_pid); + return true; +} + +static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *out = smp2p->out; + u32 ack; + u32 val; + + smp2p->ssr_ack = ack = !smp2p->ssr_ack; + ack = ack << SMP2P_FLAGS_RESTART_ACK_BIT; + + val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT); + val |= ack; + out->flags = val; + + qcom_smp2p_kick(smp2p); +} + +static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *out = smp2p->out; + struct smp2p_smem_item *in = smp2p->in; + u32 features; + + if (in->version == out->version) { + features = in->features & out->features; + out->features = features; + + if (features & SMP2P_FEATURE_SSR_ACK) + smp2p->ssr_ack_enabled = true; + + smp2p->open = true; + SMP2P_INFO("%d: state=open ssr_ack=%d\n", smp2p->remote_pid, + smp2p->ssr_ack_enabled); + } +} + +static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p) +{ + struct smp2p_smem_item *in = smp2p->in; struct smp2p_entry *entry; - struct qcom_smp2p *smp2p = data; unsigned long status; - unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND]; - unsigned pid = smp2p->remote_pid; - size_t size; int irq_pin; char buf[SMP2P_MAX_ENTRY_NAME]; u32 val; int i; - in = smp2p->in; - - /* Acquire smem item, if not already found */ - if (!in) { - in = qcom_smem_get(pid, smem_id, &size); - if (IS_ERR(in)) { - dev_err(smp2p->dev, - "Unable to acquire remote smp2p item\n"); - return IRQ_HANDLED; - } - - smp2p->in = in; - } - /* Match newly created entries */ for (i = smp2p->valid_entries; i < in->valid_entries; i++) { list_for_each_entry(entry, &smp2p->inbound, node) { @@ -223,6 +266,9 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data) if (!status) continue; + SMP2P_INFO("%d: %s: status:%0x val:%0x\n", + smp2p->remote_pid, entry->name, status, val); + for_each_set_bit(i, &status, 32) { if ((val & BIT(i) && test_bit(i, entry->irq_rising)) || (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) { @@ -236,6 +282,50 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data) } } } +} + +/** + * qcom_smp2p_intr() - interrupt handler for incoming notifications + * @irq: unused + * @data: smp2p driver context + * + * Handle notifications from the remote side to handle newly allocated entries + * or any changes to the state bits of existing entries. 
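/*
 * Illustrative sketch (not part of the patch): the negotiation and SSR-ack
 * handshake added above for smp2p. Both sides publish a version and a
 * feature mask; once the versions match, only the common features are kept
 * and the channel is considered open. With SSR_ACK negotiated, the remote
 * toggles its RESTART_DONE flag after a restart and the local side answers
 * by publishing the same value in RESTART_ACK. struct example_smp2p_hdr is
 * a simplification of smp2p_smem_item, invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define FEATURE_SSR_ACK		0x1
#define FLAG_RESTART_DONE	(1u << 0)
#define FLAG_RESTART_ACK	(1u << 1)

struct example_smp2p_hdr {
	uint32_t version;
	uint32_t features;
	uint32_t flags;
};

struct example_smp2p {
	struct example_smp2p_hdr *in;	/* written by the remote side */
	struct example_smp2p_hdr *out;	/* written by us */
	bool open;
	bool ssr_ack_enabled;
	bool ssr_ack;			/* last ack value we published */
};

static void example_negotiate(struct example_smp2p *s)
{
	if (s->in->version != s->out->version)
		return;			/* keep waiting for the remote */
	s->out->features &= s->in->features;
	s->ssr_ack_enabled = s->out->features & FEATURE_SSR_ACK;
	s->open = true;
}

static void example_handle_ssr(struct example_smp2p *s)
{
	bool restart = s->in->flags & FLAG_RESTART_DONE;

	if (!s->ssr_ack_enabled || restart == s->ssr_ack)
		return;			/* nothing new to acknowledge */

	s->ssr_ack = restart;		/* mirror the remote's toggle bit */
	s->out->flags &= ~FLAG_RESTART_ACK;
	s->out->flags |= s->ssr_ack ? FLAG_RESTART_ACK : 0;
	/* a real implementation kicks the outgoing IPC interrupt here */
}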
+ */ +static irqreturn_t qcom_smp2p_intr(int irq, void *data) +{ + struct smp2p_smem_item *in; + struct qcom_smp2p *smp2p = data; + unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND]; + unsigned int pid = smp2p->remote_pid; + size_t size; + + in = smp2p->in; + + /* Acquire smem item, if not already found */ + if (!in) { + in = qcom_smem_get(pid, smem_id, &size); + if (IS_ERR(in)) { + dev_err(smp2p->dev, + "Unable to acquire remote smp2p item\n"); + return IRQ_HANDLED; + } + + smp2p->in = in; + } + + if (!smp2p->open) + qcom_smp2p_negotiate(smp2p); + + if (smp2p->open) { + bool do_restart; + + do_restart = qcom_smp2p_check_ssr(smp2p); + qcom_smp2p_notify_in(smp2p); + + if (do_restart) + qcom_smp2p_do_ssr_ack(smp2p); + } return IRQ_HANDLED; } @@ -393,13 +483,14 @@ static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p) out->remote_pid = smp2p->remote_pid; out->total_entries = SMP2P_MAX_ENTRY; out->valid_entries = 0; + out->features = SMP2P_FEATURES; /* * Make sure the rest of the header is written before we validate the * item by writing a valid version number. */ wmb(); - out->version = 1; + out->version = SMP2P_VERSION; qcom_smp2p_kick(smp2p); @@ -449,6 +540,9 @@ static int qcom_smp2p_probe(struct platform_device *pdev) const char *key; int ret; + if (!ilc) + ilc = ipc_log_context_create(SMP2P_LOG_PAGE_CNT, "smp2p", 0); + smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL); if (!smp2p) return -ENOMEM; diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 0822f7148969f0dc7439bee0042e810324e113ca..f19e44b2d0c3fe49f5b0c2dd5c98ff0921353d8d 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -307,8 +307,11 @@ static struct msm_soc_info cpu_of_id[] = { /* sm8150 ID */ [339] = {MSM_CPU_SM8150, "SM8150"}, - /* sa8150 ID */ - [362] = {MSM_CPU_SA8150, "SA8150"}, + /* sa8155 ID */ + [362] = {MSM_CPU_SA8155, "SA8155"}, + + /* sa8155P ID */ + [367] = {MSM_CPU_SA8155P, "SA8155P"}, /* sdmshrike ID */ [340] = {MSM_CPU_SDMSHRIKE, "SDMSHRIKE"}, @@ -319,6 +322,12 @@ static struct msm_soc_info cpu_of_id[] = { /* qcs405 ID */ [352] = {MSM_CPU_QCS405, "QCS405"}, + /* qcs403 ID */ + [373] = {MSM_CPU_QCS403, "QCS403"}, + + /* qcs401 ID */ + [371] = {MSM_CPU_QCS401, "QCS401"}, + /* sdxprairie ID */ [357] = {SDX_CPU_SDXPRAIRIE, "SDXPRAIRIE"}, @@ -1180,9 +1189,13 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 339; strlcpy(dummy_socinfo.build_id, "sm8150 - ", sizeof(dummy_socinfo.build_id)); - } else if (early_machine_is_sa8150()) { + } else if (early_machine_is_sa8155()) { dummy_socinfo.id = 362; - strlcpy(dummy_socinfo.build_id, "sa8150 - ", + strlcpy(dummy_socinfo.build_id, "sa8155 - ", + sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_sa8155p()) { + dummy_socinfo.id = 367; + strlcpy(dummy_socinfo.build_id, "sa8155p - ", sizeof(dummy_socinfo.build_id)); } else if (early_machine_is_sdmshrike()) { dummy_socinfo.id = 340; @@ -1196,6 +1209,14 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 352; strlcpy(dummy_socinfo.build_id, "qcs405 - ", sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_qcs403()) { + dummy_socinfo.id = 373; + strlcpy(dummy_socinfo.build_id, "qcs403 - ", + sizeof(dummy_socinfo.build_id)); + } else if (early_machine_is_qcs401()) { + dummy_socinfo.id = 371; + strlcpy(dummy_socinfo.build_id, "qcs401 - ", + sizeof(dummy_socinfo.build_id)); } else if (early_machine_is_sdxprairie()) { dummy_socinfo.id = 357; strlcpy(dummy_socinfo.build_id, "sdxprairie - ", diff --git 
a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c index cb531e5f31ad288f8e135a50c9922a20451267f0..1010f47ddb6e8ee7d3b3be8983c8576d25a9852f 100644 --- a/drivers/soc/qcom/spcom.c +++ b/drivers/soc/qcom/spcom.c @@ -362,7 +362,7 @@ static int spcom_rx(struct spcom_channel *ch, { unsigned long jiffies = msecs_to_jiffies(timeout_msec); long timeleft = 1; - int ret; + int ret = 0; mutex_lock(&ch->lock); @@ -374,10 +374,10 @@ static int spcom_rx(struct spcom_channel *ch, /* wait for rx response */ pr_debug("wait for rx done, timeout_msec=%d\n", timeout_msec); if (timeout_msec) - timeleft = wait_for_completion_timeout(&ch->rx_done, - jiffies); + timeleft = wait_for_completion_interruptible_timeout( + &ch->rx_done, jiffies); else - wait_for_completion(&ch->rx_done); + ret = wait_for_completion_interruptible(&ch->rx_done); mutex_lock(&ch->lock); if (timeout_msec && timeleft == 0) { @@ -388,6 +388,12 @@ static int spcom_rx(struct spcom_channel *ch, pr_warn("rpmsg channel is closing\n"); ret = -ERESTART; goto exit_err; + } else if (ret < 0 || timeleft == -ERESTARTSYS) { + pr_debug("wait interrupted: ret=%d, timeleft=%ld\n", + ret, timeleft); + if (timeleft == -ERESTARTSYS) + ret = -ERESTARTSYS; + goto exit_err; } else if (ch->actual_rx_size) { pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size); @@ -439,6 +445,7 @@ static int spcom_rx(struct spcom_channel *ch, static int spcom_get_next_request_size(struct spcom_channel *ch) { int size = -1; + int ret = 0; /* NOTE: Remote clients might not be connected yet.*/ mutex_lock(&ch->lock); @@ -448,18 +455,26 @@ static int spcom_get_next_request_size(struct spcom_channel *ch) if (ch->actual_rx_size) { pr_debug("next-req-size already ready ch [%s] size [%zu]\n", ch->name, ch->actual_rx_size); + ret = -EFAULT; goto exit_ready; } mutex_unlock(&ch->lock); /* unlock while waiting */ pr_debug("Wait for Rx Done, ch [%s].\n", ch->name); - wait_for_completion(&ch->rx_done); + ret = wait_for_completion_interruptible(&ch->rx_done); + if (ret < 0) { + pr_debug("ch [%s]:interrupted wait ret=%d\n", + ret, ch->name); + goto exit_error; + } mutex_lock(&ch->lock); /* re-lock after waiting */ if (ch->actual_rx_size == 0) { pr_err("invalid rx size [%zu] ch [%s]\n", ch->actual_rx_size, ch->name); + mutex_unlock(&ch->lock); + ret = -EFAULT; goto exit_error; } @@ -470,6 +485,8 @@ static int spcom_get_next_request_size(struct spcom_channel *ch) size -= sizeof(struct spcom_msg_hdr); } else { pr_err("rx size [%d] too small.\n", size); + ret = -EFAULT; + mutex_unlock(&ch->lock); goto exit_error; } @@ -477,10 +494,7 @@ static int spcom_get_next_request_size(struct spcom_channel *ch) return size; exit_error: - mutex_unlock(&ch->lock); - return -EFAULT; - - + return ret; } /*======================================================================*/ @@ -532,7 +546,7 @@ static int spcom_handle_restart_sp_command(void) { void *subsystem_get_retval = NULL; - pr_err("restart - PIL FW loading process initiated\n"); + pr_debug("restart - PIL FW loading process initiated\n"); subsystem_get_retval = subsystem_get("spss"); if (!subsystem_get_retval) { @@ -540,7 +554,7 @@ static int spcom_handle_restart_sp_command(void) return -EINVAL; } - pr_err("restart - PIL FW loading process is complete\n"); + pr_debug("restart - PIL FW loading process is complete\n"); return 0; } @@ -882,7 +896,7 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch, /* Check if this shared buffer is already locked */ for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) { if 
(ch->dmabuf_handle_table[i] == dma_buf) { - pr_err("fd [%d] shared buf is already locked.\n", fd); + pr_debug("fd [%d] shared buf is already locked.\n", fd); /* decrement back the ref count */ mutex_unlock(&ch->lock); dma_buf_put(dma_buf); @@ -1293,8 +1307,6 @@ static int spcom_device_release(struct inode *inode, struct file *filp) const char *name = file_to_filename(filp); int ret = 0; - pr_err("close file [%s].\n", name); - if (strcmp(name, "unknown") == 0) { pr_err("name is unknown\n"); return -EINVAL; @@ -1467,7 +1479,8 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff, ret = spcom_handle_read(ch, buf, buf_size); if (ret < 0) { - pr_err("read error [%d].\n", ret); + if (ret != -ERESTARTSYS) + pr_err("read error [%d].\n", ret); kfree(buf); return ret; } @@ -1893,7 +1906,7 @@ static int spcom_rpdev_cb(struct rpmsg_device *rpdev, spin_lock_irqsave(&spcom_dev->rx_lock, flags); list_add(&rx_item->list, &spcom_dev->rx_list_head); spin_unlock_irqrestore(&spcom_dev->rx_lock, flags); - pr_err("signaling rx item for %s, received %d bytes\n", + pr_debug("signaling rx item for %s, received %d bytes\n", rpdev->id.name, len); schedule_work(&rpmsg_rx_consumer); @@ -1935,17 +1948,27 @@ static int spcom_rpdev_probe(struct rpmsg_device *rpdev) static void spcom_rpdev_remove(struct rpmsg_device *rpdev) { struct spcom_channel *ch; + int i; if (!rpdev) { pr_err("rpdev is NULL\n"); return; } + dev_info(&rpdev->dev, "rpmsg device %s removed\n", rpdev->id.name); ch = dev_get_drvdata(&rpdev->dev); if (!ch) { pr_err("channel %s not found\n", rpdev->id.name); return; } + /* release all ion buffers locked by the channel */ + for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) { + if (ch->dmabuf_handle_table[i]) { + dma_buf_put(ch->dmabuf_handle_table[i]); + ch->dmabuf_handle_table[i] = NULL; + dev_info(&rpdev->dev, "dma_buf_put(%d)\n", i); + } + } mutex_lock(&ch->lock); ch->rpdev = NULL; ch->rpmsg_abort = true; @@ -1956,7 +1979,6 @@ static void spcom_rpdev_remove(struct rpmsg_device *rpdev) if (atomic_dec_and_test(&spcom_dev->rpmsg_dev_count)) complete_all(&spcom_dev->rpmsg_state_change); - dev_info(&rpdev->dev, "rpmsg device %s removed\n", rpdev->id.name); } /* register rpmsg driver to match with channel ch_name */ @@ -2064,7 +2086,7 @@ static int spcom_probe(struct platform_device *pdev) ret = spcom_register_chardev(); if (ret) { pr_err("create character device failed.\n"); - goto fail_reg_chardev; + goto fail_while_chardev_reg; } ret = spcom_parse_dt(np); @@ -2082,6 +2104,7 @@ static int spcom_probe(struct platform_device *pdev) fail_reg_chardev: pr_err("failed to init driver\n"); spcom_unregister_chrdev(); +fail_while_chardev_reg: kfree(dev); spcom_dev = NULL; diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c index 44c0c55e278f4debb3ea9d3d18581a5633b9771f..620da5eb06e8c08db1a0e3bdfab6e765bc0dca9d 100644 --- a/drivers/soc/qcom/spm_devices.c +++ b/drivers/soc/qcom/spm_devices.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
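/*
 * Illustrative sketch (not part of the patch): the interruptible-wait
 * pattern the spcom_rx()/spcom_get_next_request_size() changes above move
 * to. wait_for_completion_interruptible_timeout() returns a negative value
 * when a signal arrives, 0 on timeout, and the remaining jiffies otherwise,
 * so all three outcomes must be handled and -ERESTARTSYS propagated to the
 * caller. example_wait_rx() is an invented helper name.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait_rx(struct completion *rx_done, unsigned int timeout_ms)
{
	long left;

	if (!timeout_ms)
		return wait_for_completion_interruptible(rx_done);

	left = wait_for_completion_interruptible_timeout(rx_done,
					msecs_to_jiffies(timeout_ms));
	if (left == 0)
		return -ETIMEDOUT;	/* timer expired, nothing arrived */
	if (left < 0)
		return left;		/* -ERESTARTSYS: interrupted by signal */
	return 0;			/* completed with time to spare */
}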
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -782,12 +782,16 @@ static int msm_spm_dev_probe(struct platform_device *pdev) } spm_data.vctl_port = -1; + spm_data.vctl_port_ub = -1; spm_data.phase_port = -1; spm_data.pfm_port = -1; key = "qcom,vctl-port"; of_property_read_u32(node, key, &spm_data.vctl_port); + key = "qcom,vctl-port-ub"; + of_property_read_u32(node, key, &spm_data.vctl_port_ub); + key = "qcom,phase-port"; of_property_read_u32(node, key, &spm_data.phase_port); diff --git a/drivers/soc/qcom/spm_driver.h b/drivers/soc/qcom/spm_driver.h index a6458135c548dd41bc59a3720d6deb1cec676ac8..198267a4012ac911c2a570c6c0e16b85381bc578 100644 --- a/drivers/soc/qcom/spm_driver.h +++ b/drivers/soc/qcom/spm_driver.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -62,6 +62,7 @@ struct msm_spm_platform_data { uint32_t ver_reg; uint32_t vctl_port; + int vctl_port_ub; uint32_t phase_port; uint32_t pfm_port; @@ -84,6 +85,7 @@ struct msm_spm_driver_data { uint32_t minor; uint32_t ver_reg; uint32_t vctl_port; + int vctl_port_ub; uint32_t phase_port; uint32_t pfm_port; void __iomem *reg_base_addr; diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c index 2f5bd24a93f2df73abf2b3bef1611943257c889a..605aae29948b9e3447b46f65daded94186e1d214 100644 --- a/drivers/soc/qcom/subsys-pil-tz.c +++ b/drivers/soc/qcom/subsys-pil-tz.c @@ -1031,7 +1031,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev) { struct pil_tz_data *d; struct resource *res; - u32 proxy_timeout; + struct device_node *crypto_node; + u32 proxy_timeout, crypto_id; int len, rc; d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL); @@ -1087,7 +1088,16 @@ static int pil_tz_driver_probe(struct platform_device *pdev) rc); return rc; } - scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE_0); + + crypto_id = MSM_BUS_MASTER_CRYPTO_CORE_0; + crypto_node = of_parse_phandle(pdev->dev.of_node, + "qcom,mas-crypto", 0); + if (!IS_ERR_OR_NULL(crypto_node)) { + of_property_read_u32(crypto_node, "cell-id", + &crypto_id); + } + of_node_put(crypto_node); + scm_pas_init((int)crypto_id); } rc = pil_desc_init(&d->desc); diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c index aa694f05f8dbc68d34621b2d85e5f247ca045161..ff1dd668bef0b884dc04fdd5ae07ee481ee33685 100644 --- a/drivers/soc/qcom/watchdog_v2.c +++ b/drivers/soc/qcom/watchdog_v2.c @@ -54,7 +54,6 @@ #define SCM_SET_REGSAVE_CMD 0x2 #define SCM_SVC_SEC_WDOG_DIS 0x7 #define MAX_CPU_CTX_SIZE 2048 -#define MAX_CPU_SCANDUMP_SIZE 0x10100 static struct msm_watchdog_data *wdog_data; @@ -100,6 +99,7 @@ struct msm_watchdog_data { unsigned long long thread_start; unsigned long long ping_start[NR_CPUS]; unsigned long long ping_end[NR_CPUS]; + unsigned int cpu_scandump_sizes[NR_CPUS]; }; /* @@ -598,8 +598,10 @@ static void configure_scandump(struct msm_watchdog_data *wdog_dd) int cpu; static dma_addr_t dump_addr; static void *dump_vaddr; + unsigned int scandump_size; for_each_cpu(cpu, cpu_present_mask) { + scandump_size = wdog_dd->cpu_scandump_sizes[cpu]; cpu_data = devm_kzalloc(wdog_dd->dev, sizeof(struct msm_dump_data), GFP_KERNEL); @@ -607,17 +609,17 @@ static void configure_scandump(struct msm_watchdog_data 
*wdog_dd) continue; dump_vaddr = (void *) dma_alloc_coherent(wdog_dd->dev, - MAX_CPU_SCANDUMP_SIZE, + scandump_size, &dump_addr, GFP_KERNEL); if (!dump_vaddr) { dev_err(wdog_dd->dev, "Couldn't get memory for dump\n"); continue; } - memset(dump_vaddr, 0x0, MAX_CPU_SCANDUMP_SIZE); + memset(dump_vaddr, 0x0, scandump_size); cpu_data->addr = dump_addr; - cpu_data->len = MAX_CPU_SCANDUMP_SIZE; + cpu_data->len = scandump_size; snprintf(cpu_data->name, sizeof(cpu_data->name), "KSCANDUMP%d", cpu); dump_entry.id = MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu; @@ -627,7 +629,7 @@ static void configure_scandump(struct msm_watchdog_data *wdog_dd) if (ret) { dev_err(wdog_dd->dev, "Dump setup failed, id = %d\n", MSM_DUMP_DATA_SCANDUMP_PER_CPU + cpu); - dma_free_coherent(wdog_dd->dev, MAX_CPU_SCANDUMP_SIZE, + dma_free_coherent(wdog_dd->dev, scandump_size, dump_vaddr, dump_addr); devm_kfree(wdog_dd->dev, cpu_data); @@ -752,7 +754,7 @@ static int msm_wdog_dt_to_pdata(struct platform_device *pdev, { struct device_node *node = pdev->dev.of_node; struct resource *res; - int ret; + int ret, cpu, num_scandump_sizes; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-base"); if (!res) @@ -814,6 +816,17 @@ static int msm_wdog_dt_to_pdata(struct platform_device *pdev, pdata->wakeup_irq_enable = of_property_read_bool(node, "qcom,wakeup-enable"); + num_scandump_sizes = of_property_count_elems_of_size(node, + "qcom,scandump-sizes", + sizeof(u32)); + if (num_scandump_sizes < 0 || num_scandump_sizes != NR_CPUS) + dev_info(&pdev->dev, "%s scandump sizes property not correct\n", + __func__); + else + for_each_cpu(cpu, cpu_present_mask) + of_property_read_u32_index(node, "qcom,scandump-sizes", + cpu, + &pdata->cpu_scandump_sizes[cpu]); pdata->irq_ppi = irq_is_percpu(pdata->bark_irq); dump_pdata(pdata); return 0; diff --git a/drivers/soc/qcom/wlan_firmware_service_v01.c b/drivers/soc/qcom/wlan_firmware_service_v01.c index 6511af59a6d2cd376a5f7ca22abfe9c0c64ae6e3..8ec59f1087e9bedfc294710ab53c847a301aec1b 100644 --- a/drivers/soc/qcom/wlan_firmware_service_v01.c +++ b/drivers/soc/qcom/wlan_firmware_service_v01.c @@ -2283,3 +2283,47 @@ struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { }, }; +struct qmi_elem_info wlfw_shutdown_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_shutdown_req_msg_v01, + shutdown_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_shutdown_req_msg_v01, + shutdown), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct qmi_elem_info wlfw_shutdown_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_shutdown_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + diff --git a/drivers/soc/qcom/wlan_firmware_service_v01.h b/drivers/soc/qcom/wlan_firmware_service_v01.h index bc3e2e5a79704da098518cfe63902db93b4ea056..3d5c285f0b9e34fe2fb826268d1c0f98d60956ec 100644 --- a/drivers/soc/qcom/wlan_firmware_service_v01.h +++ b/drivers/soc/qcom/wlan_firmware_service_v01.h @@ -30,6 +30,7 @@ #define QMI_WLFW_XO_CAL_IND_V01 0x003D #define 
QMI_WLFW_INI_RESP_V01 0x002F #define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026 +#define QMI_WLFW_SHUTDOWN_RESP_V01 0x0043 #define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033 #define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028 #define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 @@ -57,6 +58,7 @@ #define QMI_WLFW_CAP_RESP_V01 0x0024 #define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A #define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 +#define QMI_WLFW_SHUTDOWN_REQ_V01 0x0043 #define QMI_WLFW_VBATT_REQ_V01 0x0032 #define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 #define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 @@ -608,4 +610,17 @@ struct wlfw_xo_cal_ind_msg_v01 { #define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4 extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[]; +struct wlfw_shutdown_req_msg_v01 { + u8 shutdown_valid; + u8 shutdown; +}; +#define WLFW_SHUTDOWN_REQ_MSG_V01_MAX_MSG_LEN 4 +extern struct qmi_elem_info wlfw_shutdown_req_msg_v01_ei[]; + +struct wlfw_shutdown_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; +#define WLFW_SHUTDOWN_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct qmi_elem_info wlfw_shutdown_resp_msg_v01_ei[]; + #endif diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 40b75748835f552cd4f5a08525a770f839c69d4e..ba009bb9d82bd72cb860ffb0416990feaedb3c6a 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -255,7 +255,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd, return; else if (pd->info->pwr_w_mask) regmap_write(pmu->regmap, pmu->info->pwr_offset, - on ? pd->info->pwr_mask : + on ? pd->info->pwr_w_mask : (pd->info->pwr_mask | pd->info->pwr_w_mask)); else regmap_update_bits(pmu->regmap, pmu->info->pwr_offset, diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 7428091d3f5b8ffa77ed2a3be65a114720cb4bfa..bd00b7cc8b78bab933114300097f14a0559275ec 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id) struct bcm2835aux_spi *bs = spi_master_get_devdata(master); irqreturn_t ret = IRQ_NONE; + /* IRQ may be shared, so return if our interrupts are disabled */ + if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) & + (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE))) + return ret; + /* check if we have data to read */ while (bs->rx_len && (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) & diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 5c9516ae4942e5cf8b2ef381d2ecd496803cbf14..4a001634023e09b8e83b8e6b82b5af557e2c0853 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && (xspi->tx_bytes > 0)) { + + /* When xspi is in a busy condition, bytes may fail to send and + * the SPI controller then does not work correctly; add a one-byte delay + */ + if (cdns_spi_read(xspi, CDNS_SPI_ISR) & + CDNS_SPI_IXR_TXFULL) + usleep_range(10, 20); + if (xspi->txbuf) cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++); else diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 2770fbd4ce49ff1eb211255f5489f1f9536b5f95..52056535f54e07fc882e94e97bbcf12595ea3a62 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -277,6 +277,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, } k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1); + brps = min_t(int, brps, 32); scr = sh_msiof_spi_div_table[k].brdv |
SCR_BRPS(brps); sh_msiof_write(p, TSCR, scr); diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c index 2c90bef1224fa0c746ef81bc2eab7c1321eb7a6c..28fba4c4434388ba8dfc7811992d1c8cbf05cd13 100644 --- a/drivers/spmi/spmi-pmic-arb-debug.c +++ b/drivers/spmi/spmi-pmic-arb-debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -365,17 +365,7 @@ static struct platform_driver spmi_pmic_arb_debug_driver = { }, }; -int __init spmi_pmic_arb_debug_init(void) -{ - return platform_driver_register(&spmi_pmic_arb_debug_driver); -} -arch_initcall(spmi_pmic_arb_debug_init); - -static void __exit spmi_pmic_arb_debug_exit(void) -{ - platform_driver_unregister(&spmi_pmic_arb_debug_driver); -} -module_exit(spmi_pmic_arb_debug_exit); +module_platform_driver(spmi_pmic_arb_debug_driver); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:spmi_pmic_arb_debug"); diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index db43169a95fbebf9ad31e5df2d978539032a059d..6985e6cc434ef07c682b589b48f3b617ab5ede57 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -1369,6 +1369,7 @@ static struct platform_driver spmi_pmic_arb_driver = { .driver = { .name = "spmi_pmic_arb", .of_match_table = spmi_pmic_arb_match_table, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 4630dc85634e6aaaa06e492c0996e8863b8e48b2..1d233d01ec1f755e00d379547ae708d71d73a099 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -23,6 +23,25 @@ config ANDROID_VSOC a 'cuttlefish' Android image inside QEmu. The driver interacts with a QEmu ivshmem device. If built as a module, it will be called vsoc. +config ANDROID_LOW_MEMORY_KILLER + bool "Android Low Memory Killer" + ---help--- + Registers processes to be killed when memory is low; this is useful + because Android has no dedicated swap space. + + Registered processes are killed according to the priorities set in the + Android init scripts (/init.rc), which pair each priority value with a + minimum amount of free memory. + +config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES + bool "Android Low Memory Killer: detect oom_adj values" + depends on ANDROID_LOW_MEMORY_KILLER + default y + ---help--- + Detect oom_adj values written to + /sys/module/lowmemorykiller/parameters/adj and convert them + to oom_score_adj values.
+ source "drivers/staging/android/ion/Kconfig" source "drivers/staging/android/fiq_debugger/Kconfig" diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 2638b4a23df4cddd74fd10207d9632e155f4d7a1..191ac4bdefa3e0da8b847153d41813573976ece2 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/ obj-$(CONFIG_ASHMEM) += ashmem.o obj-$(CONFIG_ANDROID_VSOC) += vsoc.o +obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c index f6a806219f84821f570153296c75205c8cfe014c..63dd2d69e56848970c5aad14f24b7cb585d90b88 100644 --- a/drivers/staging/android/fiq_debugger/fiq_debugger.c +++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -400,7 +401,7 @@ static void fiq_debugger_work(struct work_struct *work) cmd += 6; while (*cmd == ' ') cmd++; - if (*cmd != '\0') + if ((*cmd != '\0') && sysrq_on()) kernel_restart(cmd); else kernel_restart(NULL); @@ -430,29 +431,39 @@ static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd) static void fiq_debugger_help(struct fiq_debugger_state *state) { fiq_debugger_printf(&state->output, - "FIQ Debugger commands:\n" - " pc PC status\n" - " regs Register dump\n" - " allregs Extended Register dump\n" - " bt Stack trace\n" - " reboot [] Reboot with command \n" - " reset [] Hard reset with command \n" - " irqs Interupt status\n" - " kmsg Kernel log\n" - " version Kernel version\n"); - fiq_debugger_printf(&state->output, - " sleep Allow sleep while in FIQ\n" - " nosleep Disable sleep while in FIQ\n" - " console Switch terminal to console\n" - " cpu Current CPU\n" - " cpu Switch to CPU\n"); + "FIQ Debugger commands:\n"); + if (sysrq_on()) { + fiq_debugger_printf(&state->output, + " pc PC status\n" + " regs Register dump\n" + " allregs Extended Register dump\n" + " bt Stack trace\n"); + fiq_debugger_printf(&state->output, + " reboot [] Reboot with command \n" + " reset [] Hard reset with command \n" + " irqs Interrupt status\n" + " kmsg Kernel log\n" + " version Kernel version\n"); + fiq_debugger_printf(&state->output, + " cpu Current CPU\n" + " cpu Switch to CPU\n" + " sysrq sysrq options\n" + " sysrq Execute sysrq with \n"); + } else { + fiq_debugger_printf(&state->output, + " reboot Reboot\n" + " reset Hard reset\n" + " irqs Interrupt status\n"); + } fiq_debugger_printf(&state->output, - " ps Process list\n" - " sysrq sysrq options\n" - " sysrq Execute sysrq with \n"); + " sleep Allow sleep while in FIQ\n" + " nosleep Disable sleep while in FIQ\n" + " console Switch terminal to console\n" + " ps Process list\n"); #ifdef CONFIG_KGDB - fiq_debugger_printf(&state->output, - " kgdb Enter kernel debugger\n"); + if (fiq_kgdb_enable) { + fiq_debugger_printf(&state->output, + " kgdb Enter kernel debugger\n"); #endif } @@ -484,18 +495,23 @@ static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state, if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) { fiq_debugger_help(state); } else if (!strcmp(cmd, "pc")) { - fiq_debugger_dump_pc(&state->output, regs); + if (sysrq_on()) + fiq_debugger_dump_pc(&state->output, regs); } else if (!strcmp(cmd, "regs")) { - fiq_debugger_dump_regs(&state->output, regs); + if (sysrq_on()) + fiq_debugger_dump_regs(&state->output, regs); } else if (!strcmp(cmd, "allregs")) { - 
fiq_debugger_dump_allregs(&state->output, regs); + if (sysrq_on()) + fiq_debugger_dump_allregs(&state->output, regs); } else if (!strcmp(cmd, "bt")) { - fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp); + if (sysrq_on()) + fiq_debugger_dump_stacktrace(&state->output, regs, + 100, svc_sp); } else if (!strncmp(cmd, "reset", 5)) { cmd += 5; while (*cmd == ' ') cmd++; - if (*cmd) { + if (*cmd && sysrq_on()) { char tmp_cmd[32]; strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd)); machine_restart(tmp_cmd); @@ -505,9 +521,12 @@ static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state, } else if (!strcmp(cmd, "irqs")) { fiq_debugger_dump_irqs(state); } else if (!strcmp(cmd, "kmsg")) { - fiq_debugger_dump_kernel_log(state); + if (sysrq_on()) + fiq_debugger_dump_kernel_log(state); } else if (!strcmp(cmd, "version")) { - fiq_debugger_printf(&state->output, "%s\n", linux_banner); + if (sysrq_on()) + fiq_debugger_printf(&state->output, "%s\n", + linux_banner); } else if (!strcmp(cmd, "sleep")) { state->no_sleep = false; fiq_debugger_printf(&state->output, "enabling sleep\n"); @@ -519,14 +538,17 @@ static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state, fiq_debugger_uart_flush(state); state->console_enable = true; } else if (!strcmp(cmd, "cpu")) { - fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu); - } else if (!strncmp(cmd, "cpu ", 4)) { + if (sysrq_on()) + fiq_debugger_printf(&state->output, "cpu %d\n", + state->current_cpu); + } else if (!strncmp(cmd, "cpu ", 4) && sysrq_on()) { unsigned long cpu = 0; if (kstrtoul(cmd + 4, 10, &cpu) == 0) fiq_debugger_switch_cpu(state, cpu); else fiq_debugger_printf(&state->output, "invalid cpu\n"); - fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu); + fiq_debugger_printf(&state->output, "cpu %d\n", + state->current_cpu); } else { if (state->debug_busy) { fiq_debugger_printf(&state->output, diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 81c7eb4eab1c4e87f324425c20b6dc3d65093562..e68cafa237131669f1c6c00042101bcc268dd324 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -180,7 +180,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, void ion_buffer_destroy(struct ion_buffer *buffer) { - if (WARN_ON(buffer->kmap_cnt > 0)) + if (WARN_ON_ONCE(buffer->kmap_cnt > 0)) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->heap->ops->free(buffer); kfree(buffer); @@ -364,8 +364,10 @@ static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, map_attrs); } - if (count <= 0) + if (count <= 0) { + mutex_unlock(&buffer->lock); return ERR_PTR(-ENOMEM); + } a->dma_mapped = true; mutex_unlock(&buffer->lock); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index a8b9baae205f9207b6bc9644b6c9c92ed9d19d0b..2f8fb2f4db1252f2d3fa9937435c941e142377a6 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -37,6 +37,7 @@ #define ION_SYSTEM_HEAP_NAME "system" #define ION_MM_HEAP_NAME "mm" #define ION_SPSS_HEAP_NAME "spss" +#define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout" #define ION_QSECOM_HEAP_NAME "qsecom" #define ION_QSECOM_TA_HEAP_NAME "qsecom_ta" #define ION_SECURE_HEAP_NAME "secure_heap" @@ -415,6 +416,10 @@ void ion_system_secure_heap_destroy(struct ion_heap *heap); struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap); void ion_cma_secure_heap_destroy(struct ion_heap *heap); +struct ion_heap 
*ion_secure_carveout_heap_create( + struct ion_platform_heap *heap); +void ion_secure_carveout_heap_destroy(struct ion_heap *heap); + /** * functions for creating and destroying a heap pool -- allows you * to keep a pool of pre allocated memory to use from your heap. Keeping diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index 03427f31c38d11a4c576588f8b61719fb0687ad1..ffc7e8272178ba2675c0dfbe80e026aa7ff00404 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -22,7 +22,12 @@ #include #include #include +#include +#include +#include +#include #include "ion.h" +#include "ion_secure_util.h" #define ION_CARVEOUT_ALLOCATE_FAIL -1 @@ -64,6 +69,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, struct sg_table *table; phys_addr_t paddr; int ret; + struct device *dev = heap->priv; table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) @@ -81,6 +87,10 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); buffer->sg_table = table; + if (ion_buffer_cached(buffer)) + ion_pages_sync_for_device(dev, sg_page(table->sgl), + buffer->size, DMA_FROM_DEVICE); + return 0; err_free_table: @@ -96,12 +106,13 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer) struct sg_table *table = buffer->sg_table; struct page *page = sg_page(table->sgl); phys_addr_t paddr = page_to_phys(page); + struct device *dev = (struct device *)heap->priv; ion_heap_buffer_zero(buffer); if (ion_buffer_cached(buffer)) - dma_sync_sg_for_device(NULL, table->sgl, table->nents, - DMA_BIDIRECTIONAL); + ion_pages_sync_for_device(dev, page, buffer->size, + DMA_BIDIRECTIONAL); ion_carveout_free(heap, paddr, buffer->size); sg_free_table(table); @@ -116,18 +127,22 @@ static struct ion_heap_ops carveout_heap_ops = { .unmap_kernel = ion_heap_unmap_kernel, }; -struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +static struct ion_heap *__ion_carveout_heap_create( + struct ion_platform_heap *heap_data, + bool sync) { struct ion_carveout_heap *carveout_heap; int ret; struct page *page; size_t size; + struct device *dev = (struct device *)heap_data->priv; page = pfn_to_page(PFN_DOWN(heap_data->base)); size = heap_data->size; - ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + if (sync) + ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL); ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); if (ret) @@ -152,6 +167,11 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) return &carveout_heap->heap; } +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + return __ion_carveout_heap_create(heap_data, true); +} + void ion_carveout_heap_destroy(struct ion_heap *heap) { struct ion_carveout_heap *carveout_heap = @@ -161,3 +181,187 @@ void ion_carveout_heap_destroy(struct ion_heap *heap) kfree(carveout_heap); carveout_heap = NULL; } + +struct ion_sc_entry { + struct list_head list; + struct ion_heap *heap; + u32 token; +}; + +struct ion_sc_heap { + struct ion_heap heap; + struct device *dev; + struct list_head children; +}; + +static struct ion_heap *ion_sc_find_child(struct ion_heap *heap, u32 flags) +{ + struct ion_sc_heap *manager; + struct ion_sc_entry *entry; + + manager = container_of(heap, struct ion_sc_heap, heap); + flags = flags & ION_FLAGS_CP_MASK; + list_for_each_entry(entry, 
&manager->children, list) { + if (entry->token == flags) + return entry->heap; + } + return NULL; +} + +static int ion_sc_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long flags) +{ + struct ion_heap *child; + + /* cache maintenance is not possible on secure memory */ + flags &= ~((unsigned long)ION_FLAG_CACHED); + buffer->flags = flags; + + child = ion_sc_find_child(heap, flags); + if (!child) + return -EINVAL; + return ion_carveout_heap_allocate(child, buffer, len, flags); +} + +static void ion_sc_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *child; + struct sg_table *table = buffer->sg_table; + struct page *page = sg_page(table->sgl); + phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + child = ion_sc_find_child(buffer->heap, buffer->flags); + if (!child) { + WARN(1, "ion_secure_carveout: invalid buffer flags on free. Memory will be leaked.\n"); + return; + } + + ion_carveout_free(child, paddr, buffer->size); + sg_free_table(table); + kfree(table); +} + +static struct ion_heap_ops ion_sc_heap_ops = { + .allocate = ion_sc_heap_allocate, + .free = ion_sc_heap_free, +}; + +static int ion_sc_get_dt_token(struct ion_sc_entry *entry, + struct device_node *np, u64 base, u64 size) +{ + u32 token; + int ret = -EINVAL; + + if (of_property_read_u32(np, "token", &token)) + return -EINVAL; + + ret = ion_hyp_assign_from_flags(base, size, token); + if (ret) + pr_err("secure_carveout_heap: Assign token 0x%x failed\n", + token); + else + entry->token = token; + + return ret; +} + +static int ion_sc_add_child(struct ion_sc_heap *manager, + struct device_node *np) +{ + struct device *dev = manager->dev; + struct ion_platform_heap heap_data = {0}; + struct ion_sc_entry *entry; + struct device_node *phandle; + const __be32 *basep; + u64 base, size; + int ret; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + INIT_LIST_HEAD(&entry->list); + + phandle = of_parse_phandle(np, "memory-region", 0); + if (!phandle) + goto out_free; + + basep = of_get_address(phandle, 0, &size, NULL); + if (!basep) + goto out_free; + + base = of_translate_address(phandle, basep); + if (base == OF_BAD_ADDR) + goto out_free; + + heap_data.priv = dev; + heap_data.base = base; + heap_data.size = size; + + /* This will zero memory initially */ + entry->heap = __ion_carveout_heap_create(&heap_data, false); + if (IS_ERR(entry->heap)) + goto out_free; + + ret = ion_sc_get_dt_token(entry, np, base, size); + if (ret) + goto out_free_carveout; + + list_add(&entry->list, &manager->children); + dev_info(dev, "ion_secure_carveout: creating heap@0x%llx, size 0x%llx\n", + base, size); + return 0; + +out_free_carveout: + ion_carveout_heap_destroy(entry->heap); +out_free: + kfree(entry); + return -EINVAL; +} + +void ion_secure_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_sc_heap *manager = + container_of(heap, struct ion_sc_heap, heap); + struct ion_sc_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &manager->children, list) { + ion_carveout_heap_destroy(entry->heap); + kfree(entry); + } + kfree(manager); +} + +struct ion_heap *ion_secure_carveout_heap_create( + struct ion_platform_heap *heap_data) +{ + struct device *dev = heap_data->priv; + int ret; + struct ion_sc_heap *manager; + struct device_node *np; + + manager = kzalloc(sizeof(*manager), GFP_KERNEL); + if (!manager) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&manager->children); + manager->dev = dev; + + for_each_child_of_node(dev->of_node, np) { + ret =
ion_sc_add_child(manager, np); + if (ret) { + dev_err(dev, "Creating child pool %s failed\n", + np->name); + goto err; + } + } + + manager->heap.ops = &ion_sc_heap_ops; + manager->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT; + return &manager->heap; + +err: + ion_secure_carveout_heap_destroy(&manager->heap); + return ERR_PTR(-EINVAL); +} diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index a30fa2b2cd461079f97c529cd2b01aa0c6679414..32426fc04220f76c10cd5a58224637ba297bd7f0 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -348,6 +348,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE: heap = ion_system_secure_heap_create(heap_data); break; + case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT: + heap = ion_secure_carveout_heap_create(heap_data); + break; default: pr_err("%s: Invalid heap type %d\n", __func__, heap_data->type); @@ -403,6 +406,9 @@ void ion_heap_destroy(struct ion_heap *heap) case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE: ion_system_secure_heap_destroy(heap); break; + case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT: + ion_secure_carveout_heap_destroy(heap); + break; default: pr_err("%s: Invalid heap type %d\n", __func__, heap->type); diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c index 472763a29c18efb8de4b3661101f65d824b0dfd5..93fafcac33acbb3679c41e60c74d2a41256170cb 100644 --- a/drivers/staging/android/ion/ion_secure_util.c +++ b/drivers/staging/android/ion/ion_secure_util.c @@ -30,7 +30,8 @@ bool is_secure_vmid_valid(int vmid) vmid == VMID_CP_CAMERA_PREVIEW || vmid == VMID_CP_SPSS_SP || vmid == VMID_CP_SPSS_SP_SHARED || - vmid == VMID_CP_SPSS_HLOS_SHARED); + vmid == VMID_CP_SPSS_HLOS_SHARED || + vmid == VMID_CP_CDSP); } int get_secure_vmid(unsigned long flags) @@ -57,6 +58,8 @@ int get_secure_vmid(unsigned long flags) return VMID_CP_SPSS_SP_SHARED; if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED) return VMID_CP_SPSS_HLOS_SHARED; + if (flags & ION_FLAG_CP_CDSP) + return VMID_CP_CDSP; return -EINVAL; } @@ -239,3 +242,44 @@ bool hlos_accessible_buffer(struct ion_buffer *buffer) return true; } + +int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags) +{ + u32 *vmids, *modes; + u32 nr, i; + int ret = -EINVAL; + u32 src_vm = VMID_HLOS; + + nr = count_set_bits(flags); + vmids = kcalloc(nr, sizeof(*vmids), GFP_KERNEL); + if (!vmids) + return -ENOMEM; + + modes = kcalloc(nr, sizeof(*modes), GFP_KERNEL); + if (!modes) { + kfree(vmids); + return -ENOMEM; + } + + if ((flags & ~ION_FLAGS_CP_MASK) || + populate_vm_list(flags, vmids, nr)) { + pr_err("%s: Failed to parse secure flags 0x%x\n", __func__, + flags); + goto out; + } + + for (i = 0; i < nr; i++) + if (vmids[i] == VMID_CP_SEC_DISPLAY) + modes[i] = PERM_READ; + else + modes[i] = PERM_READ | PERM_WRITE; + + ret = hyp_assign_phys(base, size, &src_vm, 1, vmids, modes, nr); + if (ret) + pr_err("%s: Assign call failed, flags 0x%x\n", __func__, flags); + +out: + kfree(modes); + kfree(vmids); + return ret; +} diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h index ea9d85e75781f7ee9e83a55d6116f20caef04f8c..7947452daa48f0a102b53a56de6017a148e39de4 100644 --- a/drivers/staging/android/ion/ion_secure_util.h +++ b/drivers/staging/android/ion/ion_secure_util.h @@ -25,6 +25,7 @@ int 
ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags, bool set_page_private); int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags, bool set_page_private); +int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags); bool hlos_accessible_buffer(struct ion_buffer *buffer); diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 1eaa9531413d76122d225b09128b5ead571191ec..3accf2536817a04070c7287a89cb62d367ce0071 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -2,7 +2,7 @@ * drivers/staging/android/ion/ion_system_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -587,8 +587,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) int i; for (i = 0; i < NUM_ORDERS; i++) - if (pools[i]) + if (pools[i]) { ion_page_pool_destroy(pools[i]); + pools[i] = NULL; + } } /** diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c index e9dfa263b926dbd293e656a1a0ae366709921ba1..51ef7e27a3c2c3133cd4f5cbd6ae4ac432ae633e 100644 --- a/drivers/staging/android/ion/ion_system_secure_heap.c +++ b/drivers/staging/android/ion/ion_system_secure_heap.c @@ -215,7 +215,7 @@ static int alloc_prefetch_info( bool shrink, struct list_head *items) { struct prefetch_info *info; - u64 __user *user_sizes; + u64 user_sizes; int err; unsigned int nr_sizes, vmid, i; @@ -236,7 +236,7 @@ static int alloc_prefetch_info( if (!info) return -ENOMEM; - err = get_user(info->size, &user_sizes[i]); + err = get_user(info->size, ((u64 __user *)user_sizes + i)); if (err) goto out_free; @@ -270,7 +270,9 @@ static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr, return -EINVAL; for (i = 0; i < data->nr_regions; i++) { - ret = alloc_prefetch_info(&data->regions[i], shrink, &items); + ret = alloc_prefetch_info( + (struct ion_prefetch_regions *)data->regions + i, + shrink, &items); if (ret) goto out_free; } diff --git a/drivers/staging/android/ion/msm/msm_ion_of.c b/drivers/staging/android/ion/msm/msm_ion_of.c index 57a705f58b0ffd27532560965c8b62c2d255ad58..1b50640333209f6d90c9728d5664eae80389fc84 100644 --- a/drivers/staging/android/ion/msm/msm_ion_of.c +++ b/drivers/staging/android/ion/msm/msm_ion_of.c @@ -72,6 +72,10 @@ static struct ion_heap_desc ion_heap_meta[] = { { .id = ION_AUDIO_HEAP_ID, .name = ION_AUDIO_HEAP_NAME, + }, + { + .id = ION_SECURE_CARVEOUT_HEAP_ID, + .name = ION_SECURE_CARVEOUT_HEAP_NAME, } }; #endif @@ -87,6 +91,7 @@ static struct heap_types_info { MAKE_HEAP_TYPE_MAPPING(SYSTEM), MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG), MAKE_HEAP_TYPE_MAPPING(CARVEOUT), + MAKE_HEAP_TYPE_MAPPING(SECURE_CARVEOUT), MAKE_HEAP_TYPE_MAPPING(CHUNK), MAKE_HEAP_TYPE_MAPPING(DMA), MAKE_HEAP_TYPE_MAPPING(SECURE_DMA), diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c new file mode 100644 index 0000000000000000000000000000000000000000..fd60704314508ab8ba92594d12424a956d809deb --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.c @@ -0,0 +1,757 @@ +/* drivers/misc/lowmemorykiller.c + * + * The lowmemorykiller driver lets user-space specify a set 
of memory thresholds + * where processes with a range of oom_score_adj values will get killed. Specify + * the minimum oom_score_adj values in + * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in + * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma + * separated list of numbers in ascending order. + * + * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and + * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill + * processes with a oom_score_adj value of 8 or higher when the free memory + * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or + * higher when the free memory drops below 1024 pages. + * + * The driver considers memory used for caches to be free, but if a large + * percentage of the cached memory is locked this can be very inaccurate + * and processes may not get killed until the normal oom killer is triggered. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include +#include + +#ifdef CONFIG_HIGHMEM +#define _ZONE ZONE_HIGHMEM +#else +#define _ZONE ZONE_NORMAL +#endif + +#define CREATE_TRACE_POINTS +#include "trace/lowmemorykiller.h" + +/* to enable lowmemorykiller */ +static int enable_lmk = 1; +module_param_named(enable_lmk, enable_lmk, int, 0644); + +static u32 lowmem_debug_level = 1; +static short lowmem_adj[6] = { + 0, + 1, + 6, + 12, +}; + +static int lowmem_adj_size = 4; +static int lowmem_minfree[6] = { + 3 * 512, /* 6MB */ + 2 * 1024, /* 8MB */ + 4 * 1024, /* 16MB */ + 16 * 1024, /* 64MB */ +}; + +static int lowmem_minfree_size = 4; +static int lmk_fast_run = 1; + +static unsigned long lowmem_deathpending_timeout; + +#define lowmem_print(level, x...) \ + do { \ + if (lowmem_debug_level >= (level)) \ + pr_info(x); \ + } while (0) + +static unsigned long lowmem_count(struct shrinker *s, + struct shrink_control *sc) +{ + if (!enable_lmk) + return 0; + + return global_node_page_state(NR_ACTIVE_ANON) + + global_node_page_state(NR_ACTIVE_FILE) + + global_node_page_state(NR_INACTIVE_ANON) + + global_node_page_state(NR_INACTIVE_FILE); +} + +static atomic_t shift_adj = ATOMIC_INIT(0); +static short adj_max_shift = 353; + +/* User knob to enable/disable adaptive lmk feature */ +static int enable_adaptive_lmk; +module_param_named(enable_adaptive_lmk, enable_adaptive_lmk, int, 0644); + +/* + * This parameter controls the behaviour of LMK when vmpressure is in + * the range of 90-94. Adaptive lmk triggers based on number of file + * pages wrt vmpressure_file_min, when vmpressure is in the range of + * 90-94. Usually this is a pseudo minfree value, higher than the + * highest configured value in minfree array. 
+ */ +static int vmpressure_file_min; +module_param_named(vmpressure_file_min, vmpressure_file_min, int, 0644); + +/* User knob to enable/disable oom reaping feature */ +static int oom_reaper; +module_param_named(oom_reaper, oom_reaper, int, 0644); + +enum { + VMPRESSURE_NO_ADJUST = 0, + VMPRESSURE_ADJUST_ENCROACH, + VMPRESSURE_ADJUST_NORMAL, +}; + +static int adjust_minadj(short *min_score_adj) +{ + int ret = VMPRESSURE_NO_ADJUST; + + if (!enable_adaptive_lmk) + return 0; + + if (atomic_read(&shift_adj) && + (*min_score_adj > adj_max_shift)) { + if (*min_score_adj == OOM_SCORE_ADJ_MAX + 1) + ret = VMPRESSURE_ADJUST_ENCROACH; + else + ret = VMPRESSURE_ADJUST_NORMAL; + *min_score_adj = adj_max_shift; + } + atomic_set(&shift_adj, 0); + + return ret; +} + +static int lmk_vmpressure_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int other_free, other_file; + unsigned long pressure = action; + int array_size = ARRAY_SIZE(lowmem_adj); + + if (!enable_adaptive_lmk) + return 0; + + if (pressure >= 95) { + other_file = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages(); + other_free = global_zone_page_state(NR_FREE_PAGES); + + atomic_set(&shift_adj, 1); + trace_almk_vmpressure(pressure, other_free, other_file); + } else if (pressure >= 90) { + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + + other_file = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages(); + + other_free = global_zone_page_state(NR_FREE_PAGES); + + if (other_free < lowmem_minfree[array_size - 1] && + other_file < vmpressure_file_min) { + atomic_set(&shift_adj, 1); + trace_almk_vmpressure(pressure, other_free, other_file); + } + } else if (atomic_read(&shift_adj)) { + other_file = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages(); + + other_free = global_zone_page_state(NR_FREE_PAGES); + /* + * shift_adj would have been set by a previous invocation + * of notifier, which is not followed by a lowmem_shrink yet. + * Since vmpressure has improved, reset shift_adj to avoid + * false adaptive LMK trigger. 
+ */ + trace_almk_vmpressure(pressure, other_free, other_file); + atomic_set(&shift_adj, 0); + } + + return 0; +} + +static struct notifier_block lmk_vmpr_nb = { + .notifier_call = lmk_vmpressure_notifier, +}; + +static int test_task_flag(struct task_struct *p, int flag) +{ + struct task_struct *t; + + for_each_thread(p, t) { + task_lock(t); + if (test_tsk_thread_flag(t, flag)) { + task_unlock(t); + return 1; + } + task_unlock(t); + } + + return 0; +} + +static int test_task_state(struct task_struct *p, int state) +{ + struct task_struct *t; + + for_each_thread(p, t) { + task_lock(t); + if (t->state & state) { + task_unlock(t); + return 1; + } + task_unlock(t); + } + + return 0; +} + +static int test_task_lmk_waiting(struct task_struct *p) +{ + struct task_struct *t; + + for_each_thread(p, t) { + task_lock(t); + if (task_lmk_waiting(t)) { + task_unlock(t); + return 1; + } + task_unlock(t); + } + + return 0; +} + +static DEFINE_MUTEX(scan_mutex); + +static int can_use_cma_pages(gfp_t gfp_mask) +{ + int can_use = 0; + int mtype = gfpflags_to_migratetype(gfp_mask); + int i = 0; + int *mtype_fallbacks = get_migratetype_fallbacks(mtype); + + if (is_migrate_cma(mtype)) { + can_use = 1; + } else { + for (i = 0;; i++) { + int fallbacktype = mtype_fallbacks[i]; + + if (is_migrate_cma(fallbacktype)) { + can_use = 1; + break; + } + + if (fallbacktype == MIGRATE_TYPES) + break; + } + } + return can_use; +} + +void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx, + int *other_free, int *other_file, + int use_cma_pages) +{ + struct zone *zone; + struct zoneref *zoneref; + int zone_idx; + + for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) { + zone_idx = zonelist_zone_idx(zoneref); + if (zone_idx == ZONE_MOVABLE) { + if (!use_cma_pages && other_free) + *other_free -= + zone_page_state(zone, NR_FREE_CMA_PAGES); + continue; + } + + if (zone_idx > classzone_idx) { + if (other_free != NULL) + *other_free -= zone_page_state(zone, + NR_FREE_PAGES); + if (other_file != NULL) + *other_file -= zone_page_state(zone, + NR_ZONE_INACTIVE_FILE) + + zone_page_state(zone, + NR_ZONE_ACTIVE_FILE); + } else if (zone_idx < classzone_idx) { + if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) && + other_free) { + if (!use_cma_pages) { + *other_free -= min( + zone->lowmem_reserve[classzone_idx] + + zone_page_state( + zone, NR_FREE_CMA_PAGES), + zone_page_state( + zone, NR_FREE_PAGES)); + } else { + *other_free -= + zone->lowmem_reserve[classzone_idx]; + } + } else { + if (other_free) + *other_free -= + zone_page_state(zone, NR_FREE_PAGES); + } + } + } +} + +#ifdef CONFIG_HIGHMEM +static void adjust_gfp_mask(gfp_t *gfp_mask) +{ + struct zone *preferred_zone; + struct zoneref *zref; + struct zonelist *zonelist; + enum zone_type high_zoneidx; + + if (current_is_kswapd()) { + zonelist = node_zonelist(0, *gfp_mask); + high_zoneidx = gfp_zone(*gfp_mask); + zref = first_zones_zonelist(zonelist, high_zoneidx, NULL); + preferred_zone = zref->zone; + + if (high_zoneidx == ZONE_NORMAL) { + if (zone_watermark_ok_safe( + preferred_zone, 0, + high_wmark_pages(preferred_zone), 0)) + *gfp_mask |= __GFP_HIGHMEM; + } else if (high_zoneidx == ZONE_HIGHMEM) { + *gfp_mask |= __GFP_HIGHMEM; + } + } +} +#else +static void adjust_gfp_mask(gfp_t *unused) +{ +} +#endif + +void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc) +{ + gfp_t gfp_mask; + struct zone *preferred_zone; + struct zoneref *zref; + struct zonelist *zonelist; + enum zone_type high_zoneidx, classzone_idx; + unsigned long 
balance_gap; + int use_cma_pages; + + gfp_mask = sc->gfp_mask; + adjust_gfp_mask(&gfp_mask); + + zonelist = node_zonelist(0, gfp_mask); + high_zoneidx = gfp_zone(gfp_mask); + zref = first_zones_zonelist(zonelist, high_zoneidx, NULL); + preferred_zone = zref->zone; + classzone_idx = zone_idx(preferred_zone); + use_cma_pages = can_use_cma_pages(gfp_mask); + + balance_gap = min(low_wmark_pages(preferred_zone), + (preferred_zone->present_pages + + 100-1) / + 100); + + if (likely(current_is_kswapd() && zone_watermark_ok(preferred_zone, 0, + high_wmark_pages(preferred_zone) + SWAP_CLUSTER_MAX + + balance_gap, 0, 0))) { + if (lmk_fast_run) + tune_lmk_zone_param(zonelist, classzone_idx, other_free, + other_file, use_cma_pages); + else + tune_lmk_zone_param(zonelist, classzone_idx, other_free, + NULL, use_cma_pages); + + if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) { + if (!use_cma_pages) { + *other_free -= min( + preferred_zone->lowmem_reserve[_ZONE] + + zone_page_state( + preferred_zone, NR_FREE_CMA_PAGES), + zone_page_state( + preferred_zone, NR_FREE_PAGES)); + } else { + *other_free -= + preferred_zone->lowmem_reserve[_ZONE]; + } + } else { + *other_free -= zone_page_state(preferred_zone, + NR_FREE_PAGES); + } + + lowmem_print(4, "lowmem_shrink of kswapd tuning for highmem " + "ofree %d, %d\n", *other_free, *other_file); + } else { + tune_lmk_zone_param(zonelist, classzone_idx, other_free, + other_file, use_cma_pages); + + if (!use_cma_pages) { + *other_free -= + zone_page_state(preferred_zone, NR_FREE_CMA_PAGES); + } + + lowmem_print(4, "lowmem_shrink tuning for others ofree %d, " + "%d\n", *other_free, *other_file); + } +} + +static void mark_lmk_victim(struct task_struct *tsk) +{ + struct mm_struct *mm = tsk->mm; + + if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { + atomic_inc(&tsk->signal->oom_mm->mm_count); + set_bit(MMF_OOM_VICTIM, &mm->flags); + } +} + +static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) +{ + struct task_struct *tsk; + struct task_struct *selected = NULL; + unsigned long rem = 0; + int tasksize; + int i; + int ret = 0; + short min_score_adj = OOM_SCORE_ADJ_MAX + 1; + int minfree = 0; + int selected_tasksize = 0; + short selected_oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free; + int other_file; + + if (!mutex_trylock(&scan_mutex)) + return 0; + + other_free = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; + + if (global_node_page_state(NR_SHMEM) + total_swapcache_pages() + + global_node_page_state(NR_UNEVICTABLE) < + global_node_page_state(NR_FILE_PAGES)) + other_file = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + global_node_page_state(NR_UNEVICTABLE) - + total_swapcache_pages(); + else + other_file = 0; + + tune_lmk_param(&other_free, &other_file, sc); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) + array_size = lowmem_minfree_size; + for (i = 0; i < array_size; i++) { + minfree = lowmem_minfree[i]; + if (other_free < minfree && other_file < minfree) { + min_score_adj = lowmem_adj[i]; + break; + } + } + + ret = adjust_minadj(&min_score_adj); + + lowmem_print(3, "%s %lu, %x, ofree %d %d, ma %hd\n", + __func__, sc->nr_to_scan, sc->gfp_mask, other_free, + other_file, min_score_adj); + + if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) { + trace_almk_shrink(0, ret, other_free, other_file, 0); + lowmem_print(5, "%s %lu, %x, return 0\n", + __func__, sc->nr_to_scan, sc->gfp_mask); +
mutex_unlock(&scan_mutex); + return 0; + } + + selected_oom_score_adj = min_score_adj; + + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *p; + short oom_score_adj; + + if (tsk->flags & PF_KTHREAD) + continue; + + /* if task no longer has any memory ignore it */ + if (test_task_flag(tsk, TIF_MM_RELEASED)) + continue; + + if (oom_reaper) { + p = find_lock_task_mm(tsk); + if (!p) + continue; + + if (test_bit(MMF_OOM_VICTIM, &p->mm->flags)) { + if (test_bit(MMF_OOM_SKIP, &p->mm->flags)) { + task_unlock(p); + continue; + } else if (time_before_eq(jiffies, + lowmem_deathpending_timeout)) { + task_unlock(p); + rcu_read_unlock(); + mutex_unlock(&scan_mutex); + return 0; + } + } + } else { + if (time_before_eq(jiffies, + lowmem_deathpending_timeout)) + if (test_task_lmk_waiting(tsk)) { + rcu_read_unlock(); + mutex_unlock(&scan_mutex); + return 0; + } + + p = find_lock_task_mm(tsk); + if (!p) + continue; + } + + oom_score_adj = p->signal->oom_score_adj; + if (oom_score_adj < min_score_adj) { + task_unlock(p); + continue; + } + tasksize = get_mm_rss(p->mm); + task_unlock(p); + if (tasksize <= 0) + continue; + if (selected) { + if (oom_score_adj < selected_oom_score_adj) + continue; + if (oom_score_adj == selected_oom_score_adj && + tasksize <= selected_tasksize) + continue; + } + selected = p; + selected_tasksize = tasksize; + selected_oom_score_adj = oom_score_adj; + lowmem_print(3, "select '%s' (%d), adj %hd, size %d, to kill\n", + p->comm, p->pid, oom_score_adj, tasksize); + } + if (selected) { + long cache_size = other_file * (long)(PAGE_SIZE / 1024); + long cache_limit = minfree * (long)(PAGE_SIZE / 1024); + long free = other_free * (long)(PAGE_SIZE / 1024); + + if (test_task_lmk_waiting(selected) && + (test_task_state(selected, TASK_UNINTERRUPTIBLE))) { + lowmem_print(2, "'%s' (%d) is already killed\n", + selected->comm, + selected->pid); + rcu_read_unlock(); + mutex_unlock(&scan_mutex); + return 0; + } + + task_lock(selected); + send_sig(SIGKILL, selected, 0); + if (selected->mm) { + task_set_lmk_waiting(selected); + if (!test_bit(MMF_OOM_SKIP, &selected->mm->flags) && + oom_reaper) { + mark_lmk_victim(selected); + wake_oom_reaper(selected); + } + } + task_unlock(selected); + trace_lowmemory_kill(selected, cache_size, cache_limit, free); + lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n" + "to free %ldkB on behalf of '%s' (%d) because\n" + "cache %ldkB is below limit %ldkB for oom score %hd\n" + "Free memory is %ldkB above reserved.\n" + "Free CMA is %ldkB\n" + "Total reserve is %ldkB\n" + "Total free pages is %ldkB\n" + "Total file cache is %ldkB\n" + "GFP mask is 0x%x\n", + selected->comm, selected->pid, selected->tgid, + selected_oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + cache_size, cache_limit, + min_score_adj, + free, + global_zone_page_state(NR_FREE_CMA_PAGES) * + (long)(PAGE_SIZE / 1024), + totalreserve_pages * (long)(PAGE_SIZE / 1024), + global_zone_page_state(NR_FREE_PAGES) * + (long)(PAGE_SIZE / 1024), + global_node_page_state(NR_FILE_PAGES) * + (long)(PAGE_SIZE / 1024), + sc->gfp_mask); + + if (lowmem_debug_level >= 2 && selected_oom_score_adj == 0) { + show_mem(SHOW_MEM_FILTER_NODES, NULL); + show_mem_call_notifiers(); + dump_tasks(NULL, NULL); + } + + lowmem_deathpending_timeout = jiffies + HZ; + rem += selected_tasksize; + rcu_read_unlock(); + /* give the system time to free up the memory */ + msleep_interruptible(20); + trace_almk_shrink(selected_tasksize, ret, + other_free, other_file, + 
selected_oom_score_adj); + } else { + trace_almk_shrink(1, ret, other_free, other_file, 0); + rcu_read_unlock(); + } + + lowmem_print(4, "%s %lu, %x, return %lu\n", + __func__, sc->nr_to_scan, sc->gfp_mask, rem); + mutex_unlock(&scan_mutex); + return rem; +} + +static struct shrinker lowmem_shrinker = { + .scan_objects = lowmem_scan, + .count_objects = lowmem_count, + .seeks = DEFAULT_SEEKS * 16 +}; + +static int __init lowmem_init(void) +{ + register_shrinker(&lowmem_shrinker); + vmpressure_notifier_register(&lmk_vmpr_nb); + return 0; +} +device_initcall(lowmem_init); + +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +static short lowmem_oom_adj_to_oom_score_adj(short oom_adj) +{ + if (oom_adj == OOM_ADJUST_MAX) + return OOM_SCORE_ADJ_MAX; + else + return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; +} + +static void lowmem_autodetect_oom_adj_values(void) +{ + int i; + short oom_adj; + short oom_score_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + + if (array_size <= 0) + return; + + oom_adj = lowmem_adj[array_size - 1]; + if (oom_adj > OOM_ADJUST_MAX) + return; + + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + if (oom_score_adj <= OOM_ADJUST_MAX) + return; + + lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n"); + for (i = 0; i < array_size; i++) { + oom_adj = lowmem_adj[i]; + oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj); + lowmem_adj[i] = oom_score_adj; + lowmem_print(1, "oom_adj %d => oom_score_adj %d\n", + oom_adj, oom_score_adj); + } +} + +static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp) +{ + int ret; + + ret = param_array_ops.set(val, kp); + + /* HACK: Autodetect oom_adj values in lowmem_adj array */ + lowmem_autodetect_oom_adj_values(); + + return ret; +} + +static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp) +{ + return param_array_ops.get(buffer, kp); +} + +static void lowmem_adj_array_free(void *arg) +{ + param_array_ops.free(arg); +} + +static struct kernel_param_ops lowmem_adj_array_ops = { + .set = lowmem_adj_array_set, + .get = lowmem_adj_array_get, + .free = lowmem_adj_array_free, +}; + +static const struct kparam_array __param_arr_adj = { + .max = ARRAY_SIZE(lowmem_adj), + .num = &lowmem_adj_size, + .ops = ¶m_ops_short, + .elemsize = sizeof(lowmem_adj[0]), + .elem = lowmem_adj, +}; +#endif + +/* + * not really modular, but the easiest way to keep compat with existing + * bootargs behaviour is to continue using module_param here. 
+ */ +module_param_named(cost, lowmem_shrinker.seeks, int, 0644); +#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES +module_param_cb(adj, &lowmem_adj_array_ops, + .arr = &__param_arr_adj, + 0644); +__MODULE_PARM_TYPE(adj, "array of short"); +#else +module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644); +#endif +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, + S_IRUGO | S_IWUSR); +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); +module_param_named(lmk_fast_run, lmk_fast_run, int, S_IRUGO | S_IWUSR); + diff --git a/drivers/staging/android/trace/lowmemorykiller.h b/drivers/staging/android/trace/lowmemorykiller.h new file mode 100644 index 0000000000000000000000000000000000000000..f43d3fae75eeb236a785b5bd3b8c4ca2d79808a3 --- /dev/null +++ b/drivers/staging/android/trace/lowmemorykiller.h @@ -0,0 +1,41 @@ +#undef TRACE_SYSTEM +#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace +#define TRACE_SYSTEM lowmemorykiller + +#if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LOWMEMORYKILLER_H + +#include + +TRACE_EVENT(lowmemory_kill, + TP_PROTO(struct task_struct *killed_task, long cache_size, \ + long cache_limit, long free), + + TP_ARGS(killed_task, cache_size, cache_limit, free), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(long, pagecache_size) + __field(long, pagecache_limit) + __field(long, free) + ), + + TP_fast_assign( + memcpy(__entry->comm, killed_task->comm, TASK_COMM_LEN); + __entry->pid = killed_task->pid; + __entry->pagecache_size = cache_size; + __entry->pagecache_limit = cache_limit; + __entry->free = free; + ), + + TP_printk("%s (%d), page cache %ldkB (limit %ldkB), free %ldKb", + __entry->comm, __entry->pid, __entry->pagecache_size, + __entry->pagecache_limit, __entry->free) +); + + +#endif /* if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h index a725b84bcf4537b94a27e21c347019fe80fc1b1d..d6fbd510495bf91450bc55f0e63f4ae79f834c6c 100644 --- a/drivers/staging/android/uapi/msm_ion.h +++ b/drivers/staging/android/uapi/msm_ion.h @@ -17,6 +17,7 @@ enum msm_ion_heap_types { ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START, ION_HEAP_TYPE_SYSTEM_SECURE, ION_HEAP_TYPE_HYP_CMA, + ION_HEAP_TYPE_SECURE_CARVEOUT, }; /** @@ -44,6 +45,7 @@ enum ion_heap_ids { * Newly added heap ids have to be #define(d) since all API changes must * include a new #define. 
*/ +#define ION_SECURE_CARVEOUT_HEAP_ID 14 #define ION_QSECOM_TA_HEAP_ID 19 #define ION_AUDIO_HEAP_ID 28 #define ION_CAMERA_HEAP_ID 20 @@ -62,6 +64,8 @@ enum ion_heap_ids { #define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25) #define ION_FLAG_CP_APP ION_BIT(26) #define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27) +/* ION_FLAG_ALLOW_NON_CONTIG uses ION_BIT(28) */ +#define ION_FLAG_CP_CDSP ION_BIT(29) #define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30) #define ION_FLAGS_CP_MASK 0x7FFF0000 @@ -92,15 +96,15 @@ enum ion_heap_ids { #define ION_IOC_MSM_MAGIC 'M' struct ion_prefetch_regions { + __u64 sizes; __u32 vmid; - __u64 __user *sizes; __u32 nr_sizes; }; struct ion_prefetch_data { - __u32 heap_id; __u64 len; - struct ion_prefetch_regions __user *regions; + __u64 regions; + __u32 heap_id; __u32 nr_regions; }; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 0d99b242e82e3f84da25a47564f96db60be4b5f5..6cb933ecc084029f420fb2e13784a01f9c97e434 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, bytes = min(bytes, data_len); if (!bio) { +new_bio: nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); nr_pages -= nr_vecs; /* @@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, * be allocated with pscsi_get_bio() above. */ bio = NULL; + goto new_bio; } data_len -= bytes; diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 58a5009eacc388b45231d747e7d142163f38e627..a548c369579773a5d19914b0f6d5e725bbc55bb7 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -181,6 +181,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, if (IS_ERR(shm)) return PTR_ERR(shm); + /* + * Ensure offset + size does not overflow offset + * and does not overflow the size of the referred + * shared memory object. 
+ */ + if ((ip.a + ip.b) < ip.a || + (ip.a + ip.b) > shm->size) { + tee_shm_put(shm); + return -EINVAL; + } + params[n].u.memref.shm_offs = ip.a; params[n].u.memref.size = ip.b; params[n].u.memref.shm = shm; diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index a4d6a0e2e9938190a3bd0c6d081a02bbd6f209fd..23ad4f9f21438e45a819da46962025eeeb922590 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -213,8 +213,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) rate = clk_get_rate(data->clk); if ((rate < 1920000) || (rate > 5000000)) dev_warn(&pdev->dev, - "Clock %pCn running at %pCr Hz is outside of the recommended range: 1.92 to 5MHz\n", - data->clk, data->clk); + "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n", + data->clk, rate); /* register of thermal sensor and get info from DT */ tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data, diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index 8a7f24dd9315e3be809ef98e4513506561be04fb..0c19fcd56a0da02713e93778afc78c84017aeeef 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv) return -EFAULT; } + priv->priv = obj; obj->max_state = p->package.count - 1; obj->cdev = thermal_cooling_device_register(acpi_device_bid(priv->adev), @@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv) if (IS_ERR(obj->cdev)) result = PTR_ERR(obj->cdev); - priv->priv = obj; - kfree(buf.pointer); /* TODO: add ACPI notification support */ diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c index 31fcfde67a0bc2c0605a55c5db237db4acd93bc2..9c202319844024a19ff2e4120577eb616e57988e 100644 --- a/drivers/thermal/qcom/qti_virtual_sensor.c +++ b/drivers/thermal/qcom/qti_virtual_sensor.c @@ -120,6 +120,25 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { "mhm-usr"}, .logic = VIRT_MAXIMUM, }, + { + .virt_zone_name = "cpuss0-max-step", + .num_sensors = 4, + .sensor_names = {"cpuss-0-usr", + "cpuss-1-usr", + "cpuss-2-usr", + "cpuss-3-usr"}, + .logic = VIRT_MAXIMUM, + }, + { + .virt_zone_name = "apc1-max-step", + .num_sensors = 4, + .sensor_names = {"cpu-1-0-usr", + "cpu-1-1-usr", + "cpu-1-2-usr", + "cpu-1-3-usr"}, + .logic = VIRT_MAXIMUM, + }, + }; int qti_virtual_sensor_register(struct device *dev) diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 833771bca0a593008967905d259c6780cb512cae..da04ba1ecf68a7c6235a8d35a565c2d150881da9 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1100,13 +1100,14 @@ static int omap8250_no_handle_irq(struct uart_port *port) return 0; } +static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE; static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE; static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE; static const struct of_device_id omap8250_dt_ids[] = { { .compatible = "ti,omap2-uart" }, { .compatible = "ti,omap3-uart" }, - { .compatible = "ti,omap4-uart" }, + { .compatible = "ti,omap4-uart", .data = &omap4_habit, }, { .compatible = "ti,am3352-uart", .data = &am3352_habit, }, { .compatible = "ti,am4372-uart", .data = &am3352_habit, }, { .compatible = "ti,dra742-uart", .data = &dra742_habit, }, 
@@ -1343,6 +1344,19 @@ static int omap8250_soft_reset(struct device *dev) int sysc; int syss; + /* + * At least on omap4, unused uarts may not idle after reset without + * a basic scr dma configuration even with no dma in use. The + * module clkctrl status bits will be 1 instead of 3 blocking idle + * for the whole clockdomain. The softreset below will clear scr, + * and we restore it on resume so this is safe to do on all SoCs + * needing omap8250_soft_reset() quirk. Do it in two writes as + * recommended in the comment for omap8250_update_scr(). + */ + serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1); + serial_out(up, UART_OMAP_SCR, + OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL); + sysc = serial_in(up, UART_OMAP_SYSC); /* softreset the UART */ diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 111e6a95077978c2c30bfe2a2de4a0d095e8d76e..c9f701aca677fc2e45ddc02961859d85e80e8890 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1747,10 +1747,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap) */ static void pl011_enable_interrupts(struct uart_amba_port *uap) { + unsigned int i; + spin_lock_irq(&uap->port.lock); /* Clear out any spuriously appearing RX interrupts */ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); + + /* + * RXIS is asserted only when the RX FIFO transitions from below + * to above the trigger threshold. If the RX FIFO is already + * full to the threshold this can't happen and RXIS will now be + * stuck off. Drain the RX FIFO explicitly to fix this: + */ + for (i = 0; i < uap->fifosize * 2; ++i) { + if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE) + break; + + pl011_read(uap, REG_DR); + } + uap->im = UART011_RTIM; if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index a0b24bc0978384d362797202edcb63900ef9cb06..2286e9d73115e9d2f9a34ea7ff3ff73ac9442d01 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1786,7 +1786,6 @@ static int atmel_startup(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct tty_struct *tty = port->state->port.tty; int retval; /* @@ -1801,8 +1800,8 @@ static int atmel_startup(struct uart_port *port) * Allocate the IRQ */ retval = request_irq(port->irq, atmel_interrupt, - IRQF_SHARED | IRQF_COND_SUSPEND, - tty ? 
tty->name : "atmel_serial", port); + IRQF_SHARED | IRQF_COND_SUSPEND, + dev_name(&pdev->dev), port); if (retval) { dev_err(port->dev, "atmel_startup - Can't get irq\n"); return retval; diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 185a9e2675e256afdaa9453a1c470142af7dd287..2ec317c1f02f826f4595b990454dfe4f4d4f4812 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -96,6 +96,7 @@ /* UART S_CMD OP codes */ #define UART_START_READ (0x1) #define UART_PARAM (0x1) +#define UART_PARAM_RFR_OPEN (BIT(7)) /* UART DMA Rx GP_IRQ_BITS */ #define UART_DMA_RX_PARITY_ERR BIT(5) @@ -610,6 +611,26 @@ static void msm_geni_serial_abort_rx(struct uart_port *uport) geni_write_reg(FORCE_DEFAULT, uport->membase, GENI_FORCE_DEFAULT_REG); } +static void msm_geni_serial_complete_rx_eot(struct uart_port *uport) +{ + int poll_done = 0, tries = 0; + struct msm_geni_serial_port *port = GET_DEV_PORT(uport); + + do { + poll_done = msm_geni_serial_poll_bit(uport, SE_DMA_RX_IRQ_STAT, + RX_EOT, true); + tries++; + } while (!poll_done && tries < 5); + + if (!poll_done) + IPC_LOG_MSG(port->ipc_log_misc, + "%s: RX_EOT, GENI:0x%x, DMA_DEBUG:0x%x\n", __func__, + geni_read_reg_nolog(uport->membase, SE_GENI_STATUS), + geni_read_reg_nolog(uport->membase, SE_DMA_DEBUG_REG0)); + else + geni_write_reg_nolog(RX_EOT, uport->membase, SE_DMA_RX_IRQ_CLR); +} + #ifdef CONFIG_CONSOLE_POLL static int msm_geni_serial_get_char(struct uart_port *uport) { @@ -996,12 +1017,14 @@ static void start_rx_sequencer(struct uart_port *uport) unsigned int geni_status; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); int ret; + u32 geni_se_param = UART_PARAM_RFR_OPEN; geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); if (geni_status & S_GENI_CMD_ACTIVE) msm_geni_serial_stop_rx(uport); - geni_setup_s_cmd(uport->membase, UART_START_READ, 0); + /* Start RX with the RFR_OPEN to keep RFR in always ready state */ + geni_setup_s_cmd(uport->membase, UART_START_READ, geni_se_param); if (port->xfer_mode == FIFO_MODE) { geni_s_irq_en = geni_read_reg_nolog(uport->membase, @@ -1075,7 +1098,7 @@ static void stop_rx_sequencer(struct uart_port *uport) unsigned int geni_m_irq_en; unsigned int geni_status; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); - u32 irq_clear = S_CMD_DONE_EN; + u32 irq_clear = S_CMD_CANCEL_EN; bool done; IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__); @@ -1097,22 +1120,33 @@ static void stop_rx_sequencer(struct uart_port *uport) /* Possible stop rx is called multiple times. */ if (!(geni_status & S_GENI_CMD_ACTIVE)) goto exit_rx_seq; + geni_cancel_s_cmd(uport->membase); /* * Ensure that the cancel goes through before polling for the * cancel control bit. 
*/ mb(); + if (!uart_console(uport)) + msm_geni_serial_complete_rx_eot(uport); + done = msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG, S_GENI_CMD_CANCEL, false); - geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); - if (!done) + if (done) { + geni_write_reg_nolog(irq_clear, uport->membase, + SE_GENI_S_IRQ_CLEAR); + goto exit_rx_seq; + } else { IPC_LOG_MSG(port->ipc_log_misc, "%s Cancel fail 0x%x\n", - __func__, geni_status); + __func__, geni_status); + } - geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR); - if ((geni_status & S_GENI_CMD_ACTIVE)) + geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS); + if ((geni_status & S_GENI_CMD_ACTIVE)) { + IPC_LOG_MSG(port->ipc_log_misc, "%s:Abort Rx, GENI:0x%x\n", + __func__, geni_status); msm_geni_serial_abort_rx(uport); + } exit_rx_seq: if (port->xfer_mode == SE_DMA && port->rx_dma) { msm_geni_serial_rx_fsm_rst(uport); @@ -1690,6 +1724,9 @@ static int msm_geni_serial_startup(struct uart_port *uport) ret = -ENXIO; goto exit_startup; } + IPC_LOG_MSG(msm_port->ipc_log_misc, "%s: FW Ver:0x%x%x\n", + __func__, + get_se_m_fw(uport->membase), get_se_s_fw(uport->membase)); get_tx_fifo_size(msm_port); if (!msm_port->port_setup) { @@ -1817,6 +1854,7 @@ static void msm_geni_serial_set_termios(struct uart_port *uport, unsigned long ser_clk_cfg = 0; struct msm_geni_serial_port *port = GET_DEV_PORT(uport); unsigned long clk_rate; + unsigned long flags; if (!uart_console(uport)) { int ret = msm_geni_serial_power_on(uport); @@ -1828,7 +1866,13 @@ static void msm_geni_serial_set_termios(struct uart_port *uport, return; } } + /* Take a spinlock else stop_rx causes a race with an ISR due to Cancel + * and FSM_RESET. This also has a potential race with the dma_map/unmap + * operations of ISR. + */ + spin_lock_irqsave(&uport->lock, flags); msm_geni_serial_stop_rx(uport); + spin_unlock_irqrestore(&uport->lock, flags); /* baud rate */ baud = uart_get_baud_rate(uport, termios, old, 300, 4000000); port->cur_baud = baud; @@ -2514,7 +2558,6 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct msm_geni_serial_port *port = platform_get_drvdata(pdev); int ret = 0; - u32 uart_manual_rfr = 0; u32 geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS); @@ -2526,23 +2569,8 @@ static int msm_geni_serial_runtime_suspend(struct device *dev) * Resources off */ disable_irq(port->uport.irq); - /* - * If the clients haven't done a manual flow on/off then go ahead and - * set this to manual flow on. - */ - if (!port->manual_flow) { - uart_manual_rfr |= (UART_MANUAL_RFR_EN | UART_RFR_READY); - geni_write_reg_nolog(uart_manual_rfr, port->uport.membase, - SE_UART_MANUAL_RFR); - /* - * Ensure that the manual flow on writes go through before - * doing a stop_rx else we could end up flowing off the peer. 
- */ - mb(); - IPC_LOG_MSG(port->ipc_log_pwr, "%s: Manual Flow ON 0x%x\n", - __func__, uart_manual_rfr); - } stop_rx_sequencer(&port->uport); + geni_status = geni_read_reg_nolog(port->uport.membase, SE_GENI_STATUS); if ((geni_status & M_GENI_CMD_ACTIVE)) stop_tx_sequencer(&port->uport); ret = se_geni_resources_off(&port->serial_rsc); @@ -2587,9 +2615,6 @@ static int msm_geni_serial_runtime_resume(struct device *dev) goto exit_runtime_resume; } start_rx_sequencer(&port->uport); - if (!port->manual_flow) - geni_write_reg_nolog(0, port->uport.membase, - SE_UART_MANUAL_RFR); /* Ensure that the Rx is running before enabling interrupts */ mb(); if (pm_runtime_enabled(dev)) diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index bedd4bdec4ab972b9bd323f0c2d3d806811c22f6..57baa84ccf865da0b9bd4b8571b787734f19a0ab 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -865,15 +865,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) dma->rx_conf.direction = DMA_DEV_TO_MEM; dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH; - dma->rx_conf.src_maxburst = 16; + dma->rx_conf.src_maxburst = 1; dma->tx_conf.direction = DMA_MEM_TO_DEV; dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH; - if (dma_get_cache_alignment() >= 16) - dma->tx_conf.dst_maxburst = 16; - else - dma->tx_conf.dst_maxburst = 1; + dma->tx_conf.dst_maxburst = 1; dma->rx_chan = dma_request_chan(p->port.dev, "rx"); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8a58ee32ff618ce9f3260521ed1d7689a0c1598c..8bc8fe2b75f7a34cad43ac693a1faf7c4de8720d 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2669,8 +2669,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev) dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i], PTR_ERR(clk)); else - dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i], - clk, clk); + dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i], + clk, clk_get_rate(clk)); sci_port->clks[i] = IS_ERR(clk) ? 
NULL : clk; } return 0; @@ -2854,16 +2854,15 @@ static void serial_console_write(struct console *co, const char *s, unsigned long flags; int locked = 1; - local_irq_save(flags); #if defined(SUPPORT_SYSRQ) if (port->sysrq) locked = 0; else #endif if (oops_in_progress) - locked = spin_trylock(&port->lock); + locked = spin_trylock_irqsave(&port->lock, flags); else - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); /* first save SCSCR then disable interrupts, keep clock source */ ctrl = serial_port_in(port, SCSCR); @@ -2883,8 +2882,7 @@ static void serial_console_write(struct console *co, const char *s, serial_port_out(port, SCSCR, ctrl); if (locked) - spin_unlock(&port->lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&port->lock, flags); } static int serial_console_setup(struct console *co, char *options) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 377b3592384e0d9421c86866f5609dd567c8dcc3..4e6a3713fae0dc68375afbfc9425ef1e6d9accba 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -58,10 +58,11 @@ static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static bool __read_mostly sysrq_always_enabled; -static bool sysrq_on(void) +bool sysrq_on(void) { return sysrq_enabled || sysrq_always_enabled; } +EXPORT_SYMBOL(sysrq_on); /* * A value of 1 means 'all', other nonzero values are an op mask: diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b0eb86530af9f1f6878d683e4e8329cd64729971..5390b6894ccb08a7346cd8c50e24b5dd83ee09a1 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4532,7 +4532,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, * reset. But only on the first attempt, * lest we get into a time out/reset loop */ - if (r == 0 || (r == -ETIMEDOUT && retries == 0)) + if (r == 0 || (r == -ETIMEDOUT && + retries == 0 && + udev->speed > USB_SPEED_FULL)) break; } udev->descriptor.bMaxPacketSize0 = diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index a6e2a21e5089190f72cd11e0699feb670e7580fa..5e3c665e5e7b6e709699f62f2b92b8f93bf28e43 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -132,6 +132,27 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) dwc3_writel(dwc->regs, DWC3_GCTL, reg); } +void dwc3_en_sleep_mode(struct dwc3 *dwc) +{ + u32 reg; + + if (dwc->dis_enblslpm_quirk) + return; + + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + reg |= DWC3_GUSB2PHYCFG_ENBLSLPM; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); +} + +void dwc3_dis_sleep_mode(struct dwc3 *dwc) +{ + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); +} + void dwc3_set_mode(struct dwc3 *dwc, u32 mode) { unsigned long flags; @@ -660,6 +681,7 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc) /* Detected DWC_usb31 IP */ dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER); dwc->revision |= DWC3_REVISION_IS_DWC31; + dwc->versiontype = dwc3_readl(dwc->regs, DWC3_VER_TYPE); } else { return false; } @@ -845,8 +867,48 @@ int dwc3_core_init(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } + /* + * STAR: 9001346572:Host stops transfers to other EPs when a single + * USB2.0 EP NAKs continuously requires to disable internal retry + * feature + */ + if ((dwc->revision == DWC3_USB31_REVISION_170A) && + (dwc->versiontype == DWC3_USB31_VER_TYPE_GA)) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_USB20_RETRY_DISABLE; + 
dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } + dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT); + /* + * Workaround for STAR 9001198391 which affects dwc3 core + * version 3.20a only. Default HP timer value is incorrectly + * set to 3us. Reprogram HP timer value to support USB 3.1 + * HP timer ECN. + */ + if (!dwc3_is_usb31(dwc) && dwc->revision == DWC3_REVISION_320A) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); + reg &= ~DWC3_GUCTL2_HP_TIMER_MASK; + reg |= DWC3_GUCTL2_HP_TIMER(11); + dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); + } + + /* + * Workaround for STAR 9001285599 which affects dwc3 core version 3.20a + * only. If the PM TIMER ECN is enabled thru GUCTL2[19], then link + * compliance test (TD7.21) may fail. If the ECN is not enabled + * GUCTL2[19] = 0), the controller will use the old timer value (5us), + * which is still fine for Link Compliance test. Hence Do not enable + * PM TIMER ECN in V3.20a by setting GUCTL2[19] by default, + * instead use GUCTL2[19] = 0. + */ + if (dwc->revision == DWC3_REVISION_320A) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL2); + reg &= ~DWC3_GUCTL2_LC_TIMER; + dwc3_writel(dwc->regs, DWC3_GUCTL2, reg); + } + return 0; err3: diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 64e0fd0b33cf654fd70dccc4267dc1f972ca023a..aaf2311bb4fb5fdcda597e2bcf9ad482be0b3010 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -113,6 +113,7 @@ #define DWC3_GPRTBIMAP_FS0 0xc188 #define DWC3_GPRTBIMAP_FS1 0xc18c #define DWC3_GUCTL2 0xc19c +#define DWC3_GUCTL3 0xc60c #define DWC3_VER_NUMBER 0xc1a0 #define DWC3_VER_TYPE 0xc1a4 @@ -160,6 +161,11 @@ #define DWC3_OSTS 0xcc10 /* DWC 3.1 Link Registers */ +#define DWC31_LINK_LU3LFPSRXTIM(n) (0xd010 + ((n) * 0x80)) +#define GEN2_U3_EXIT_RSP_RX_CLK(n) ((n) << 16) +#define GEN2_U3_EXIT_RSP_RX_CLK_MASK GEN2_U3_EXIT_RSP_RX_CLK(0xff) +#define GEN1_U3_EXIT_RSP_RX_CLK(n) (n) +#define GEN1_U3_EXIT_RSP_RX_CLK_MASK GEN1_U3_EXIT_RSP_RX_CLK(0xff) #define DWC31_LINK_GDBGLTSSM 0xd050 /* Bit fields */ @@ -320,6 +326,12 @@ /* Global User Control Register 2 */ #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) +#define DWC3_GUCTL2_HP_TIMER(n) ((n) << 21) +#define DWC3_GUCTL2_HP_TIMER_MASK DWC3_GUCTL2_HP_TIMER(0x1f) +#define DWC3_GUCTL2_LC_TIMER (1 << 19) + +/* Global User Control Register 3 */ +#define DWC3_GUCTL3_USB20_RETRY_DISABLE BIT(16) /* Device Configuration Register */ #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) @@ -1053,6 +1065,7 @@ struct dwc3 { #define DWC3_REVISION_290A 0x5533290a #define DWC3_REVISION_300A 0x5533300a #define DWC3_REVISION_310A 0x5533310a +#define DWC3_REVISION_320A 0x5533320a /* * NOTICE: we're using bit 31 as a "is usb 3.1" flag. 
This is really
@@ -1061,6 +1074,12 @@ struct dwc3 {
#define DWC3_REVISION_IS_DWC31 0x80000000
#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_170A (0x3137302a | DWC3_REVISION_IS_DWC31)
+
+ /* valid only for dwc31 configurations */
+ u32 versiontype;
+#define DWC3_USB31_VER_TYPE_EA06 0x65613036
+#define DWC3_USB31_VER_TYPE_GA 0x67612a2a
 enum dwc3_ep0_next ep0_next_event;
 enum dwc3_ep0_state ep0state;
@@ -1322,6 +1341,8 @@ struct dwc3_gadget_ep_cmd_params {
 /* prototypes */
 void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+void dwc3_en_sleep_mode(struct dwc3 *dwc);
+void dwc3_dis_sleep_mode(struct dwc3 *dwc);
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 85142b7e5d7ed6bd842d397ccb61fb513252b09c..a4a5d7ee2686e63d5fef64bc033ec7ff0f8f016b 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -148,6 +148,14 @@ enum msm_usb_irq {
 USB_MAX_IRQ
 };
+enum bus_vote {
+ BUS_VOTE_NONE,
+ BUS_VOTE_NOMINAL,
+ BUS_VOTE_SVS,
+ BUS_VOTE_MIN,
+ BUS_VOTE_MAX
+};
+
 struct usb_irq {
 char *name;
 int irq;
@@ -224,6 +232,7 @@ struct dwc3_msm {
 unsigned int max_power;
 bool charging_disabled;
 enum usb_otg_state otg_state;
+ enum bus_vote override_bus_vote;
 u32 bus_perf_client;
 struct msm_bus_scale_pdata *bus_scale_table;
 struct power_supply *usb_psy;
@@ -2113,60 +2122,39 @@ static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
 }
 }
-enum bus_vote {
- BUS_VOTE_INVALID,
- BUS_VOTE_SUSPEND,
- BUS_VOTE_NOMINAL,
- BUS_VOTE_SVS
-};
-
 static int dwc3_msm_update_bus_bw(struct dwc3_msm *mdwc, enum bus_vote bv)
 {
 int ret = 0;
 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ unsigned int bv_index = mdwc->override_bus_vote ?: bv;
 if (!mdwc->bus_perf_client)
 return 0;
 dbg_event(0xFF, "bus_vote_start", bv);
- switch (bv) {
- case BUS_VOTE_SVS:
- /* On some platforms SVS does not have separate vote. Vote for
- * nominal if svs usecase does not exist
- */
- if (mdwc->bus_scale_table->num_usecases == 2)
- goto nominal_vote;
+ /* On some platforms SVS does not have separate vote.
+ * Vote for nominal if svs usecase does not exist.
+ * If the request is to set the bus_vote to _NONE,
+ * set it to _NONE irrespective of the requested vote
+ * from userspace.
+ */ + if (bv >= mdwc->bus_scale_table->num_usecases) + bv_index = BUS_VOTE_NOMINAL; + else if (bv == BUS_VOTE_NONE) + bv_index = BUS_VOTE_NONE; - /* index starts from zero */ - ret = msm_bus_scale_client_update_request( - mdwc->bus_perf_client, 2); - if (ret) - dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); - break; - case BUS_VOTE_NOMINAL: -nominal_vote: - ret = msm_bus_scale_client_update_request( - mdwc->bus_perf_client, 1); - if (ret) - dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); - break; - case BUS_VOTE_SUSPEND: - ret = msm_bus_scale_client_update_request( - mdwc->bus_perf_client, 0); - if (ret) - dev_err(mdwc->dev, "bus bw voting failed %d\n", ret); - break; - default: - dev_err(mdwc->dev, "Unsupported bus vote:%d\n", bv); - ret = -EINVAL; - } + ret = msm_bus_scale_client_update_request( + mdwc->bus_perf_client, bv_index); + if (ret) + dev_err(mdwc->dev, "bus bw voting %d failed %d\n", + bv_index, ret); - dbg_event(0xFF, "bus_vote_end", bv); + dbg_event(0xFF, "bus_vote_end", bv_index); return ret; - } + static int dwc3_msm_suspend(struct dwc3_msm *mdwc) { int ret; @@ -2298,7 +2286,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) } } - dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_SUSPEND); + dwc3_msm_update_bus_bw(mdwc, BUS_VOTE_NONE); /* * release wakeup source with timeout to defer system suspend to @@ -2468,8 +2456,7 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) /* Disable HSPHY auto suspend */ dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0), dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) & - ~(DWC3_GUSB2PHYCFG_ENBLSLPM | - DWC3_GUSB2PHYCFG_SUSPHY)); + ~DWC3_GUSB2PHYCFG_SUSPHY); /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */ if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) { @@ -2725,8 +2712,11 @@ static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc) int ret; mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC"); - if (IS_ERR(mdwc->dwc3_gdsc)) + if (IS_ERR(mdwc->dwc3_gdsc)) { + if (PTR_ERR(mdwc->dwc3_gdsc) == -EPROBE_DEFER) + return PTR_ERR(mdwc->dwc3_gdsc); mdwc->dwc3_gdsc = NULL; + } mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo"); if (IS_ERR(mdwc->xo_clk)) { @@ -2917,9 +2907,17 @@ static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc) struct extcon_dev *edev; int idx, extcon_cnt, ret = 0; bool check_vbus_state, check_id_state, phandle_found = false; + struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); - if (!of_property_read_bool(node, "extcon")) + if (!of_property_read_bool(node, "extcon")) { + if (dwc->dr_mode == USB_DR_MODE_OTG) { + dev_dbg(mdwc->dev, "%s: no extcon, simulate vbus connect\n", + __func__); + mdwc->vbus_active = true; + queue_work(mdwc->dwc3_wq, &mdwc->resume_work); + } return 0; + } extcon_cnt = of_count_phandle_with_args(node, "extcon", NULL); if (extcon_cnt < 0) { @@ -3146,6 +3144,65 @@ static ssize_t usb_compliance_mode_store(struct device *dev, } static DEVICE_ATTR_RW(usb_compliance_mode); +static ssize_t bus_vote_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dwc3_msm *mdwc = dev_get_drvdata(dev); + + if (mdwc->override_bus_vote == BUS_VOTE_MIN) + return snprintf(buf, PAGE_SIZE, "%s\n", + "Fixed bus vote: min"); + else if (mdwc->override_bus_vote == BUS_VOTE_MAX) + return snprintf(buf, PAGE_SIZE, "%s\n", + "Fixed bus vote: max"); + else + return snprintf(buf, PAGE_SIZE, "%s\n", + "Do not have fixed bus vote"); +} + +static ssize_t bus_vote_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct dwc3_msm 
*mdwc = dev_get_drvdata(dev); + struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); + bool bv_fixed = false; + enum bus_vote bv; + + if (sysfs_streq(buf, "min") + && (mdwc->bus_scale_table->num_usecases + >= (BUS_VOTE_MIN + 1))) { + bv_fixed = true; + mdwc->override_bus_vote = BUS_VOTE_MIN; + } else if (sysfs_streq(buf, "max") + && (mdwc->bus_scale_table->num_usecases + >= (BUS_VOTE_MAX + 1))) { + bv_fixed = true; + mdwc->override_bus_vote = BUS_VOTE_MAX; + } else if (sysfs_streq(buf, "cancel")) { + bv_fixed = false; + mdwc->override_bus_vote = BUS_VOTE_NONE; + } else { + dev_err(dev, "min/max/cancel only.\n"); + return -EINVAL; + } + + /* Update bus vote value only when not suspend */ + if (!atomic_read(&dwc->in_lpm)) { + if (bv_fixed) + bv = mdwc->override_bus_vote; + else if (mdwc->in_host_mode + && (mdwc->max_rh_port_speed == USB_SPEED_HIGH)) + bv = BUS_VOTE_SVS; + else + bv = BUS_VOTE_NOMINAL; + + dwc3_msm_update_bus_bw(mdwc, bv); + } + + return count; +} +static DEVICE_ATTR_RW(bus_vote); static int dwc3_msm_probe(struct platform_device *pdev) { @@ -3411,6 +3468,7 @@ static int dwc3_msm_probe(struct platform_device *pdev) device_create_file(&pdev->dev, &dev_attr_mode); device_create_file(&pdev->dev, &dev_attr_speed); device_create_file(&pdev->dev, &dev_attr_usb_compliance_mode); + device_create_file(&pdev->dev, &dev_attr_bus_vote); host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST; if (host_mode) { @@ -3664,6 +3722,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) usb_register_notify(&mdwc->host_nb); dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_en_sleep_mode(dwc); mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify; usb_register_atomic_notify(&mdwc->usbdev_nb); ret = dwc3_host_init(dwc); @@ -3686,6 +3745,21 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) mdwc->in_host_mode = true; dwc3_usb3_phy_suspend(dwc, true); + /* Reduce the U3 exit handshake timer from 8us to approximately + * 300ns to avoid lfps handshake interoperability issues + */ + if (dwc->revision == DWC3_USB31_REVISION_170A) { + dwc3_msm_write_reg_field(mdwc->base, + DWC31_LINK_LU3LFPSRXTIM(0), + GEN2_U3_EXIT_RSP_RX_CLK_MASK, 6); + dwc3_msm_write_reg_field(mdwc->base, + DWC31_LINK_LU3LFPSRXTIM(0), + GEN1_U3_EXIT_RSP_RX_CLK_MASK, 5); + dev_dbg(mdwc->dev, "LU3:%08x\n", + dwc3_msm_read_reg(mdwc->base, + DWC31_LINK_LU3LFPSRXTIM(0))); + } + /* xHCI should have incremented child count as necessary */ dbg_event(0xFF, "StrtHost psync", atomic_read(&mdwc->dev->power.usage_count)); @@ -3791,7 +3865,24 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on) */ dwc3_msm_block_reset(mdwc, false); dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_dis_sleep_mode(dwc); mdwc->in_device_mode = true; + + /* Reduce the U3 exit handshake timer from 8us to approximately + * 300ns to avoid lfps handshake interoperability issues + */ + if (dwc->revision == DWC3_USB31_REVISION_170A) { + dwc3_msm_write_reg_field(mdwc->base, + DWC31_LINK_LU3LFPSRXTIM(0), + GEN2_U3_EXIT_RSP_RX_CLK_MASK, 6); + dwc3_msm_write_reg_field(mdwc->base, + DWC31_LINK_LU3LFPSRXTIM(0), + GEN1_U3_EXIT_RSP_RX_CLK_MASK, 5); + dev_dbg(mdwc->dev, "LU3:%08x\n", + dwc3_msm_read_reg(mdwc->base, + DWC31_LINK_LU3LFPSRXTIM(0))); + } + usb_gadget_vbus_connect(&dwc->gadget); #ifdef CONFIG_SMP mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ; diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index e4b82bdcd006b1779c6a7d53de2bcaf153a90043..00cd200665f6d908a39a746d04e39763f3ef8da4 100644 --- 
a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -682,12 +682,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) DWC3_ENDPOINTS_NUM); num += 2) { dep = dwc->eps[num]; size = 0; - /* Don't change TXFRAMNUM on usb31 version */ - if (dwc3_is_usb31(dwc)) - size = dwc3_readl(dwc->regs, - DWC3_GTXFIFOSIZ(num >> 1)) & - DWC31_GTXFIFOSIZ_TXFRAMNUM; - dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size); dep->fifo_depth = 0; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 94a84547699c326aae1dd1fd66851e1ac29a4ba1..178981ad587537e05d83d08d39a0886be7f6118e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -225,14 +225,15 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep) /* MDWIDTH is represented in bits, we need it in bytes */ mdwidth >>= 3; - if (dep->endpoint.ep_type == EP_TYPE_GSI || dep->endpoint.endless) - mult = 3; - if (((dep->endpoint.maxburst > 1) && usb_endpoint_xfer_bulk(dep->endpoint.desc)) || usb_endpoint_xfer_isoc(dep->endpoint.desc)) mult = 3; + if ((dep->endpoint.maxburst > 2) && + dep->endpoint.ep_type == EP_TYPE_GSI) + mult = 6; + tmp = ((max_packet + mdwidth) * mult) + mdwidth; fifo_size = DIV_ROUND_UP(tmp, mdwidth); dep->fifo_depth = fifo_size; @@ -270,6 +271,11 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep) return -ENOMEM; } + if ((dwc->revision == DWC3_USB31_REVISION_170A) && + (dwc->versiontype == DWC3_USB31_VER_TYPE_EA06) && + usb_endpoint_xfer_isoc(dep->endpoint.desc)) + fifo_size |= DWC31_GTXFIFOSIZ_TXFRAMNUM; + dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->endpoint.ep_num), fifo_size); return 0; @@ -2183,7 +2189,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) * Per databook, when we want to stop the gadget, if a control transfer * is still in process, complete it and get the core into setup phase. 
*/ - if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) { + if (!is_on && (dwc->ep0state != EP0_SETUP_PHASE || + dwc->ep0_next_event != DWC3_EP0_COMPLETE)) { reinit_completion(&dwc->ep0_in_setup); ret = wait_for_completion_timeout(&dwc->ep0_in_setup, @@ -3082,7 +3089,8 @@ void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force) dep->flags &= ~DWC3_EP_BUSY; if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) { - dep->flags |= DWC3_EP_END_TRANSFER_PENDING; + if (dep->endpoint.ep_type != EP_TYPE_GSI) + dep->flags |= DWC3_EP_END_TRANSFER_PENDING; udelay(100); } } diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 085f6f4f3552be00ae74084763dc19558f7bd89e..0cd823c3d43124185c7eeb0abfef78e96c33c67b 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -142,21 +142,28 @@ struct gadget_config_name { struct list_head list; }; +#define MAX_USB_STRING_LEN 126 +#define MAX_USB_STRING_WITH_NULL_LEN (MAX_USB_STRING_LEN+1) + static int usb_string_copy(const char *s, char **s_copy) { int ret; char *str; char *copy = *s_copy; ret = strlen(s); - if (ret > 126) + if (ret > MAX_USB_STRING_LEN) return -EOVERFLOW; - str = kstrdup(s, GFP_KERNEL); - if (!str) - return -ENOMEM; + if (copy) { + str = copy; + } else { + str = kmalloc(MAX_USB_STRING_WITH_NULL_LEN, GFP_KERNEL); + if (!str) + return -ENOMEM; + } + strncpy(str, s, MAX_USB_STRING_WITH_NULL_LEN); if (str[ret - 1] == '\n') str[ret - 1] = '\0'; - kfree(copy); *s_copy = str; return 0; } diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 7aa2656a2328b84cfa55cd3dc10fab7427a13c10..1242ba78111efa04f1a52b4407ff9817ad3588ef 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -567,8 +567,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf, { struct acc_dev *dev = fp->private_data; struct usb_request *req; - ssize_t r = count; - unsigned xfer; + ssize_t r = count, xfer, len; int ret = 0; pr_debug("acc_read(%zu)\n", count); @@ -589,6 +588,8 @@ static ssize_t acc_read(struct file *fp, char __user *buf, goto done; } + len = ALIGN(count, dev->ep_out->maxpacket); + if (dev->rx_done) { // last req cancelled. try to get it. 
req = dev->rx_req[0]; @@ -598,7 +599,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf, requeue_req: /* queue a request */ req = dev->rx_req[0]; - req->length = count; + req->length = len; dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); if (ret < 0) { @@ -906,6 +907,8 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, memset(dev->serial, 0, sizeof(dev->serial)); dev->start_requested = 0; dev->audio_mode = 0; + strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE); + strlcpy(dev->model, "Android", ACC_STRING_SIZE); } } @@ -1208,13 +1211,13 @@ static int acc_setup(void) INIT_DELAYED_WORK(&dev->start_work, acc_start_work); INIT_WORK(&dev->hid_work, acc_hid_work); - /* _acc_dev must be set before calling usb_gadget_register_driver */ - _acc_dev = dev; - ret = misc_register(&acc_device); if (ret) goto err; + /* _acc_dev must be set before calling usb_gadget_register_driver */ + _acc_dev = dev; + return 0; err: diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index fbd79142c8a21ab8920da148858513035271ff14..09a1430c84a5edcbccf9b9b536de143164a413ab 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -1251,6 +1251,7 @@ ssize_t f_cdev_write(struct file *file, ret = -EFAULT; } else { req->length = xfer_size; + req->zero = 1; ret = usb_ep_queue(in, req, GFP_KERNEL); if (ret) { pr_err("EP QUEUE failed:%d\n", ret); diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index e8b96378a50d185a3c00d1ea5db9425fd32c35ee..3717ab5270087f219394293b7cf28374f73db34c 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -676,6 +676,11 @@ static void ipa_work_handler(struct work_struct *w) log_event_dbg("%s: ST_CON_IN_PROG_EVT_HOST_READY", __func__); } else if (event == EVT_CONNECTED) { + if (peek_event(d_port) == EVT_SUSPEND) { + log_event_dbg("%s: ST_CON_IN_PROG_EVT_SUSPEND", + __func__); + break; + } ipa_data_path_enable(d_port); d_port->sm_state = STATE_CONNECTED; log_event_dbg("%s: ST_CON_IN_PROG_EVT_CON %d", @@ -2569,7 +2574,7 @@ static int gsi_bind(struct usb_configuration *c, struct usb_function *f) info.ss_desc_hdr = gsi_eth_ss_function; info.in_epname = "gsi-epin"; info.out_epname = "gsi-epout"; - info.in_req_buf_len = GSI_IN_BUFF_SIZE; + info.in_req_buf_len = GSI_IN_RNDIS_BUFF_SIZE; gsi->d_port.in_aggr_size = GSI_IN_RNDIS_AGGR_SIZE; info.in_req_num_buf = GSI_NUM_IN_RNDIS_BUFFERS; gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE; @@ -2758,8 +2763,8 @@ static int gsi_bind(struct usb_configuration *c, struct usb_function *f) info.in_epname = "gsi-epin"; info.out_epname = "gsi-epout"; gsi->d_port.in_aggr_size = GSI_IN_RMNET_AGGR_SIZE; - info.in_req_buf_len = GSI_IN_BUFF_SIZE; - info.in_req_num_buf = GSI_NUM_IN_BUFFERS; + info.in_req_buf_len = GSI_IN_RMNET_BUFF_SIZE; + info.in_req_num_buf = GSI_NUM_IN_RMNET_BUFFERS; gsi->d_port.out_aggr_size = GSI_OUT_AGGR_SIZE; info.out_req_buf_len = GSI_OUT_RMNET_BUF_LEN; info.out_req_num_buf = GSI_NUM_OUT_BUFFERS; diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index 19f872b44f8ef706ea06f67eb26c863a23d277a5..e9ff9341fdbdf31fefdb12b09fac5fbcc379663e 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -35,8 +35,11 @@ #define GSI_CTRL_DTR (1 << 0) #define GSI_NUM_IN_RNDIS_BUFFERS 50 +#define GSI_NUM_IN_RMNET_BUFFERS 50 #define GSI_NUM_IN_BUFFERS 15 #define GSI_IN_BUFF_SIZE 2048 +#define GSI_IN_RMNET_BUFF_SIZE 
31744 +#define GSI_IN_RNDIS_BUFF_SIZE 16384 #define GSI_NUM_OUT_BUFFERS 14 #define GSI_OUT_AGGR_SIZE 24576 diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index ea0da35a44e2e9f3f2c0e11921568da68b70fafd..e6d4fa5eeff106ecc19138c93b624e803a1c8021 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -635,19 +635,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return -EAGAIN; } + list_add(&req->list, &dev->tx_reqs_active); + /* here, we unlock, and only unlock, to avoid deadlock. */ spin_unlock(&dev->lock); value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC); spin_lock(&dev->lock); if (value) { + list_del(&req->list); list_add(&req->list, &dev->tx_reqs); spin_unlock_irqrestore(&dev->lock, flags); mutex_unlock(&dev->lock_printer_io); return -EAGAIN; } - - list_add(&req->list, &dev->tx_reqs_active); - } spin_unlock_irqrestore(&dev->lock, flags); diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c index fbc8cc49ae2042efe91b430db00a42f4a94b9238..4313fa8790b261d3978f605629881989704496e1 100644 --- a/drivers/usb/gadget/function/f_qdss.c +++ b/drivers/usb/gadget/function/f_qdss.c @@ -117,6 +117,40 @@ static struct usb_ss_ep_comp_descriptor qdss_ctrl_out_ep_comp_desc = { .wBytesPerInterval = 0, }; +/* Full speed support */ +static struct usb_endpoint_descriptor qdss_fs_data_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor qdss_fs_ctrl_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor qdss_fs_ctrl_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(64), +}; + +static struct usb_descriptor_header *qdss_fs_desc[] = { + (struct usb_descriptor_header *) &qdss_data_intf_desc, + (struct usb_descriptor_header *) &qdss_fs_data_desc, + (struct usb_descriptor_header *) &qdss_ctrl_intf_desc, + (struct usb_descriptor_header *) &qdss_fs_ctrl_in_desc, + (struct usb_descriptor_header *) &qdss_fs_ctrl_out_desc, + NULL, +}; + static struct usb_descriptor_header *qdss_hs_desc[] = { (struct usb_descriptor_header *) &qdss_data_intf_desc, (struct usb_descriptor_header *) &qdss_hs_data_desc, @@ -138,6 +172,12 @@ static struct usb_descriptor_header *qdss_ss_desc[] = { NULL, }; +static struct usb_descriptor_header *qdss_fs_data_only_desc[] = { + (struct usb_descriptor_header *) &qdss_data_intf_desc, + (struct usb_descriptor_header *) &qdss_fs_data_desc, + NULL, +}; + static struct usb_descriptor_header *qdss_hs_data_only_desc[] = { (struct usb_descriptor_header *) &qdss_data_intf_desc, (struct usb_descriptor_header *) &qdss_hs_data_desc, @@ -374,11 +414,6 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f) pr_debug("%s\n", __func__); - if (!gadget_is_dualspeed(gadget) && !gadget_is_superspeed(gadget)) { - pr_err("%s: full-speed is not supported\n", __func__); - return -ENOTSUPP; - } - /* Allocate data I/F */ iface = usb_interface_id(c, f); if (iface < 0) { @@ -443,6 +478,16 @@ static int qdss_bind(struct 
usb_configuration *c, struct usb_function *f) ep->driver_data = qdss; } + /*update fs descriptors*/ + qdss_fs_data_desc.bEndpointAddress = + qdss_ss_data_desc.bEndpointAddress; + if (qdss->debug_inface_enabled) { + qdss_fs_ctrl_in_desc.bEndpointAddress = + qdss_ss_ctrl_in_desc.bEndpointAddress; + qdss_fs_ctrl_out_desc.bEndpointAddress = + qdss_ss_ctrl_out_desc.bEndpointAddress; + } + /*update descriptors*/ qdss_hs_data_desc.bEndpointAddress = qdss_ss_data_desc.bEndpointAddress; @@ -454,10 +499,10 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f) } if (qdss->debug_inface_enabled) - ret = usb_assign_descriptors(f, qdss_hs_desc, qdss_hs_desc, + ret = usb_assign_descriptors(f, qdss_fs_desc, qdss_hs_desc, qdss_ss_desc, qdss_ss_desc); else - ret = usb_assign_descriptors(f, qdss_hs_data_only_desc, + ret = usb_assign_descriptors(f, qdss_fs_data_only_desc, qdss_hs_data_only_desc, qdss_ss_data_only_desc, qdss_ss_data_only_desc); @@ -636,7 +681,7 @@ static int qdss_set_alt(struct usb_function *f, unsigned int intf, goto fail1; } - if (intf == qdss->data_iface_id) { + if (intf == qdss->data_iface_id && !qdss->data_enabled) { /* Increment usage count on connect */ usb_gadget_autopm_get_async(qdss->gadget); @@ -1138,7 +1183,7 @@ static struct usb_function *qdss_alloc(struct usb_function_instance *fi) struct f_qdss *usb_qdss = opts->usb_qdss; usb_qdss->port.function.name = "usb_qdss"; - usb_qdss->port.function.fs_descriptors = qdss_hs_desc; + usb_qdss->port.function.fs_descriptors = qdss_fs_desc; usb_qdss->port.function.hs_descriptors = qdss_hs_desc; usb_qdss->port.function.strings = qdss_strings; usb_qdss->port.function.bind = qdss_bind; diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index 6b457fbf610a3532443cf0c3f432a48f9fb2aa86..c14b02aa1ade1705358c6173d58bdb85d4e20bae 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -752,6 +752,27 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis_data_intf.bInterfaceNumber = status; rndis_union_desc.bSlaveInterface0 = status; + if (rndis_opts->wceis) { + /* "Wireless" RNDIS; auto-detected by Windows */ + rndis_iad_descriptor.bFunctionClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_iad_descriptor.bFunctionSubClass = 0x01; + rndis_iad_descriptor.bFunctionProtocol = 0x03; + rndis_control_intf.bInterfaceClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_control_intf.bInterfaceSubClass = 0x01; + rndis_control_intf.bInterfaceProtocol = 0x03; + } else { + rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM; + rndis_iad_descriptor.bFunctionSubClass = + USB_CDC_SUBCLASS_ETHERNET; + rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE; + rndis_control_intf.bInterfaceClass = USB_CLASS_COMM; + rndis_control_intf.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM; + rndis_control_intf.bInterfaceProtocol = + USB_CDC_ACM_PROTO_VENDOR; + } + status = -ENODEV; /* allocate instance-specific endpoints */ @@ -890,6 +911,9 @@ USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, subclass); /* f_rndis_opts_protocol */ USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, protocol); +/* f_rndis_opts_wceis */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis); + static struct configfs_attribute *rndis_attrs[] = { &rndis_opts_attr_dev_addr, &rndis_opts_attr_host_addr, @@ -898,6 +922,7 @@ static struct configfs_attribute *rndis_attrs[] = { &rndis_opts_attr_class, &rndis_opts_attr_subclass, &rndis_opts_attr_protocol, + &rndis_opts_attr_wceis, NULL, }; @@ -962,6 +987,9 @@ static 
struct usb_function_instance *rndis_alloc_inst(void) } opts->rndis_interf_group = rndis_interf_group; + /* Enable "Wireless" RNDIS by default */ + opts->wceis = true; + return &opts->func_inst; } diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 97cb2dfd6369751a5d03f5cbcce78b6e41f54154..6355dd442c1381cdbf4a1820fe84e485f625e9cb 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -18,6 +18,9 @@ #include "u_audio.h" #include "u_uac2.h" +/* Keep everyone on toes */ +#define USB_XFERS 8 + /* * The driver implements a simple UAC_2 topology. * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture @@ -292,6 +295,13 @@ static struct usb_endpoint_descriptor hs_epout_desc = { .bInterval = 4, }; +static struct usb_ss_ep_comp_descriptor ss_epout_comp_desc = { + .bLength = sizeof(ss_epout_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + .wBytesPerInterval = cpu_to_le16(1024), +}; + /* CS AS ISO OUT Endpoint */ static struct uac2_iso_endpoint_descriptor as_iso_out_desc = { .bLength = sizeof as_iso_out_desc, @@ -369,6 +379,13 @@ static struct usb_endpoint_descriptor hs_epin_desc = { .bInterval = 4, }; +static struct usb_ss_ep_comp_descriptor ss_epin_comp_desc = { + .bLength = sizeof(ss_epin_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + .wBytesPerInterval = cpu_to_le16(1024), +}; + /* CS AS ISO IN Endpoint */ static struct uac2_iso_endpoint_descriptor as_iso_in_desc = { .bLength = sizeof as_iso_in_desc, @@ -441,6 +458,38 @@ static struct usb_descriptor_header *hs_audio_desc[] = { NULL, }; +static struct usb_descriptor_header *ss_audio_desc[] = { + (struct usb_descriptor_header *)&iad_desc, + (struct usb_descriptor_header *)&std_ac_if_desc, + + (struct usb_descriptor_header *)&ac_hdr_desc, + (struct usb_descriptor_header *)&in_clk_src_desc, + (struct usb_descriptor_header *)&out_clk_src_desc, + (struct usb_descriptor_header *)&usb_out_it_desc, + (struct usb_descriptor_header *)&io_in_it_desc, + (struct usb_descriptor_header *)&usb_in_ot_desc, + (struct usb_descriptor_header *)&io_out_ot_desc, + + (struct usb_descriptor_header *)&std_as_out_if0_desc, + (struct usb_descriptor_header *)&std_as_out_if1_desc, + + (struct usb_descriptor_header *)&as_out_hdr_desc, + (struct usb_descriptor_header *)&as_out_fmt1_desc, + (struct usb_descriptor_header *)&hs_epout_desc, + (struct usb_descriptor_header *)&ss_epout_comp_desc, + (struct usb_descriptor_header *)&as_iso_out_desc, + + (struct usb_descriptor_header *)&std_as_in_if0_desc, + (struct usb_descriptor_header *)&std_as_in_if1_desc, + + (struct usb_descriptor_header *)&as_in_hdr_desc, + (struct usb_descriptor_header *)&as_in_fmt1_desc, + (struct usb_descriptor_header *)&hs_epin_desc, + (struct usb_descriptor_header *)&ss_epin_comp_desc, + (struct usb_descriptor_header *)&as_iso_in_desc, + NULL, +}; + struct cntrl_cur_lay3 { __u32 dCUR; }; @@ -531,6 +580,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) iad_desc.bFirstInterface = ret; std_ac_if_desc.bInterfaceNumber = ret; + iad_desc.bFirstInterface = ret; uac2->ac_intf = ret; uac2->ac_alt = 0; @@ -582,8 +632,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; - ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL, - NULL); + ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, + ss_audio_desc, NULL); if (ret) return ret; @@ 
-1011,6 +1061,19 @@ static struct usb_function *afunc_alloc(struct usb_function_instance *fi) } DECLARE_USB_FUNCTION_INIT(uac2, afunc_alloc_inst, afunc_alloc); + +static int afunc_init(void) +{ + return usb_function_register(&uac2usb_func); +} +module_init(afunc_init); + +static void __exit afunc_exit(void) +{ + usb_function_unregister(&uac2usb_func); +} +module_exit(afunc_exit); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yadwinder Singh"); MODULE_AUTHOR("Jaswinder Singh"); diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h index e4c3f84af4c37d86fb0ca0905011dabbad751775..cea4a0abeaaea31fadfaf3b35584da963b038b8e 100644 --- a/drivers/usb/gadget/function/u_ether_configfs.h +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -188,4 +188,50 @@ out: \ \ CONFIGFS_ATTR(_f_##_opts_, _n_) +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_) \ + static ssize_t _f_##_opts_wceis_show(struct config_item *item, \ + char *page) \ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + bool wceis; \ + \ + if (opts->bound == false) { \ + pr_err("Gadget function do not bind yet.\n"); \ + return -ENODEV; \ + } \ + \ + mutex_lock(&opts->lock); \ + wceis = opts->wceis; \ + mutex_unlock(&opts->lock); \ + return snprintf(page, PAGE_SIZE, "%d", wceis); \ + } \ + \ + static ssize_t _f_##_opts_wceis_store(struct config_item *item, \ + const char *page, size_t len)\ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + bool wceis; \ + int ret; \ + \ + if (opts->bound == false) { \ + pr_err("Gadget function do not bind yet.\n"); \ + return -ENODEV; \ + } \ + \ + mutex_lock(&opts->lock); \ + \ + ret = kstrtobool(page, &wceis); \ + if (ret) \ + goto out; \ + \ + opts->wceis = wceis; \ + ret = len; \ +out: \ + mutex_unlock(&opts->lock); \ + \ + return ret; \ + } \ + \ + CONFIGFS_ATTR(_f_##_opts_, wceis) + #endif /* __U_ETHER_CONFIGFS_H */ diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h index efdb7ac381d973c6cca8f4665ea4ab9ba0083294..9a183914dd0034c6de86fac6b20d110df57bcd02 100644 --- a/drivers/usb/gadget/function/u_rndis.h +++ b/drivers/usb/gadget/function/u_rndis.h @@ -42,6 +42,9 @@ struct f_rndis_opts { */ struct mutex lock; int refcnt; + + /* "Wireless" RNDIS; auto-detected by Windows */ + bool wceis; }; void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net); diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h index 19eeb83538a5adcb951bcb63e5fc764608dd8019..f2d1a2b83eff74ade6a952e2398151ef55e5fa19 100644 --- a/drivers/usb/gadget/function/u_uac2.h +++ b/drivers/usb/gadget/function/u_uac2.h @@ -22,7 +22,7 @@ #define UAC2_DEF_PSRATE 48000 #define UAC2_DEF_PSSIZE 2 #define UAC2_DEF_CCHMASK 0x3 -#define UAC2_DEF_CSRATE 64000 +#define UAC2_DEF_CSRATE 44100 #define UAC2_DEF_CSSIZE 2 #define UAC2_DEF_REQ_NUM 2 diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 6b3e8adb64e6832f1a5c9bde3080ecf09ac8370a..4cfa72cb0a91443ffe3ca1d9446b8f8dda380c1b 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -623,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3) usb3_usb2_pullup(usb3, 0); usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON); usb3_reset_epc(usb3); + usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP | + USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE | + USB_INT_1_SPEED | USB_INT_1_B3_WRMRST | + USB_INT_1_B3_HOTRST | 
USB_INT_1_B2_SPND | + USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST); + usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON); + usb3_init_epc_registers(usb3); if (usb3->driver) usb3->driver->disconnect(&usb3->gadget); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 4fa503a37c7bc34a5496ed779efa9dd8dfa29962..b52c8955209b94dfdbfd964f0ab0fcbcc16d676e 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -437,35 +437,6 @@ static int xhci_plat_remove(struct platform_device *dev) return 0; } -static int __maybe_unused xhci_plat_suspend(struct device *dev) -{ - struct usb_hcd *hcd = dev_get_drvdata(dev); - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - /* - * xhci_suspend() needs `do_wakeup` to know whether host is allowed - * to do wakeup during suspend. Since xhci_plat_suspend is currently - * only designed for system suspend, device_may_wakeup() is enough - * to dertermine whether host is allowed to do wakeup. Need to - * reconsider this when xhci_plat_suspend enlarges its scope, e.g., - * also applies to runtime suspend. - */ - return xhci_suspend(xhci, device_may_wakeup(dev)); -} - -static int __maybe_unused xhci_plat_resume(struct device *dev) -{ - struct usb_hcd *hcd = dev_get_drvdata(dev); - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int ret; - - ret = xhci_priv_resume_quirk(hcd); - if (ret) - return ret; - - return xhci_resume(xhci, 0); -} - static int __maybe_unused xhci_plat_runtime_idle(struct device *dev) { /* @@ -507,6 +478,10 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) dev_dbg(dev, "xhci-plat runtime resume\n"); + ret = xhci_priv_resume_quirk(hcd); + if (ret) + return ret; + ret = xhci_resume(xhci, false); pm_runtime_mark_last_busy(dev); @@ -514,7 +489,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev) } static const struct dev_pm_ops xhci_plat_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume) + SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL) SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7dbc66fb514f05299c61b27148249c46c7ae84f5..83d54232ebc7bdd7681389ca40f8a043175a9775 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3559,6 +3559,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); } + virt_dev->udev = NULL; xhci_disable_slot(xhci, udev->slot_id); /* * Event command completion handler will free any data structures diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 0f9f25db91630433c8e46f4a8066dedae17a1ce0..740db57464fc21814afc0102bd13d7aeda0b55c5 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -275,3 +275,9 @@ config USB_CHAOSKEY To compile this driver as a module, choose M here: the module will be called chaoskey. + +config USB_REDRIVER_NB7VPQ904M + tristate "USB 3.1 Gen1/Gen2 10Gbps re-driver driver for NB7VPQ904M" + depends on USB_PHY + help + Say Y here if you want to support USB super speed re-driver NB7VPQ904M. 
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 109f54f5b9aa4b8ecc69c9f301e6ff3d668cfd93..4f7d20242470f7e9b7057f2d97512f7bb8225291 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o
 obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
 obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o
+obj-$(CONFIG_USB_REDRIVER_NB7VPQ904M) += ssusb-redriver-nb7vpq904m.o
diff --git a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
new file mode 100644
index 0000000000000000000000000000000000000000..df69da4a697d804a48bf32c64d683dfeedaf3ee9
--- /dev/null
+++ b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
@@ -0,0 +1,1079 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* priority: INT_MAX >= x >= 0 */
+#define NOTIFIER_PRIORITY 1
+
+/* Registers Address */
+#define GEN_DEV_SET_REG 0x00
+#define CHIP_VERSION_REG 0x17
+
+#define REDRIVER_REG_MAX 0x1f
+
+#define EQ_SET_REG_BASE 0x01
+#define FLAT_GAIN_REG_BASE 0x18
+#define OUT_COMP_AND_POL_REG_BASE 0x02
+#define LOSS_MATCH_REG_BASE 0x19
+
+/* Default Register Value */
+#define GEN_DEV_SET_REG_DEFAULT 0xFB
+
+/* Register bits */
+/* General Device Settings Register Bits */
+#define CHIP_EN BIT(0)
+#define CHNA_EN BIT(4)
+#define CHNB_EN BIT(5)
+#define CHNC_EN BIT(6)
+#define CHND_EN BIT(7)
+
+#define CHANNEL_NUM 4
+
+#define OP_MODE_SHIFT 1
+
+#define EQ_SETTING_MASK 0x07
+#define OUTPUT_COMPRESSION_MASK 0x03
+#define LOSS_MATCH_MASK 0x03
+#define FLAT_GAIN_MASK 0x03
+
+#define EQ_SETTING_SHIFT 0x01
+#define OUTPUT_COMPRESSION_SHIFT 0x01
+#define LOSS_MATCH_SHIFT 0x00
+#define FLAT_GAIN_SHIFT 0x00
+
+/* for type c cable */
+enum plug_orientation {
+ ORIENTATION_NONE,
+ ORIENTATION_CC1,
+ ORIENTATION_CC2,
+};
+
+/*
+ * Three Modes of Operations:
+ * - One/Two ports of USB 3.1 Gen1/Gen2 (Default Mode)
+ * - Two lanes of DisplayPort 1.4 + One port of USB 3.1 Gen1/Gen2
+ * - Four lanes of DisplayPort 1.4
+ */
+enum operation_mode {
+ OP_MODE_USB, /* One/Two ports of USB */
+ OP_MODE_DP, /* DP 4 Lane and DP 2 Lane */
+ OP_MODE_USB_AND_DP, /* One port of USB and DP 2 Lane */
+};
+
+/**
+ * struct ssusb_redriver - representation of USB re-driver
+ * @dev: struct device pointer
+ * @regmap: used for I2C communication on accessing registers
+ * @client: i2c client structure pointer
+ * @config_work: used to configure re-driver
+ * @redriver_wq: work queue used for @config_work
+ * @usb_psy: structure that holds USB power supply status
+ * @host_active: used to indicate USB host mode is enabled or not
+ * @vbus_active: used to indicate USB device mode is enabled or not
+ * @is_usb3: used to indicate USB3 or not
+ * @typec_orientation: used to indicate Type C orientation
+ * @op_mode: used to store re-driver operation mode
+ * @extcon_usb: external connector used for USB host/device mode
+ * @extcon_dp: external connector used for DP
+ * @vbus_nb: used for vbus event reception
+ * @id_nb: used for id event reception
+ * @dp_nb: used for DP event reception
+ * @panic_nb: used for panic event reception
+ * @eq: equalization register value
+ * @output_comp: output compression register value
+ * @loss_match: loss profile matching control register value
+ * @flat_gain: flat gain control register value
+ * @debug_root: debugfs entry for this context
+ */
+struct ssusb_redriver {
+ struct device *dev; + struct regmap *regmap; + struct i2c_client *client; + + struct work_struct config_work; + struct workqueue_struct *redriver_wq; + + struct power_supply *usb_psy; + bool host_active; + bool vbus_active; + bool is_usb3; + enum plug_orientation typec_orientation; + enum operation_mode op_mode; + + struct extcon_dev *extcon_usb; + struct extcon_dev *extcon_dp; + struct notifier_block vbus_nb; + struct notifier_block id_nb; + struct notifier_block dp_nb; + + struct notifier_block panic_nb; + + u8 eq[CHANNEL_NUM]; + u8 output_comp[CHANNEL_NUM]; + u8 loss_match[CHANNEL_NUM]; + u8 flat_gain[CHANNEL_NUM]; + + struct dentry *debug_root; +}; + +static void ssusb_redriver_debugfs_entries(struct ssusb_redriver *redriver); + +static int redriver_i2c_reg_get(struct ssusb_redriver *redriver, + u8 reg, u8 *val) +{ + int ret; + unsigned int val_tmp; + + ret = regmap_read(redriver->regmap, (unsigned int)reg, &val_tmp); + if (ret < 0) { + dev_err(redriver->dev, "reading reg 0x%02x failure\n", reg); + return ret; + } + + *val = (u8)val_tmp; + + dev_dbg(redriver->dev, "reading reg 0x%02x=0x%02x\n", reg, *val); + + return 0; +} + +static int redriver_i2c_reg_set(struct ssusb_redriver *redriver, + u8 reg, u8 val) +{ + int ret; + + ret = regmap_write(redriver->regmap, (unsigned int)reg, + (unsigned int)val); + if (ret < 0) { + dev_err(redriver->dev, "writing reg 0x%02x failure\n", reg); + return ret; + } + + dev_dbg(redriver->dev, "writing reg 0x%02x=0x%02x\n", reg, val); + + return 0; +} + +/** + * Handle Re-driver chip operation mode and channel settings. + * + * Three Modes of Operations: + * - One/Two ports of USB 3.1 Gen1/Gen2 (Default Mode) + * - Two lanes of DisplayPort 1.4 + One port of USB 3.1 Gen1/Gen2 + * - Four lanes of DisplayPort 1.4 + * + * @redriver - contain redriver status + * @on - re-driver chip enable or not + */ +static void ssusb_redriver_gen_dev_set( + struct ssusb_redriver *redriver, bool on) +{ + int ret; + u8 val; + + val = 0; + + switch (redriver->op_mode) { + case OP_MODE_USB: + /* Use source side I/O mapping */ + if (redriver->typec_orientation + == ORIENTATION_CC1) { + /* Enable channel C and D */ + val &= ~(CHNA_EN | CHNB_EN); + val |= (CHNC_EN | CHND_EN); + } else if (redriver->typec_orientation + == ORIENTATION_CC2) { + /* Enable channel A and B*/ + val |= (CHNA_EN | CHNB_EN); + val &= ~(CHNC_EN | CHND_EN); + } else { + /* Enable channel A, B, C and D */ + val |= (CHNA_EN | CHNB_EN); + val |= (CHNC_EN | CHND_EN); + } + + /* Set to default USB Mode */ + val |= (0x5 << OP_MODE_SHIFT); + + break; + case OP_MODE_DP: + /* Enable channel A, B, C and D */ + val |= (CHNA_EN | CHNB_EN); + val |= (CHNC_EN | CHND_EN); + + /* Set to DP 4 Lane Mode (OP Mode 2) */ + val |= (0x2 << OP_MODE_SHIFT); + + break; + case OP_MODE_USB_AND_DP: + /* Enable channel A, B, C and D */ + val |= (CHNA_EN | CHNB_EN); + val |= (CHNC_EN | CHND_EN); + + if (redriver->typec_orientation + == ORIENTATION_CC1) + /* Set to DP 4 Lane Mode (OP Mode 1) */ + val |= (0x1 << OP_MODE_SHIFT); + else if (redriver->typec_orientation + == ORIENTATION_CC2) + /* Set to DP 4 Lane Mode (OP Mode 0) */ + val |= (0x0 << OP_MODE_SHIFT); + else { + dev_err(redriver->dev, + "can't get orientation, op mode %d\n", + redriver->op_mode); + goto err_exit; + } + + break; + default: + dev_err(redriver->dev, + "Error: op mode: %d, vbus: %d, host: %d.\n", + redriver->op_mode, redriver->vbus_active, + redriver->host_active); + goto err_exit; + } + + /* exit/enter deep-sleep power mode */ + if (on) + val |= CHIP_EN; + else + 
val &= ~CHIP_EN; + + ret = redriver_i2c_reg_set(redriver, GEN_DEV_SET_REG, val); + if (ret < 0) + goto err_exit; + + dev_dbg(redriver->dev, + "successfully (%s) the redriver chip, reg 0x00 = 0x%x\n", + on ? "ENABLE":"DISABLE", val); + + return; + +err_exit: + dev_err(redriver->dev, + "failure to (%s) the redriver chip, reg 0x00 = 0x%x\n", + on ? "ENABLE":"DISABLE", val); +} + +static void ssusb_redriver_config_work(struct work_struct *w) +{ + struct ssusb_redriver *redriver = container_of(w, + struct ssusb_redriver, config_work); + struct extcon_dev *edev = NULL; + union extcon_property_value val; + unsigned int extcon_id = EXTCON_NONE; + int ret = 0; + + dev_dbg(redriver->dev, "%s: USB SS redriver config work\n", + __func__); + + edev = redriver->extcon_usb; + + if (redriver->vbus_active) + extcon_id = EXTCON_USB; + else if (redriver->host_active) + extcon_id = EXTCON_USB_HOST; + + if (edev && (extcon_id != EXTCON_NONE) + && extcon_get_state(edev, extcon_id)) { + ret = extcon_get_property(edev, extcon_id, + EXTCON_PROP_USB_SS, &val); + if (!ret) { + redriver->is_usb3 = (val.intval != 0); + + dev_dbg(redriver->dev, "SS Lane is used? [%s].\n", + redriver->is_usb3 ? "true" : "false"); + } else { + redriver->is_usb3 = true; + + dev_dbg(redriver->dev, "Default true as speed isn't reported.\n"); + } + + if (redriver->is_usb3 || (redriver->op_mode != OP_MODE_USB)) { + ret = extcon_get_property(edev, extcon_id, + EXTCON_PROP_USB_TYPEC_POLARITY, &val); + if (!ret) + redriver->typec_orientation = val.intval ? + ORIENTATION_CC2 : ORIENTATION_CC1; + else if (redriver->op_mode == OP_MODE_USB) + redriver->typec_orientation = ORIENTATION_NONE; + else + dev_err(redriver->dev, "fail to get orientation when has DP.\n"); + + ssusb_redriver_gen_dev_set(redriver, true); + } else { + dev_dbg(redriver->dev, + "Disable chip when not in SS USB mode.\n"); + + ssusb_redriver_gen_dev_set(redriver, false); + } + + dev_dbg(redriver->dev, "Type C orientation code is %d.\n", + redriver->typec_orientation); + } else { + dev_dbg(redriver->dev, "USB Cable is disconnected.\n"); + + /* Set back to USB only mode when cable disconnect */ + redriver->op_mode = OP_MODE_USB; + + ssusb_redriver_gen_dev_set(redriver, false); + } +} + +static int ssusb_redriver_dp_notifier(struct notifier_block *nb, + unsigned long dp_lane, void *ptr) +{ + struct ssusb_redriver *redriver = container_of(nb, + struct ssusb_redriver, dp_nb); + enum operation_mode op_mode; + + dev_dbg(redriver->dev, + "redriver op mode change: %ld event received\n", dp_lane); + + if (dp_lane == 0) + op_mode = OP_MODE_USB; + else if (dp_lane == 2) + op_mode = OP_MODE_USB_AND_DP; + else if (dp_lane == 4) + op_mode = OP_MODE_DP; + else + return 0; + + if (redriver->op_mode == op_mode) + return 0; + + redriver->op_mode = op_mode; + + queue_work(redriver->redriver_wq, &redriver->config_work); + + return 0; +} + +static int ssusb_redriver_vbus_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct ssusb_redriver *redriver = container_of(nb, + struct ssusb_redriver, vbus_nb); + + dev_dbg(redriver->dev, "vbus:%ld event received\n", event); + + if (redriver->vbus_active == event) + return NOTIFY_DONE; + + redriver->vbus_active = event; + + if (redriver->vbus_active) + queue_work(redriver->redriver_wq, &redriver->config_work); + + return NOTIFY_DONE; +} + +static int ssusb_redriver_id_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct ssusb_redriver *redriver = container_of(nb, + struct ssusb_redriver, id_nb); + bool 
host_active = (bool)event; + + dev_dbg(redriver->dev, "host_active:%s event received\n", + host_active ? "true" : "false"); + + if (redriver->host_active == host_active) + return NOTIFY_DONE; + + redriver->host_active = host_active; + + if (redriver->host_active) + queue_work(redriver->redriver_wq, &redriver->config_work); + + return NOTIFY_DONE; +} + +static int ssusb_redriver_extcon_register(struct ssusb_redriver *redriver) +{ + struct device_node *node = redriver->dev->of_node; + struct extcon_dev *edev; + int ret = 0; + + if (!of_find_property(node, "extcon", NULL)) { + dev_err(redriver->dev, "failed to get extcon for redriver\n"); + return 0; + } + + edev = extcon_get_edev_by_phandle(redriver->dev, 0); + if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) { + dev_err(redriver->dev, "failed to get phandle for redriver\n"); + return PTR_ERR(edev); + } + + if (!IS_ERR(edev)) { + redriver->extcon_usb = edev; + + redriver->vbus_nb.notifier_call = ssusb_redriver_vbus_notifier; + redriver->vbus_nb.priority = NOTIFIER_PRIORITY; + ret = extcon_register_notifier(edev, EXTCON_USB, + &redriver->vbus_nb); + if (ret < 0) { + dev_err(redriver->dev, + "failed to register notifier for redriver\n"); + return ret; + } + + redriver->id_nb.notifier_call = ssusb_redriver_id_notifier; + redriver->id_nb.priority = NOTIFIER_PRIORITY; + ret = extcon_register_notifier(edev, EXTCON_USB_HOST, + &redriver->id_nb); + if (ret < 0) { + dev_err(redriver->dev, + "failed to register notifier for USB-HOST\n"); + goto err; + } + } + + edev = NULL; + /* Use optional phandle (index 1) for DP lane events */ + if (of_count_phandle_with_args(node, "extcon", NULL) > 1) { + edev = extcon_get_edev_by_phandle(redriver->dev, 1); + if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) { + ret = PTR_ERR(edev); + goto err1; + } + } + + if (!IS_ERR_OR_NULL(edev)) { + redriver->extcon_dp = edev; + redriver->dp_nb.notifier_call = + ssusb_redriver_dp_notifier; + redriver->dp_nb.priority = NOTIFIER_PRIORITY; + ret = extcon_register_blocking_notifier(edev, EXTCON_DISP_DP, + &redriver->dp_nb); + if (ret < 0) { + dev_err(redriver->dev, + "failed to register blocking notifier\n"); + goto err1; + } + } + + /* Update initial VBUS/ID state from extcon */ + if (extcon_get_state(redriver->extcon_usb, EXTCON_USB)) + ssusb_redriver_vbus_notifier(&redriver->vbus_nb, true, + redriver->extcon_usb); + else if (extcon_get_state(redriver->extcon_usb, EXTCON_USB_HOST)) + ssusb_redriver_id_notifier(&redriver->id_nb, true, + redriver->extcon_usb); + + return 0; + +err1: + if (redriver->extcon_usb) + extcon_unregister_notifier(redriver->extcon_usb, + EXTCON_USB_HOST, &redriver->id_nb); +err: + if (redriver->extcon_usb) + extcon_unregister_notifier(redriver->extcon_usb, + EXTCON_USB, &redriver->vbus_nb); + return ret; +} + +static int ssusb_redriver_param_config(struct ssusb_redriver *redriver, + u8 reg_base, u8 channel, u8 mask, u8 shift, u8 val, + u8 *stored_val) +{ + int i, ret = -EINVAL; + u8 reg_addr, reg_val; + + if (channel == CHANNEL_NUM) { + for (i = 0; i < CHANNEL_NUM; i++) { + reg_addr = reg_base + (i << 1); + + ret = redriver_i2c_reg_get(redriver, + reg_addr, ®_val); + if (ret < 0) + return ret; + + reg_val &= ~(mask << shift); + reg_val |= (val << shift); + + ret = redriver_i2c_reg_set(redriver, + reg_addr, reg_val); + if (ret < 0) + return ret; + + stored_val[i] = val; + } + } else if (channel < CHANNEL_NUM) { + reg_addr = reg_base + (channel << 1); + + ret = redriver_i2c_reg_get(redriver, + reg_addr, ®_val); + if (ret < 0) + return ret; + + reg_val &= ~(mask 
<< shift); + reg_val |= (val << shift); + + ret = redriver_i2c_reg_set(redriver, + reg_addr, reg_val); + if (ret < 0) + return ret; + + stored_val[channel] = val; + } else { + dev_err(redriver->dev, "error channel value.\n"); + return ret; + } + + return 0; +} + +static int ssusb_redriver_eq_config( + struct ssusb_redriver *redriver, u8 channel, u8 val) +{ + if (val <= EQ_SETTING_MASK) + return ssusb_redriver_param_config(redriver, + EQ_SET_REG_BASE, channel, EQ_SETTING_MASK, + EQ_SETTING_SHIFT, val, redriver->eq); + else + return -EINVAL; +} + +static int ssusb_redriver_flat_gain_config( + struct ssusb_redriver *redriver, u8 channel, u8 val) +{ + if (val <= FLAT_GAIN_MASK) + return ssusb_redriver_param_config(redriver, + FLAT_GAIN_REG_BASE, channel, FLAT_GAIN_MASK, + FLAT_GAIN_SHIFT, val, redriver->flat_gain); + else + return -EINVAL; +} + +static int ssusb_redriver_output_comp_config( + struct ssusb_redriver *redriver, u8 channel, u8 val) +{ + if (val <= OUTPUT_COMPRESSION_MASK) + return ssusb_redriver_param_config(redriver, + OUT_COMP_AND_POL_REG_BASE, channel, + OUTPUT_COMPRESSION_MASK, + OUTPUT_COMPRESSION_SHIFT, val, + redriver->output_comp); + else + return -EINVAL; +} + +static int ssusb_redriver_loss_match_config( + struct ssusb_redriver *redriver, u8 channel, u8 val) +{ + if (val <= LOSS_MATCH_MASK) + return ssusb_redriver_param_config(redriver, + LOSS_MATCH_REG_BASE, channel, LOSS_MATCH_MASK, + LOSS_MATCH_SHIFT, val, redriver->loss_match); + else + return -EINVAL; +} + +static int ssusb_redriver_default_config(struct ssusb_redriver *redriver) +{ + struct device_node *node = redriver->dev->of_node; + int ret = 0, i = 0; + + if (of_find_property(node, "eq", NULL)) { + ret = of_property_read_u8_array(node, "eq", redriver->eq, + ARRAY_SIZE(redriver->eq)); + if (!ret) { + for (i = 0; i < CHANNEL_NUM; i++) { + ret = ssusb_redriver_eq_config( + redriver, i, + redriver->eq[i]); + if (ret) + goto err; + } + } else + goto err; + } + + if (of_find_property(node, "flat-gain", NULL)) { + ret = of_property_read_u8_array(node, + "flat-gain", redriver->flat_gain, + ARRAY_SIZE(redriver->flat_gain)); + if (!ret) { + for (i = 0; i < CHANNEL_NUM; i++) { + ret = ssusb_redriver_flat_gain_config( + redriver, i, + redriver->flat_gain[i]); + if (ret) + goto err; + } + } else + goto err; + } + + if (of_find_property(node, "output-comp", NULL)) { + ret = of_property_read_u8_array(node, + "output-comp", redriver->output_comp, + ARRAY_SIZE(redriver->output_comp)); + if (!ret) { + for (i = 0; i < CHANNEL_NUM; i++) { + ret = ssusb_redriver_output_comp_config( + redriver, i, + redriver->output_comp[i]); + if (ret) + goto err; + } + } else + goto err; + } + + if (of_find_property(node, "loss-match", NULL)) { + ret = of_property_read_u8_array(node, + "loss-match", redriver->loss_match, + ARRAY_SIZE(redriver->loss_match)); + if (!ret) { + for (i = 0; i < CHANNEL_NUM; i++) { + ret = ssusb_redriver_loss_match_config( + redriver, i, + redriver->loss_match[i]); + if (ret) + goto err; + } + } else + goto err; + } + + return 0; + +err: + dev_err(redriver->dev, + "%s: set default parameters failure.\n", __func__); + return ret; +} + +static int ssusb_redriver_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct ssusb_redriver *redriver = container_of(this, + struct ssusb_redriver, panic_nb); + + pr_err("%s: op mode: %d, vbus: %d, host: %d\n", __func__, + redriver->op_mode, redriver->vbus_active, + redriver->host_active); + + return NOTIFY_OK; +} + +static const struct regmap_config 
redriver_regmap = { + .max_register = REDRIVER_REG_MAX, + .reg_bits = 8, + .val_bits = 8, +}; + +static int redriver_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct ssusb_redriver *redriver; + union power_supply_propval pval = {0}; + int ret; + + redriver = devm_kzalloc(&client->dev, sizeof(struct ssusb_redriver), + GFP_KERNEL); + if (!redriver) + return -ENOMEM; + + INIT_WORK(&redriver->config_work, ssusb_redriver_config_work); + + redriver->redriver_wq = alloc_ordered_workqueue("redriver_wq", + WQ_HIGHPRI); + if (!redriver->redriver_wq) { + dev_err(&client->dev, + "%s: Unable to create workqueue redriver_wq\n", + __func__); + return -ENOMEM; + } + + redriver->dev = &client->dev; + + redriver->regmap = devm_regmap_init_i2c(client, &redriver_regmap); + if (IS_ERR(redriver->regmap)) { + ret = PTR_ERR(redriver->regmap); + dev_err(&client->dev, + "Failed to allocate register map: %d\n", ret); + goto destroy_wq; + } + + redriver->client = client; + i2c_set_clientdata(client, redriver); + + /* Set default parameters for A/B/C/D channels. */ + ret = ssusb_redriver_default_config(redriver); + if (ret < 0) + goto destroy_wq; + + /* Set id_state as float by default*/ + redriver->host_active = false; + + /* Set to USB by default */ + redriver->op_mode = OP_MODE_USB; + + redriver->usb_psy = power_supply_get_by_name("usb"); + if (!redriver->usb_psy) { + dev_warn(&client->dev, "Could not get usb power_supply\n"); + pval.intval = -EINVAL; + } else { + power_supply_get_property(redriver->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + + /* USB cable is not connected */ + if (!pval.intval) + ssusb_redriver_gen_dev_set(redriver, false); + } + + ret = ssusb_redriver_extcon_register(redriver); + if (ret) + goto put_psy; + + redriver->panic_nb.notifier_call = ssusb_redriver_panic_notifier; + atomic_notifier_chain_register(&panic_notifier_list, + &redriver->panic_nb); + + ssusb_redriver_debugfs_entries(redriver); + + dev_dbg(&client->dev, "USB 3.1 Gen1/Gen2 Re-Driver Probed.\n"); + + return 0; + +put_psy: + if (redriver->usb_psy) + power_supply_put(redriver->usb_psy); + +destroy_wq: + destroy_workqueue(redriver->redriver_wq); + + return ret; +} + +static int redriver_i2c_remove(struct i2c_client *client) +{ + struct ssusb_redriver *redriver = i2c_get_clientdata(client); + + debugfs_remove(redriver->debug_root); + atomic_notifier_chain_unregister(&panic_notifier_list, + &redriver->panic_nb); + + if (redriver->usb_psy) + power_supply_put(redriver->usb_psy); + + destroy_workqueue(redriver->redriver_wq); + + return 0; +} + +static ssize_t channel_config_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos, + int (*config_func)(struct ssusb_redriver *redriver, + u8 channel, u8 val)) +{ + struct seq_file *s = file->private_data; + struct ssusb_redriver *redriver = s->private; + char buf[20]; + char *token_chan, *token_val, *this_buf; + int ret = 0; + + memset(buf, 0, sizeof(buf)); + + this_buf = buf; + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + if (isdigit(buf[0])) { + ret = config_func(redriver, CHANNEL_NUM, buf[0] - '0'); + if (ret < 0) + goto err; + } else if (isalpha(buf[0])) { + while ((token_chan = strsep(&this_buf, " ")) != NULL) { + if (isalpha(*token_chan) + && (__toupper(*token_chan) >= 'A') + && (__toupper(*token_chan) <= 'D')) { + token_val = strsep(&this_buf, " "); + if (!isdigit(*token_val)) + goto err; + } else + goto err; + + ret = config_func(redriver, *token_chan - 'A', + *token_val 
- '0'); + if (ret < 0) + goto err; + } + } else + goto err; + + + return count; + +err: + pr_err("Used to config redriver A/B/C/D channels' parameters\n" + "1. Set all channels to same value\n" + "echo n > [eq|output_comp|flat_gain|loss_match]\n" + "- eq: Equalization, range 0-7\n" + "- output_comp: Output Compression, range 0-3\n" + "- loss_match: LOSS Profile Matching, range 0-3\n" + "- flat_gain: Flat Gain, range 0-3\n" + "Example: Set all channels to same EQ value\n" + "echo 1 > eq\n" + "2. Set two channels to different values leave others unchanged\n" + "echo [A|B|C|D] n [A|B|C|D] n > [eq|output_comp|flat_gain|loss_match]\n" + "Example2: set channel B flat gain value 2, set channel C flat gain value 3\n" + "echo B 2 C 3 > flat_gain\n"); + + return -EFAULT; +} + +static int eq_status(struct seq_file *s, void *p) +{ + struct ssusb_redriver *redriver = s->private; + + seq_puts(s, "\t\t\t A\t B\t C\t D\n"); + seq_printf(s, "Equalization:\t\t %d\t %d\t %d\t %d\t\n", + redriver->eq[0], redriver->eq[1], + redriver->eq[2], redriver->eq[3]); + return 0; +} + +static int eq_status_open(struct inode *inode, + struct file *file) +{ + return single_open(file, eq_status, inode->i_private); +} + +static ssize_t eq_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return channel_config_write(file, ubuf, count, ppos, + ssusb_redriver_eq_config); +} + +static const struct file_operations eq_ops = { + .open = eq_status_open, + .read = seq_read, + .write = eq_write, +}; + +static int flat_gain_status(struct seq_file *s, void *p) +{ + struct ssusb_redriver *redriver = s->private; + + seq_puts(s, "\t\t\t A\t B\t C\t D\n"); + seq_printf(s, "TX/RX Flat Gain:\t %d\t %d\t %d\t %d\t\n", + redriver->flat_gain[0], redriver->flat_gain[1], + redriver->flat_gain[2], redriver->flat_gain[3]); + return 0; +} + +static int flat_gain_status_open(struct inode *inode, + struct file *file) +{ + return single_open(file, flat_gain_status, inode->i_private); +} + +static ssize_t flat_gain_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return channel_config_write(file, ubuf, count, ppos, + ssusb_redriver_flat_gain_config); +} + +static const struct file_operations flat_gain_ops = { + .open = flat_gain_status_open, + .read = seq_read, + .write = flat_gain_write, +}; + +static int output_comp_status(struct seq_file *s, void *p) +{ + struct ssusb_redriver *redriver = s->private; + + seq_puts(s, "\t\t\t A\t B\t C\t D\n"); + seq_printf(s, "Output Compression:\t %d\t %d\t %d\t %d\t\n", + redriver->output_comp[0], redriver->output_comp[1], + redriver->output_comp[2], redriver->output_comp[3]); + return 0; +} + +static int output_comp_status_open(struct inode *inode, + struct file *file) +{ + return single_open(file, output_comp_status, inode->i_private); +} + +static ssize_t output_comp_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return channel_config_write(file, ubuf, count, ppos, + ssusb_redriver_output_comp_config); +} + +static const struct file_operations output_comp_ops = { + .open = output_comp_status_open, + .read = seq_read, + .write = output_comp_write, +}; + +static int loss_match_status(struct seq_file *s, void *p) +{ + struct ssusb_redriver *redriver = s->private; + + seq_puts(s, "\t\t\t A\t B\t C\t D\n"); + seq_printf(s, "Loss Profile Match:\t %d\t %d\t %d\t %d\t\n", + redriver->loss_match[0], redriver->loss_match[1], + redriver->loss_match[2], redriver->loss_match[3]); + return 0; +} + +static int 
loss_match_status_open(struct inode *inode, + struct file *file) +{ + return single_open(file, loss_match_status, inode->i_private); +} + +static ssize_t loss_match_write(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + return channel_config_write(file, ubuf, count, ppos, + ssusb_redriver_loss_match_config); +} + +static const struct file_operations loss_match_ops = { + .open = loss_match_status_open, + .read = seq_read, + .write = loss_match_write, +}; + +static void ssusb_redriver_debugfs_entries( + struct ssusb_redriver *redriver) +{ + struct dentry *ent; + + redriver->debug_root = debugfs_create_dir("ssusb_redriver", NULL); + if (!redriver->debug_root) { + dev_warn(redriver->dev, "Couldn't create debug dir\n"); + return; + } + + ent = debugfs_create_file("eq", 0600, + redriver->debug_root, redriver, &eq_ops); + if (IS_ERR_OR_NULL(ent)) + dev_warn(redriver->dev, "Couldn't create eq file\n"); + + ent = debugfs_create_file("flat_gain", 0600, + redriver->debug_root, redriver, &flat_gain_ops); + if (IS_ERR_OR_NULL(ent)) + dev_warn(redriver->dev, "Couldn't create flat_gain file\n"); + + ent = debugfs_create_file("output_comp", 0600, + redriver->debug_root, redriver, &output_comp_ops); + if (IS_ERR_OR_NULL(ent)) + dev_warn(redriver->dev, "Couldn't create output_comp file\n"); + + ent = debugfs_create_file("loss_match", 0600, + redriver->debug_root, redriver, &loss_match_ops); + if (IS_ERR_OR_NULL(ent)) + dev_warn(redriver->dev, "Couldn't create loss_match file\n"); +} + +static int __maybe_unused redriver_i2c_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ssusb_redriver *redriver = i2c_get_clientdata(client); + + dev_dbg(redriver->dev, "%s: SS USB redriver suspend.\n", + __func__); + + /* Disable redriver chip when USB cable disconnected */ + if ((!redriver->vbus_active) + && (!redriver->host_active)) + ssusb_redriver_gen_dev_set(redriver, false); + + flush_workqueue(redriver->redriver_wq); + + return 0; +} + +static int __maybe_unused redriver_i2c_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ssusb_redriver *redriver = i2c_get_clientdata(client); + + dev_dbg(redriver->dev, "%s: SS USB redriver resume.\n", + __func__); + + flush_workqueue(redriver->redriver_wq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(redriver_i2c_pm, redriver_i2c_suspend, + redriver_i2c_resume); + +static void redriver_i2c_shutdown(struct i2c_client *client) +{ + struct ssusb_redriver *redriver = i2c_get_clientdata(client); + int ret; + + /* Set back to USB mode with four channel enabled */ + ret = redriver_i2c_reg_set(redriver, GEN_DEV_SET_REG, + GEN_DEV_SET_REG_DEFAULT); + if (ret < 0) + dev_err(&client->dev, + "%s: fail to set USB mode with 4 channel enabled.\n", + __func__); + else + dev_dbg(&client->dev, + "%s: successfully set back to USB mode.\n", + __func__); +} + +static const struct of_device_id redriver_match_table[] = { + { .compatible = "onnn,redriver",}, + { }, +}; + +static const struct i2c_device_id redriver_i2c_id[] = { + { "ssusb redriver", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, redriver_i2c_id); + +static struct i2c_driver redriver_i2c_driver = { + .driver = { + .name = "ssusb redriver", + .owner = THIS_MODULE, + .of_match_table = redriver_match_table, + .pm = &redriver_i2c_pm, + }, + + .probe = redriver_i2c_probe, + .remove = redriver_i2c_remove, + + .shutdown = redriver_i2c_shutdown, + + .id_table = redriver_i2c_id, +}; + +module_i2c_driver(redriver_i2c_driver); + 
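+/*
+ * Illustrative devicetree node for this driver (sketch only, not part of the
+ * binding documentation).  The compatible string and the "extcon", "eq",
+ * "flat-gain", "output-comp" and "loss-match" properties follow the probe and
+ * ssusb_redriver_default_config() code above; the I2C address, phandle labels
+ * and per-channel values below are placeholders, not recommended settings.
+ * Valid ranges per the config helpers: eq 0-7, flat-gain/output-comp/
+ * loss-match 0-3, one value per channel A/B/C/D.
+ *
+ *	ssusb_redriver: redriver@19 {
+ *		compatible = "onnn,redriver";
+ *		reg = <0x19>;
+ *		extcon = <&usb_extcon>, <&dp_extcon>;
+ *		eq = /bits/ 8 <4 4 4 4>;
+ *		flat-gain = /bits/ 8 <1 1 1 1>;
+ *		output-comp = /bits/ 8 <2 2 2 2>;
+ *		loss-match = /bits/ 8 <1 1 1 1>;
+ *	};
+ */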
+MODULE_DESCRIPTION("USB Super Speed Linear Re-Driver Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 5a6dca01a1d0c546ee84e79aa84ae68440db4dcd..802388bb42ba707ee4432456aaaf1718af151d9f 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -2560,8 +2560,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd) { struct musb *musb = hcd_to_musb(hcd); u8 devctl; + int ret; - musb_port_suspend(musb, true); + ret = musb_port_suspend(musb, true); + if (ret) + return ret; if (!is_host_active(musb)) return 0; diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h index 7bbf01bf4bb0b51a42104e7302a5d01012ee551f..54d02ed032df013c94e913a5a45b29bf2b8aa21b 100644 --- a/drivers/usb/musb/musb_host.h +++ b/drivers/usb/musb/musb_host.h @@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8); extern void musb_root_disconnect(struct musb *musb); extern void musb_host_resume_root_hub(struct musb *musb); extern void musb_host_poke_root_hub(struct musb *musb); -extern void musb_port_suspend(struct musb *musb, bool do_suspend); +extern int musb_port_suspend(struct musb *musb, bool do_suspend); extern void musb_port_reset(struct musb *musb, bool do_reset); extern void musb_host_finish_resume(struct work_struct *work); #else @@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {} static inline void musb_host_resume_root_hub(struct musb *musb) {} static inline void musb_host_poll_rh_status(struct musb *musb) {} static inline void musb_host_poke_root_hub(struct musb *musb) {} -static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {} +static inline int musb_port_suspend(struct musb *musb, bool do_suspend) +{ + return 0; +} static inline void musb_port_reset(struct musb *musb, bool do_reset) {} static inline void musb_host_finish_resume(struct work_struct *work) {} #endif diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 0b4595439d51390ed8f57521f626d047520e37c4..5eca5d2d5e003189cf5349fba50743a4ffa6beab 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -73,14 +73,14 @@ void musb_host_finish_resume(struct work_struct *work) spin_unlock_irqrestore(&musb->lock, flags); } -void musb_port_suspend(struct musb *musb, bool do_suspend) +int musb_port_suspend(struct musb *musb, bool do_suspend) { struct usb_otg *otg = musb->xceiv->otg; u8 power; void __iomem *mbase = musb->mregs; if (!is_host_active(musb)) - return; + return 0; /* NOTE: this doesn't necessarily put PHY into low power mode, * turning off its clock; that's a function of PHY integration and @@ -91,16 +91,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) if (do_suspend) { int retries = 10000; - power &= ~MUSB_POWER_RESUME; - power |= MUSB_POWER_SUSPENDM; - musb_writeb(mbase, MUSB_POWER, power); + if (power & MUSB_POWER_RESUME) + return -EBUSY; - /* Needed for OPT A tests */ - power = musb_readb(mbase, MUSB_POWER); - while (power & MUSB_POWER_SUSPENDM) { + if (!(power & MUSB_POWER_SUSPENDM)) { + power |= MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, power); + + /* Needed for OPT A tests */ power = musb_readb(mbase, MUSB_POWER); - if (retries-- < 1) - break; + while (power & MUSB_POWER_SUSPENDM) { + power = musb_readb(mbase, MUSB_POWER); + if (retries-- < 1) + break; + } } musb_dbg(musb, "Root port suspended, power %02x", power); @@ -136,6 +140,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend) 
schedule_delayed_work(&musb->finish_resume_work, msecs_to_jiffies(USB_RESUME_TIMEOUT)); } + return 0; } void musb_port_reset(struct musb *musb, bool do_reset) diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 497cad2d78197589bdeee9860b3e392a0edd3518..63171c85f6d0034e1e8912d58edbc347473d6111 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -43,6 +43,7 @@ enum usbpd_state { PE_ERROR_RECOVERY, PE_SRC_DISABLED, PE_SRC_STARTUP, + PE_SRC_STARTUP_WAIT_FOR_VDM_RESP, PE_SRC_SEND_CAPABILITIES, PE_SRC_SEND_CAPABILITIES_WAIT, /* substate to wait for Request */ PE_SRC_NEGOTIATE_CAPABILITY, @@ -79,6 +80,7 @@ static const char * const usbpd_state_strings[] = { "ERROR_RECOVERY", "SRC_Disabled", "SRC_Startup", + "SRC_Startup_Wait_for_VDM_Resp", "SRC_Send_Capabilities", "SRC_Send_Capabilities (Wait for Request)", "SRC_Negotiate_Capability", @@ -316,11 +318,12 @@ static void *usbpd_ipc_log; #define ID_HDR_USB_HOST BIT(31) #define ID_HDR_USB_DEVICE BIT(30) #define ID_HDR_MODAL_OPR BIT(26) -#define ID_HDR_PRODUCT_TYPE(n) ((n) >> 27) +#define ID_HDR_PRODUCT_TYPE(n) (((n) >> 27) & 0x7) #define ID_HDR_PRODUCT_PER_MASK (2 << 27) #define ID_HDR_PRODUCT_HUB 1 #define ID_HDR_PRODUCT_PER 2 #define ID_HDR_PRODUCT_AMA 5 +#define ID_HDR_PRODUCT_VPD 6 #define ID_HDR_VID 0x05c6 /* qcom */ #define PROD_VDO_PID 0x0a00 /* TBD */ @@ -371,7 +374,6 @@ struct usbpd { struct rx_msg *rx_ext_msg; u32 received_pdos[PD_MAX_DATA_OBJ]; - u32 received_ado; u16 src_cap_id; u8 selected_pdo; u8 requested_pdo; @@ -432,6 +434,7 @@ struct usbpd { struct list_head instance; + bool has_dp; u16 ss_lane_svid; /* ext msg support */ @@ -439,6 +442,7 @@ struct usbpd { u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN]; bool send_get_pps_status; u32 pps_status_db; + bool send_get_status; u8 status_db[PD_STATUS_DB_LEN]; bool send_get_battery_cap; u8 get_battery_cap_db; @@ -453,9 +457,12 @@ static LIST_HEAD(_usbpd); /* useful for debugging */ static const unsigned int usbpd_extcon_cable[] = { EXTCON_USB, EXTCON_USB_HOST, + EXTCON_DISP_DP, EXTCON_NONE, }; +static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type); + enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd) { int ret; @@ -575,6 +582,13 @@ static int usbpd_release_ss_lane(struct usbpd *pd, pd->ss_lane_svid = hdlr->svid; + /* DP 4 Lane mode */ + ret = extcon_blocking_sync(pd->extcon, EXTCON_DISP_DP, 4); + if (ret) { + usbpd_err(&pd->dev, "err(%d) for notify DP 4 Lane", ret); + goto err_exit; + } + err_exit: return ret; } @@ -1000,9 +1014,8 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop, unsigned long flags; u16 header; - if (sop != SOP_MSG) { - usbpd_err(&pd->dev, "invalid msg type (%d) received; only SOP supported\n", - sop); + if (sop == SOPII_MSG) { + usbpd_err(&pd->dev, "only SOP/SOP' supported\n"); return; } @@ -1101,6 +1114,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) .msg_rx_cb = phy_msg_received, .shutdown_cb = phy_shutdown, .frame_filter_val = FRAME_FILTER_EN_SOP | + FRAME_FILTER_EN_SOPI | FRAME_FILTER_EN_HARD_RESET, }; union power_supply_propval val = {0}; @@ -1181,6 +1195,20 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) POWER_SUPPLY_PROP_PR_SWAP, &val); } + if (pd->vconn_enabled) { + /* + * wait for tVCONNStable (50ms), until SOPI becomes + * ready for communication. 
+ */ + usleep_range(50000, 51000); + usbpd_send_svdm(pd, USBPD_SID, + USBPD_SVDM_DISCOVER_IDENTITY, + SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0); + handle_vdm_tx(pd, SOPI_MSG); + pd->current_state = PE_SRC_STARTUP_WAIT_FOR_VDM_RESP; + kick_sm(pd, SENDER_RESPONSE_TIME); + return; + } /* * A sink might remove its terminations (during some Type-C * compliance tests or a sink attempting to do Try.SRC) @@ -1522,7 +1550,7 @@ int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos) { struct vdm_tx *vdm_tx; - if (!pd->in_explicit_contract || pd->vdm_tx) + if (pd->vdm_tx) return -EBUSY; vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL); @@ -1538,7 +1566,8 @@ int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos) pd->vdm_tx = vdm_tx; /* slight delay before queuing to prioritize handling of incoming VDM */ - kick_sm(pd, 2); + if (pd->in_explicit_contract) + kick_sm(pd, 2); return 0; } @@ -1559,6 +1588,7 @@ EXPORT_SYMBOL(usbpd_send_svdm); static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) { + int ret; u32 vdm_hdr = rx_msg->data_len >= sizeof(u32) ? ((u32 *)rx_msg->payload)[0] : 0; @@ -1568,11 +1598,19 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) u8 i, num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1; u8 cmd = SVDM_HDR_CMD(vdm_hdr); u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr); - bool has_dp = false; struct usbpd_svid_handler *handler; - usbpd_dbg(&pd->dev, "VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x\n", - svid, cmd, cmd_type, vdm_hdr); + usbpd_dbg(&pd->dev, + "VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x has_dp: %s\n", + svid, cmd, cmd_type, vdm_hdr, + pd->has_dp ? "true" : "false"); + + if ((svid == 0xFF01) && (pd->has_dp == false)) { + pd->has_dp = true; + + /* Set to USB and DP cocurrency mode */ + extcon_blocking_sync(pd->extcon, EXTCON_DISP_DP, 2); + } /* if it's a supported SVID, pass the message to the handler */ handler = find_svid_handler(pd, svid); @@ -1630,6 +1668,24 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) kfree(pd->vdm_tx_retry); pd->vdm_tx_retry = NULL; + if (num_vdos && ID_HDR_PRODUCT_TYPE(vdos[0]) == + ID_HDR_PRODUCT_VPD) { + + usbpd_dbg(&pd->dev, "VPD detected turn off vbus\n"); + + if (pd->vbus_enabled) { + ret = regulator_disable(pd->vbus); + if (ret) + usbpd_err(&pd->dev, "Err disabling vbus (%d)\n", + ret); + else + pd->vbus_enabled = false; + } + } + + if (!pd->in_explicit_contract) + break; + pd->vdm_state = DISCOVERED_ID; usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_SVIDS, @@ -1718,9 +1774,6 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) handler->discovered = true; } } - - if (svid == 0xFF01) - has_dp = true; } break; @@ -1767,7 +1820,7 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) } } -static void handle_vdm_tx(struct usbpd *pd) +static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type) { int ret; unsigned long flags; @@ -1785,13 +1838,13 @@ static void handle_vdm_tx(struct usbpd *pd) spin_unlock_irqrestore(&pd->rx_lock, flags); ret = pd_send_msg(pd, MSG_VDM, pd->vdm_tx->data, - pd->vdm_tx->size, SOP_MSG); + pd->vdm_tx->size, sop_type); if (ret) { usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n", ret, SVDM_HDR_CMD(pd->vdm_tx->data[0])); /* retry when hitting PE_SRC/SNK_Ready again */ - if (ret != -EBUSY) + if (ret != -EBUSY && sop_type == SOP_MSG) usbpd_set_state(pd, pd->current_pr == PR_SRC ? 
PE_SRC_SEND_SOFT_RESET : PE_SNK_SEND_SOFT_RESET); @@ -1966,7 +2019,7 @@ static void usbpd_sm(struct work_struct *w) { struct usbpd *pd = container_of(w, struct usbpd, sm_work); union power_supply_propval val = {0}; - int ret; + int ret, ms; struct rx_msg *rx_msg = NULL; unsigned long flags; @@ -2063,6 +2116,13 @@ static void usbpd_sm(struct work_struct *w) kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); dual_role_instance_changed(pd->dual_role); + if (pd->has_dp) { + pd->has_dp = false; + + /* Set to USB only mode when cable disconnected */ + extcon_blocking_sync(pd->extcon, EXTCON_DISP_DP, 0); + } + goto sm_done; } @@ -2142,6 +2202,21 @@ static void usbpd_sm(struct work_struct *w) } break; + case PE_SRC_STARTUP_WAIT_FOR_VDM_RESP: + if (IS_DATA(rx_msg, MSG_VDM)) + handle_vdm_rx(pd, rx_msg); + + /* tVCONNStable (50ms) elapsed */ + ms = FIRST_SOURCE_CAP_TIME - 50; + + /* if no vdm msg received SENDER_RESPONSE_TIME elapsed */ + if (!rx_msg) + ms -= SENDER_RESPONSE_TIME; + + pd->current_state = PE_SRC_SEND_CAPABILITIES; + kick_sm(pd, ms); + break; + case PE_SRC_STARTUP: usbpd_set_state(pd, PE_SRC_STARTUP); break; @@ -2256,7 +2331,7 @@ static void usbpd_sm(struct work_struct *w) pd->send_pr_swap = false; ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, "Error sending PR Swap\n"); + usbpd_err(&pd->dev, "Error sending PR Swap\n"); usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); break; } @@ -2267,7 +2342,7 @@ static void usbpd_sm(struct work_struct *w) pd->send_dr_swap = false; ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, "Error sending DR Swap\n"); + usbpd_err(&pd->dev, "Error sending DR Swap\n"); usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); break; } @@ -2275,7 +2350,7 @@ static void usbpd_sm(struct work_struct *w) pd->current_state = PE_DRS_SEND_DR_SWAP; kick_sm(pd, SENDER_RESPONSE_TIME); } else { - handle_vdm_tx(pd); + handle_vdm_tx(pd, SOP_MSG); } break; @@ -2549,8 +2624,7 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, - "Error sending get_src_cap_ext\n"); + usbpd_err(&pd->dev, "Error sending get_src_cap_ext\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } @@ -2569,8 +2643,7 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, - "Error sending get_pps_status\n"); + usbpd_err(&pd->dev, "Error sending get_pps_status\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } @@ -2585,23 +2658,32 @@ static void usbpd_sm(struct work_struct *w) sizeof(pd->pps_status_db)); complete(&pd->is_ready); } else if (IS_DATA(rx_msg, MSG_ALERT)) { - if (rx_msg->data_len != sizeof(pd->received_ado)) { + u32 ado; + + if (rx_msg->data_len != sizeof(ado)) { usbpd_err(&pd->dev, "Invalid ado\n"); break; } - memcpy(&pd->received_ado, rx_msg->payload, - sizeof(pd->received_ado)); - ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, - 0, SOP_MSG); + memcpy(&ado, rx_msg->payload, sizeof(ado)); + usbpd_dbg(&pd->dev, "Received Alert 0x%08x\n", ado); + + /* + * Don't send Get_Status right away so we can coalesce + * multiple Alerts. 150ms should be enough to not get + * in the way of any other AMS that might happen. 
+ */ + pd->send_get_status = true; + kick_sm(pd, 150); + } else if (pd->send_get_status && is_sink_tx_ok(pd)) { + pd->send_get_status = false; + ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, - "Error sending get_status\n"); + usbpd_err(&pd->dev, "Error sending get_status\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } kick_sm(pd, SENDER_RESPONSE_TIME); - } else if (rx_msg && - IS_EXT(rx_msg, MSG_STATUS)) { + } else if (rx_msg && IS_EXT(rx_msg, MSG_STATUS)) { if (rx_msg->data_len != PD_STATUS_DB_LEN) { usbpd_err(&pd->dev, "Invalid status db\n"); break; @@ -2614,8 +2696,7 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP, &pd->get_battery_cap_db, 1, SOP_MSG); if (ret) { - dev_err(&pd->dev, - "Error sending get_battery_cap\n"); + usbpd_err(&pd->dev, "Error sending get_battery_cap\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } @@ -2634,8 +2715,7 @@ static void usbpd_sm(struct work_struct *w) ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS, &pd->get_battery_status_db, 1, SOP_MSG); if (ret) { - dev_err(&pd->dev, - "Error sending get_battery_status\n"); + usbpd_err(&pd->dev, "Error sending get_battery_status\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } @@ -2665,7 +2745,7 @@ static void usbpd_sm(struct work_struct *w) pd->send_pr_swap = false; ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, "Error sending PR Swap\n"); + usbpd_err(&pd->dev, "Error sending PR Swap\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } @@ -2676,7 +2756,7 @@ static void usbpd_sm(struct work_struct *w) pd->send_dr_swap = false; ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG); if (ret) { - dev_err(&pd->dev, "Error sending DR Swap\n"); + usbpd_err(&pd->dev, "Error sending DR Swap\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); break; } @@ -2684,7 +2764,7 @@ static void usbpd_sm(struct work_struct *w) pd->current_state = PE_DRS_SEND_DR_SWAP; kick_sm(pd, SENDER_RESPONSE_TIME); } else if (is_sink_tx_ok(pd)) { - handle_vdm_tx(pd); + handle_vdm_tx(pd, SOP_MSG); } break; @@ -3293,9 +3373,9 @@ static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env) "explicit" : "implicit"); add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED); - add_uevent_var(env, "ADO=%08x", pd->received_ado); - for (i = 0; i < PD_STATUS_DB_LEN; i++) - add_uevent_var(env, "SDB%d=%08x", i, pd->status_db[i]); + add_uevent_var(env, "SDB=%02x %02x %02x %02x %02x", pd->status_db[0], + pd->status_db[1], pd->status_db[2], pd->status_db[3], + pd->status_db[4]); return 0; } @@ -3686,38 +3766,56 @@ static ssize_t get_src_cap_ext_show(struct device *dev, return ret; for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++) - len += snprintf(buf + len, PAGE_SIZE - len, "%d\n", - pd->src_cap_ext_db[i]); + len += snprintf(buf + len, PAGE_SIZE - len, "%s0x%02x", + i ? 
" " : "", pd->src_cap_ext_db[i]); + + buf[len++] = '\n'; + buf[len] = '\0'; + return len; } static DEVICE_ATTR_RO(get_src_cap_ext); -static ssize_t get_pps_status_show(struct device *dev, +static ssize_t get_status_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret; + int i, ret, len = 0; struct usbpd *pd = dev_get_drvdata(dev); if (pd->spec_rev == USBPD_REV_20) return -EINVAL; - ret = trigger_tx_msg(pd, &pd->send_get_pps_status); + ret = trigger_tx_msg(pd, &pd->send_get_status); if (ret) return ret; - return snprintf(buf, PAGE_SIZE, "%d\n", pd->pps_status_db); + for (i = 0; i < PD_STATUS_DB_LEN; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "%s0x%02x", + i ? " " : "", pd->status_db[i]); + + buf[len++] = '\n'; + buf[len] = '\0'; + + return len; } -static DEVICE_ATTR_RO(get_pps_status); +static DEVICE_ATTR_RO(get_status); -static ssize_t rx_ado_show(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t get_pps_status_show(struct device *dev, + struct device_attribute *attr, char *buf) { + int ret; struct usbpd *pd = dev_get_drvdata(dev); - /* dump the ADO as a hex string */ - return snprintf(buf, PAGE_SIZE, "%08x\n", pd->received_ado); + if (pd->spec_rev == USBPD_REV_20) + return -EINVAL; + + ret = trigger_tx_msg(pd, &pd->send_get_pps_status); + if (ret) + return ret; + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", pd->pps_status_db); } -static DEVICE_ATTR_RO(rx_ado); +static DEVICE_ATTR_RO(get_pps_status); static ssize_t get_battery_cap_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) @@ -3747,8 +3845,12 @@ static ssize_t get_battery_cap_show(struct device *dev, return -EINVAL; for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++) - len += snprintf(buf + len, PAGE_SIZE - len, "%d\n", - pd->battery_cap_db[i]); + len += snprintf(buf + len, PAGE_SIZE - len, "%s0x%02x", + i ? 
" " : "", pd->battery_cap_db[i]); + + buf[len++] = '\n'; + buf[len] = '\0'; + return len; } static DEVICE_ATTR_RW(get_battery_cap); @@ -3779,7 +3881,7 @@ static ssize_t get_battery_status_show(struct device *dev, if (pd->get_battery_status_db == -EINVAL) return -EINVAL; - return snprintf(buf, PAGE_SIZE, "%d\n", pd->battery_sts_dobj); + return snprintf(buf, PAGE_SIZE, "0x%08x\n", pd->battery_sts_dobj); } static DEVICE_ATTR_RW(get_battery_status); @@ -3803,8 +3905,8 @@ static struct attribute *usbpd_attrs[] = { &dev_attr_rdo_h.attr, &dev_attr_hard_reset.attr, &dev_attr_get_src_cap_ext.attr, + &dev_attr_get_status.attr, &dev_attr_get_pps_status.attr, - &dev_attr_rx_ado.attr, &dev_attr_get_battery_cap.attr, &dev_attr_get_battery_status.attr, NULL, diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c index dfbd53c7414e772dcb26d3f51c785983a20bf8d7..3a945ad9cb055d1e9d14cb1abeaa714dfdc2222f 100644 --- a/drivers/usb/pd/qpnp-pdphy.c +++ b/drivers/usb/pd/qpnp-pdphy.c @@ -50,7 +50,7 @@ #define TX_SIZE_MASK 0xF #define USB_PDPHY_TX_CONTROL 0x44 -#define TX_CONTROL_RETRY_COUNT (BIT(6) | BIT(5)) +#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5) #define TX_CONTROL_FRAME_TYPE (BIT(4) | BIT(3) | BIT(2)) #define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2) #define TX_CONTROL_SEND_SIGNAL BIT(1) @@ -77,6 +77,9 @@ #define VDD_PDPHY_VOL_MAX 3300000 /* uV */ #define VDD_PDPHY_HPM_LOAD 3000 /* uA */ +/* Message Spec Rev field */ +#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3) + /* timers */ #define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */ #define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */ @@ -443,12 +446,12 @@ int pd_phy_signal(enum pd_sig_type sig) if (ret) return ret; - ret = wait_event_interruptible_timeout(pdphy->tx_waitq, + ret = wait_event_interruptible_hrtimeout(pdphy->tx_waitq, pdphy->tx_status != -EINPROGRESS, - msecs_to_jiffies(HARD_RESET_COMPLETE_TIME)); - if (ret <= 0) { + ms_to_ktime(HARD_RESET_COMPLETE_TIME)); + if (ret) { dev_err(pdphy->dev, "%s: failed ret %d", __func__, ret); - return ret ? ret : -ETIMEDOUT; + return ret; } ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0); @@ -526,18 +529,24 @@ int pd_phy_write(u16 hdr, const u8 *data, size_t data_len, enum pd_sop_type sop) usleep_range(2, 3); - val = TX_CONTROL_RETRY_COUNT | (sop << 2) | TX_CONTROL_SEND_MSG; + val = (sop << 2) | TX_CONTROL_SEND_MSG; + + /* nRetryCount == 2 for PD 3.0, 3 for PD 2.0 */ + if (PD_MSG_HDR_REV(hdr) == USBPD_REV_30) + val |= TX_CONTROL_RETRY_COUNT(2); + else + val |= TX_CONTROL_RETRY_COUNT(3); ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val); if (ret) return ret; - ret = wait_event_interruptible_timeout(pdphy->tx_waitq, + ret = wait_event_interruptible_hrtimeout(pdphy->tx_waitq, pdphy->tx_status != -EINPROGRESS, - msecs_to_jiffies(RECEIVER_RESPONSE_TIME)); - if (ret <= 0) { + ms_to_ktime(RECEIVER_RESPONSE_TIME)); + if (ret) { dev_err(pdphy->dev, "%s: failed ret %d", __func__, ret); - return ret ? 
ret : -ETIMEDOUT; + return ret; } if (hdr && !pdphy->tx_status) @@ -704,7 +713,7 @@ static irqreturn_t pdphy_msg_rx_irq(int irq, void *data) goto done; frame_type = rx_status & RX_FRAME_TYPE; - if (frame_type != SOP_MSG) { + if (frame_type == SOPII_MSG) { dev_err(pdphy->dev, "%s:unsupported frame type %d\n", __func__, frame_type); goto done; diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h index 76ffbf6e36696c4dcd1485e93319b0a025bf1b4d..fbddd747514a6beda479c5c6c62cd38d2e675bfe 100644 --- a/drivers/usb/pd/usbpd.h +++ b/drivers/usb/pd/usbpd.h @@ -58,6 +58,7 @@ enum pd_spec_rev { /* enable msg and signal to be received by phy */ #define FRAME_FILTER_EN_SOP BIT(0) +#define FRAME_FILTER_EN_SOPI BIT(1) #define FRAME_FILTER_EN_HARD_RESET BIT(5) struct pd_phy_params { diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 6c7f082406dc129efd83cdcc5192ac281922646b..e8a50972075e71dcb7d91e49f6869c662d694105 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -145,6 +145,16 @@ config USB_ISP1301 To compile this driver as a module, choose M here: the module will be called phy-isp1301. +config MSM_SNPS_FEMTO_PHY + tristate "MSM SNPS HSUSB PHY Driver" + depends on ARCH_QCOM + select USB_PHY + help + Enable this to support the 28nm HSUSB PHY on MSM chips. This driver + supports the high-speed PHY which is usually paired with either + the ChipIdea or Synopsys DWC3 USB IPs on MSM SOCs. This driver expects + to configure the PHY with a dedicated register I/O memory region. + config USB_MSM_OTG tristate "Qualcomm on-chip USB OTG controller support" depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST) @@ -206,6 +216,16 @@ config USB_MSM_SSPHY_QMP set for its control sequences, normally paired with newer DWC3-based SuperSpeed controllers. +config MSM_QUSB_PHY + tristate "MSM QUSB2 PHY Driver" + depends on ARCH_QCOM + select USB_PHY + help + Enable this to support the QUSB2 PHY on MSM chips. This driver supports + the high-speed PHY which is usually paired with either the ChipIdea or + Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the + PHY with a dedicated register I/O memory region. + config MSM_HSUSB_PHY tristate "MSM HSUSB PHY Driver" depends on ARCH_QCOM diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index 444fa5c5d0beec591c489549f592c7a169f82c44..7bbed6e3005294c7624df82bb0b7010577ed9a11 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -31,4 +31,6 @@ obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o +obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o +obj-$(CONFIG_MSM_SNPS_FEMTO_PHY) += phy-qcom-snps-28nm-hs.o diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c new file mode 100644 index 0000000000000000000000000000000000000000..47db12bef9c64dc7ae95e776f3f0591a827300a0 --- /dev/null +++ b/drivers/usb/phy/phy-msm-qusb.c @@ -0,0 +1,1269 @@ +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define QUSB2PHY_PLL_STATUS 0x38 +#define QUSB2PHY_PLL_LOCK BIT(5) + +#define QUSB2PHY_PORT_QC1 0x70 +#define VDM_SRC_EN BIT(4) +#define VDP_SRC_EN BIT(2) + +#define QUSB2PHY_PORT_QC2 0x74 +#define RDM_UP_EN BIT(1) +#define RDP_UP_EN BIT(3) +#define RPUM_LOW_EN BIT(4) +#define RPUP_LOW_EN BIT(5) + +#define QUSB2PHY_PORT_POWERDOWN 0xB4 +#define CLAMP_N_EN BIT(5) +#define FREEZIO_N BIT(1) +#define POWER_DOWN BIT(0) + +#define QUSB2PHY_PORT_TEST_CTRL 0xB8 + +#define QUSB2PHY_PWR_CTRL1 0x210 +#define PWR_CTRL1_CLAMP_N_EN BIT(1) +#define PWR_CTRL1_POWR_DOWN BIT(0) + +#define QUSB2PHY_PLL_COMMON_STATUS_ONE 0x1A0 +#define CORE_READY_STATUS BIT(0) + +#define QUSB2PHY_PORT_UTMI_CTRL1 0xC0 +#define TERM_SELECT BIT(4) +#define XCVR_SELECT_FS BIT(2) +#define OP_MODE_NON_DRIVE BIT(0) + +#define QUSB2PHY_PORT_UTMI_CTRL2 0xC4 +#define UTMI_ULPI_SEL BIT(7) +#define UTMI_TEST_MUX_SEL BIT(6) + +#define QUSB2PHY_PLL_TEST 0x04 +#define CLK_REF_SEL BIT(7) + +#define QUSB2PHY_PORT_TUNE1 0x80 +#define QUSB2PHY_PORT_TUNE2 0x84 +#define QUSB2PHY_PORT_TUNE3 0x88 +#define QUSB2PHY_PORT_TUNE4 0x8C +#define QUSB2PHY_PORT_TUNE5 0x90 + +/* Get TUNE2's high nibble value read from efuse */ +#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask) ((val >> pos) & mask) + +#define QUSB2PHY_PORT_INTR_CTRL 0xBC +#define CHG_DET_INTR_EN BIT(4) +#define DMSE_INTR_HIGH_SEL BIT(3) +#define DMSE_INTR_EN BIT(2) +#define DPSE_INTR_HIGH_SEL BIT(1) +#define DPSE_INTR_EN BIT(0) + +#define QUSB2PHY_PORT_UTMI_STATUS 0xF4 +#define LINESTATE_DP BIT(0) +#define LINESTATE_DM BIT(1) + + +#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */ +#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */ +#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */ + +#define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */ +#define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */ +#define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */ + +#define QUSB2PHY_REFCLK_ENABLE BIT(0) + +static unsigned int tune1; +module_param(tune1, uint, 0644); +MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1"); + +static unsigned int tune2; +module_param(tune2, uint, 0644); +MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2"); + +static unsigned int tune3; +module_param(tune3, uint, 0644); +MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3"); + +static unsigned int tune4; +module_param(tune4, uint, 0644); +MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4"); + +static unsigned int tune5; +module_param(tune5, uint, 0644); +MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5"); + + +struct qusb_phy { + struct usb_phy phy; + void __iomem *base; + void __iomem *tune2_efuse_reg; + void __iomem *ref_clk_base; + void __iomem *tcsr_clamp_dig_n; + + struct clk *ref_clk_src; + struct clk *ref_clk; + struct clk *cfg_ahb_clk; + struct reset_control *phy_reset; + struct clk *iface_clk; + struct clk *core_clk; + + struct regulator *gdsc; + struct regulator *vdd; + struct regulator *vdda33; + struct regulator *vdda18; + int vdd_levels[3]; /* none, low, high */ + int init_seq_len; + int *qusb_phy_init_seq; + u32 major_rev; + + u32 tune2_val; + int tune2_efuse_bit_pos; + int tune2_efuse_num_of_bits; + int tune2_efuse_correction; + + bool power_enabled; + bool clocks_enabled; + bool cable_connected; + bool 
suspended; + bool ulpi_mode; + bool dpdm_enable; + bool is_se_clk; + + struct regulator_desc dpdm_rdesc; + struct regulator_dev *dpdm_rdev; + + /* emulation targets specific */ + void __iomem *emu_phy_base; + bool emulation; + int *emu_init_seq; + int emu_init_seq_len; + int *phy_pll_reset_seq; + int phy_pll_reset_seq_len; + int *emu_dcm_reset_seq; + int emu_dcm_reset_seq_len; + bool put_into_high_z_state; + struct mutex phy_lock; +}; + +static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on) +{ + dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n", + __func__, qphy->clocks_enabled, on); + + if (!qphy->clocks_enabled && on) { + clk_prepare_enable(qphy->ref_clk_src); + clk_prepare_enable(qphy->ref_clk); + clk_prepare_enable(qphy->iface_clk); + clk_prepare_enable(qphy->core_clk); + clk_prepare_enable(qphy->cfg_ahb_clk); + qphy->clocks_enabled = true; + } + + if (qphy->clocks_enabled && !on) { + clk_disable_unprepare(qphy->cfg_ahb_clk); + /* + * FSM depedency beween iface_clk and core_clk. + * Hence turned off core_clk before iface_clk. + */ + clk_disable_unprepare(qphy->core_clk); + clk_disable_unprepare(qphy->iface_clk); + clk_disable_unprepare(qphy->ref_clk); + clk_disable_unprepare(qphy->ref_clk_src); + qphy->clocks_enabled = false; + } + + dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__, + qphy->clocks_enabled); +} + +static int qusb_phy_gdsc(struct qusb_phy *qphy, bool on) +{ + int ret; + + if (IS_ERR_OR_NULL(qphy->gdsc)) + return -EPERM; + + if (on) { + dev_dbg(qphy->phy.dev, "TURNING ON GDSC\n"); + ret = regulator_enable(qphy->gdsc); + if (ret) { + dev_err(qphy->phy.dev, "unable to enable gdsc\n"); + return ret; + } + } else { + dev_dbg(qphy->phy.dev, "TURNING OFF GDSC\n"); + ret = regulator_disable(qphy->gdsc); + if (ret) { + dev_err(qphy->phy.dev, "unable to disable gdsc\n"); + return ret; + } + } + + return ret; +} + +static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high) +{ + int min, ret; + + min = high ? 1 : 0; /* low or none? */ + ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min], + qphy->vdd_levels[2]); + if (ret) { + dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n"); + return ret; + } + + dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n", + qphy->vdd_levels[min], qphy->vdd_levels[2]); + return ret; +} + +static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on) +{ + int ret = 0; + + dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n", + __func__, on ? 
"on" : "off", qphy->power_enabled); + + if (qphy->power_enabled == on) { + dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n"); + return 0; + } + + if (!on) + goto disable_vdda33; + + ret = qusb_phy_config_vdd(qphy, true); + if (ret) { + dev_err(qphy->phy.dev, "Unable to config VDD:%d\n", + ret); + goto err_vdd; + } + + ret = regulator_enable(qphy->vdd); + if (ret) { + dev_err(qphy->phy.dev, "Unable to enable VDD\n"); + goto unconfig_vdd; + } + + ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD); + if (ret < 0) { + dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret); + goto disable_vdd; + } + + ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN, + QUSB2PHY_1P8_VOL_MAX); + if (ret) { + dev_err(qphy->phy.dev, + "Unable to set voltage for vdda18:%d\n", ret); + goto put_vdda18_lpm; + } + + ret = regulator_enable(qphy->vdda18); + if (ret) { + dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret); + goto unset_vdda18; + } + + ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD); + if (ret < 0) { + dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret); + goto disable_vdda18; + } + + ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN, + QUSB2PHY_3P3_VOL_MAX); + if (ret) { + dev_err(qphy->phy.dev, + "Unable to set voltage for vdda33:%d\n", ret); + goto put_vdda33_lpm; + } + + ret = regulator_enable(qphy->vdda33); + if (ret) { + dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret); + goto unset_vdd33; + } + + qphy->power_enabled = true; + + pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__); + return ret; + +disable_vdda33: + ret = regulator_disable(qphy->vdda33); + if (ret) + dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret); + +unset_vdd33: + ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX); + if (ret) + dev_err(qphy->phy.dev, + "Unable to set (0) voltage for vdda33:%d\n", ret); + +put_vdda33_lpm: + ret = regulator_set_load(qphy->vdda33, 0); + if (ret < 0) + dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n"); + +disable_vdda18: + ret = regulator_disable(qphy->vdda18); + if (ret) + dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret); + +unset_vdda18: + ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX); + if (ret) + dev_err(qphy->phy.dev, + "Unable to set (0) voltage for vdda18:%d\n", ret); + +put_vdda18_lpm: + ret = regulator_set_load(qphy->vdda18, 0); + if (ret < 0) + dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n"); + +disable_vdd: + ret = regulator_disable(qphy->vdd); + if (ret) + dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n", + ret); + +unconfig_vdd: + ret = qusb_phy_config_vdd(qphy, false); + if (ret) + dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n", + ret); +err_vdd: + qphy->power_enabled = false; + dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n"); + return ret; +} + +static void qusb_phy_get_tune2_param(struct qusb_phy *qphy) +{ + u8 num_of_bits; + u32 bit_mask = 1; + u8 reg_val; + + pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__, + qphy->tune2_efuse_num_of_bits, + qphy->tune2_efuse_bit_pos); + + /* get bit mask based on number of bits to use with efuse reg */ + if (qphy->tune2_efuse_num_of_bits) { + num_of_bits = qphy->tune2_efuse_num_of_bits; + bit_mask = (bit_mask << num_of_bits) - 1; + } + + /* + * Read EFUSE register having TUNE2 parameter's high nibble. + * If efuse register shows value as 0x0, then use previous value + * as it is. 
Otherwise use efuse register based value for this purpose. + */ + qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg); + pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n", + __func__, bit_mask, qphy->tune2_val); + + qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val, + qphy->tune2_efuse_bit_pos, bit_mask); + + /* Update higher nibble of TUNE2 value for better rise/fall times */ + if (qphy->tune2_efuse_correction && qphy->tune2_val) { + if (qphy->tune2_efuse_correction > 5 || + qphy->tune2_efuse_correction < -10) + pr_warn("Correction value is out of range : %d\n", + qphy->tune2_efuse_correction); + else + qphy->tune2_val = qphy->tune2_val + + qphy->tune2_efuse_correction; + } + + reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2); + if (qphy->tune2_val) { + reg_val &= 0x0f; + reg_val |= (qphy->tune2_val << 4); + } + + qphy->tune2_val = reg_val; +} + +static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt, + unsigned long delay) +{ + int i; + + pr_debug("Seq count:%d\n", cnt); + for (i = 0; i < cnt; i = i+2) { + pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]); + writel_relaxed(seq[i], base + seq[i+1]); + if (delay) + usleep_range(delay, (delay + 2000)); + } +} + +static int qusb_phy_init(struct usb_phy *phy) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + int ret, reset_val = 0; + u8 reg; + bool pll_lock_fail = false; + + dev_dbg(phy->dev, "%s\n", __func__); + + ret = qusb_phy_enable_power(qphy, true); + if (ret) + return ret; + + qusb_phy_enable_clocks(qphy, true); + + /* + * ref clock is enabled by default after power on reset. Linux clock + * driver will disable this clock as part of late init if peripheral + * driver(s) does not explicitly votes for it. Linux clock driver also + * does not disable the clock until late init even if peripheral + * driver explicitly requests it and cannot defer the probe until late + * init. Hence, Explicitly disable the clock using register write to + * allow QUSB PHY PLL to lock properly. 
+ */ + if (qphy->ref_clk_base) { + writel_relaxed((readl_relaxed(qphy->ref_clk_base) & + ~QUSB2PHY_REFCLK_ENABLE), + qphy->ref_clk_base); + /* Make sure that above write complete to get ref clk OFF */ + wmb(); + } + + /* Perform phy reset */ + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__); + usleep_range(100, 150); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__); + + if (qphy->emulation) { + if (qphy->emu_init_seq) + qusb_phy_write_seq(qphy->emu_phy_base, + qphy->emu_init_seq, qphy->emu_init_seq_len, 0); + + if (qphy->qusb_phy_init_seq) + qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq, + qphy->init_seq_len, 0); + + /* Wait for 5ms as per QUSB2 RUMI sequence */ + usleep_range(5000, 7000); + + if (qphy->phy_pll_reset_seq) + qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq, + qphy->phy_pll_reset_seq_len, 10000); + + if (qphy->emu_dcm_reset_seq) + qusb_phy_write_seq(qphy->emu_phy_base, + qphy->emu_dcm_reset_seq, + qphy->emu_dcm_reset_seq_len, 10000); + + return 0; + } + + /* Disable the PHY */ + if (qphy->major_rev < 2) + writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN, + qphy->base + QUSB2PHY_PORT_POWERDOWN); + else + writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) | + PWR_CTRL1_POWR_DOWN, + qphy->base + QUSB2PHY_PWR_CTRL1); + + /* configure for ULPI mode if requested */ + if (qphy->ulpi_mode) + writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2); + + /* save reset value to override based on clk scheme */ + if (qphy->ref_clk_base) + reset_val = readl_relaxed(qphy->base + QUSB2PHY_PLL_TEST); + + if (qphy->qusb_phy_init_seq) + qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq, + qphy->init_seq_len, 0); + + /* + * Check for EFUSE value only if tune2_efuse_reg is available + * and try to read EFUSE value only once i.e. not every USB + * cable connect case. 
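+	 * The value read from the efuse is cached in tune2_val and reused
+	 * on subsequent init calls; when the tune2 module parameter is set,
+	 * this efuse path is skipped and the modparam value is written instead.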
+ */ + if (qphy->tune2_efuse_reg && !tune2) { + if (!qphy->tune2_val) + qusb_phy_get_tune2_param(qphy); + + pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__, + qphy->tune2_val); + writel_relaxed(qphy->tune2_val, + qphy->base + QUSB2PHY_PORT_TUNE2); + } + + /* If tune modparam set, override tune value */ + + pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n", + __func__, tune1, tune2, tune3, tune4, tune5); + if (tune1) + writel_relaxed(tune1, + qphy->base + QUSB2PHY_PORT_TUNE1); + + if (tune2) + writel_relaxed(tune2, + qphy->base + QUSB2PHY_PORT_TUNE2); + + if (tune3) + writel_relaxed(tune3, + qphy->base + QUSB2PHY_PORT_TUNE3); + + if (tune4) + writel_relaxed(tune4, + qphy->base + QUSB2PHY_PORT_TUNE4); + + if (tune5) + writel_relaxed(tune5, + qphy->base + QUSB2PHY_PORT_TUNE5); + + /* ensure above writes are completed before re-enabling PHY */ + wmb(); + + /* Enable the PHY */ + if (qphy->major_rev < 2) + writel_relaxed(CLAMP_N_EN | FREEZIO_N, + qphy->base + QUSB2PHY_PORT_POWERDOWN); + else + writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) & + ~PWR_CTRL1_POWR_DOWN, + qphy->base + QUSB2PHY_PWR_CTRL1); + + /* Ensure above write is completed before turning ON ref clk */ + wmb(); + + /* Require to get phy pll lock successfully */ + usleep_range(150, 160); + + /* Turn on phy ref_clk if DIFF_CLK else select SE_CLK */ + if (qphy->ref_clk_base) { + if (!qphy->is_se_clk) { + reset_val &= ~CLK_REF_SEL; + writel_relaxed((readl_relaxed(qphy->ref_clk_base) | + QUSB2PHY_REFCLK_ENABLE), + qphy->ref_clk_base); + } else { + reset_val |= CLK_REF_SEL; + writel_relaxed(reset_val, + qphy->base + QUSB2PHY_PLL_TEST); + } + + /* Make sure above write is completed to get PLL source clock */ + wmb(); + + /* Required to get PHY PLL lock successfully */ + usleep_range(100, 110); + } + + if (qphy->major_rev < 2) { + reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_STATUS); + dev_dbg(phy->dev, "QUSB2PHY_PLL_STATUS:%x\n", reg); + if (!(reg & QUSB2PHY_PLL_LOCK)) + pll_lock_fail = true; + } else { + reg = readb_relaxed(qphy->base + + QUSB2PHY_PLL_COMMON_STATUS_ONE); + dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg); + if (!(reg & CORE_READY_STATUS)) + pll_lock_fail = true; + } + + if (pll_lock_fail) { + dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg); + WARN_ON(1); + } + + return 0; +} + +static void qusb_phy_shutdown(struct usb_phy *phy) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + dev_dbg(phy->dev, "%s\n", __func__); + + qusb_phy_enable_clocks(qphy, true); + + /* Disable the PHY */ + if (qphy->major_rev < 2) + writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN, + qphy->base + QUSB2PHY_PORT_POWERDOWN); + else + writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) | + PWR_CTRL1_POWR_DOWN, + qphy->base + QUSB2PHY_PWR_CTRL1); + + /* Make sure above write complete before turning off clocks */ + wmb(); + + qusb_phy_enable_clocks(qphy, false); +} +/** + * Performs QUSB2 PHY suspend/resume functionality. + * + * @uphy - usb phy pointer. + * @suspend - to enable suspend or not. 
1 - suspend, 0 - resume + * + */ +static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + u32 linestate = 0, intr_mask = 0; + + if (qphy->suspended && suspend) { + dev_dbg(phy->dev, "%s: USB PHY is already suspended\n", + __func__); + return 0; + } + + if (suspend) { + /* Bus suspend case */ + if (qphy->cable_connected || + (qphy->phy.flags & PHY_HOST_MODE)) { + /* Clear all interrupts */ + writel_relaxed(0x00, + qphy->base + QUSB2PHY_PORT_INTR_CTRL); + + linestate = readl_relaxed(qphy->base + + QUSB2PHY_PORT_UTMI_STATUS); + + /* + * D+/D- interrupts are level-triggered, but we are + * only interested if the line state changes, so enable + * the high/low trigger based on current state. In + * other words, enable the triggers _opposite_ of what + * the current D+/D- levels are. + * e.g. if currently D+ high, D- low (HS 'J'/Suspend), + * configure the mask to trigger on D+ low OR D- high + */ + intr_mask = DPSE_INTR_EN | DMSE_INTR_EN; + if (!(linestate & LINESTATE_DP)) /* D+ low */ + intr_mask |= DPSE_INTR_HIGH_SEL; + if (!(linestate & LINESTATE_DM)) /* D- low */ + intr_mask |= DMSE_INTR_HIGH_SEL; + + writel_relaxed(intr_mask, + qphy->base + QUSB2PHY_PORT_INTR_CTRL); + + if (linestate & (LINESTATE_DP | LINESTATE_DM)) { + /* enable phy auto-resume */ + writel_relaxed(0x0C, + qphy->base + QUSB2PHY_PORT_TEST_CTRL); + /* flush the previous write before next write */ + wmb(); + writel_relaxed(0x04, + qphy->base + QUSB2PHY_PORT_TEST_CTRL); + } + + + dev_dbg(phy->dev, "%s: intr_mask = %x\n", + __func__, intr_mask); + + /* Makes sure that above write goes through */ + wmb(); + + qusb_phy_enable_clocks(qphy, false); + } else { /* Disconnect case */ + mutex_lock(&qphy->phy_lock); + /* Disable all interrupts */ + writel_relaxed(0x00, + qphy->base + QUSB2PHY_PORT_INTR_CTRL); + + /* Disable PHY */ + writel_relaxed(POWER_DOWN | + readl_relaxed(qphy->base + + QUSB2PHY_PORT_POWERDOWN), + qphy->base + QUSB2PHY_PORT_POWERDOWN); + /* Make sure that above write is completed */ + wmb(); + + qusb_phy_enable_clocks(qphy, false); + if (qphy->tcsr_clamp_dig_n) + writel_relaxed(0x0, + qphy->tcsr_clamp_dig_n); + /* Do not disable power rails if there is vote for it */ + if (!qphy->dpdm_enable) + qusb_phy_enable_power(qphy, false); + else + dev_dbg(phy->dev, "race with rm_pulldown. Keep ldo ON\n"); + mutex_unlock(&qphy->phy_lock); + + /* + * Set put_into_high_z_state to true so next USB + * cable connect, DPF_DMF request performs PHY + * reset and put it into high-z state. For bootup + * with or without USB cable, it doesn't require + * to put QUSB PHY into high-z state. 
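+			 * The flag is checked in
+			 * qusb_phy_dpdm_regulator_enable(), which performs
+			 * the PHY reset and UTMI non-driving override on the
+			 * next DP/DM request.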
+ */ + qphy->put_into_high_z_state = true; + } + qphy->suspended = true; + } else { + /* Bus suspend case */ + if (qphy->cable_connected || + (qphy->phy.flags & PHY_HOST_MODE)) { + qusb_phy_enable_clocks(qphy, true); + /* Clear all interrupts on resume */ + writel_relaxed(0x00, + qphy->base + QUSB2PHY_PORT_INTR_CTRL); + } else { + qusb_phy_enable_power(qphy, true); + if (qphy->tcsr_clamp_dig_n) + writel_relaxed(0x1, + qphy->tcsr_clamp_dig_n); + qusb_phy_enable_clocks(qphy, true); + } + qphy->suspended = false; + } + + return 0; +} + +static int qusb_phy_notify_connect(struct usb_phy *phy, + enum usb_device_speed speed) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + qphy->cable_connected = true; + + dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n", + qphy->cable_connected); + return 0; +} + +static int qusb_phy_notify_disconnect(struct usb_phy *phy, + enum usb_device_speed speed) +{ + struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); + + qphy->cable_connected = false; + + dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n", + qphy->cable_connected); + return 0; +} + +static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev) +{ + int ret = 0; + struct qusb_phy *qphy = rdev_get_drvdata(rdev); + + dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n", + __func__, qphy->dpdm_enable); + + mutex_lock(&qphy->phy_lock); + if (!qphy->dpdm_enable) { + ret = qusb_phy_enable_power(qphy, true); + if (ret < 0) { + dev_dbg(qphy->phy.dev, + "dpdm regulator enable failed:%d\n", ret); + mutex_unlock(&qphy->phy_lock); + return ret; + } + qphy->dpdm_enable = true; + if (qphy->put_into_high_z_state) { + if (qphy->tcsr_clamp_dig_n) + writel_relaxed(0x1, + qphy->tcsr_clamp_dig_n); + + qusb_phy_gdsc(qphy, true); + qusb_phy_enable_clocks(qphy, true); + + dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n"); + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(qphy->phy.dev, "phyassert failed\n"); + usleep_range(100, 150); + ret = reset_control_deassert(qphy->phy_reset); + if (ret) + dev_err(qphy->phy.dev, "deassert failed\n"); + + /* + * Phy in non-driving mode leaves Dp and Dm + * lines in high-Z state. Controller power + * collapse is not switching phy to non-driving + * mode causing charger detection failure. Bring + * phy to non-driving mode by overriding + * controller output via UTMI interface. 
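+			 * The override below selects the FS transceiver with
+			 * OP_MODE_NON_DRIVE via PORT_UTMI_CTRL1 and routes it
+			 * through the UTMI test mux via PORT_UTMI_CTRL2 before
+			 * the PHY is powered down again.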
+ */ + writel_relaxed(TERM_SELECT | XCVR_SELECT_FS | + OP_MODE_NON_DRIVE, + qphy->base + QUSB2PHY_PORT_UTMI_CTRL1); + writel_relaxed(UTMI_ULPI_SEL | + UTMI_TEST_MUX_SEL, + qphy->base + QUSB2PHY_PORT_UTMI_CTRL2); + + + /* Disable PHY */ + writel_relaxed(CLAMP_N_EN | FREEZIO_N | + POWER_DOWN, + qphy->base + QUSB2PHY_PORT_POWERDOWN); + /* Make sure that above write is completed */ + wmb(); + + qusb_phy_enable_clocks(qphy, false); + qusb_phy_gdsc(qphy, false); + } + } + mutex_unlock(&qphy->phy_lock); + + return ret; +} + +static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev) +{ + int ret = 0; + struct qusb_phy *qphy = rdev_get_drvdata(rdev); + + dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n", + __func__, qphy->dpdm_enable); + + mutex_lock(&qphy->phy_lock); + if (qphy->dpdm_enable) { + if (!qphy->cable_connected) { + if (qphy->tcsr_clamp_dig_n) + writel_relaxed(0x0, + qphy->tcsr_clamp_dig_n); + dev_dbg(qphy->phy.dev, "turn off for HVDCP case\n"); + ret = qusb_phy_enable_power(qphy, false); + if (ret < 0) { + dev_dbg(qphy->phy.dev, + "dpdm regulator disable failed:%d\n", + ret); + mutex_unlock(&qphy->phy_lock); + return ret; + } + } + qphy->dpdm_enable = false; + } + mutex_unlock(&qphy->phy_lock); + + return ret; +} + +static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct qusb_phy *qphy = rdev_get_drvdata(rdev); + + dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__, + qphy->dpdm_enable); + return qphy->dpdm_enable; +} + +static struct regulator_ops qusb_phy_dpdm_regulator_ops = { + .enable = qusb_phy_dpdm_regulator_enable, + .disable = qusb_phy_dpdm_regulator_disable, + .is_enabled = qusb_phy_dpdm_regulator_is_enabled, +}; + +static int qusb_phy_regulator_init(struct qusb_phy *qphy) +{ + struct device *dev = qphy->phy.dev; + struct regulator_config cfg = {}; + struct regulator_init_data *init_data; + + init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL); + if (!init_data) + return -ENOMEM; + + init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS; + qphy->dpdm_rdesc.owner = THIS_MODULE; + qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE; + qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops; + qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.driver_data = qphy; + cfg.of_node = dev->of_node; + + qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg); + if (IS_ERR(qphy->dpdm_rdev)) + return PTR_ERR(qphy->dpdm_rdev); + + return 0; +} + +static int qusb_phy_probe(struct platform_device *pdev) +{ + struct qusb_phy *qphy; + struct device *dev = &pdev->dev; + struct resource *res; + int ret = 0, size = 0; + const char *phy_type; + bool hold_phy_reset; + + qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); + if (!qphy) + return -ENOMEM; + + qphy->phy.dev = dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "qusb_phy_base"); + qphy->base = devm_ioremap_resource(dev, res); + if (IS_ERR(qphy->base)) + return PTR_ERR(qphy->base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "emu_phy_base"); + if (res) { + qphy->emu_phy_base = devm_ioremap_resource(dev, res); + if (IS_ERR(qphy->emu_phy_base)) { + dev_dbg(dev, "couldn't ioremap emu_phy_base\n"); + qphy->emu_phy_base = NULL; + } + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "tune2_efuse_addr"); + if (res) { + qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start, + resource_size(res)); + if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) { + ret = 
of_property_read_u32(dev->of_node, + "qcom,tune2-efuse-bit-pos", + &qphy->tune2_efuse_bit_pos); + if (!ret) { + ret = of_property_read_u32(dev->of_node, + "qcom,tune2-efuse-num-bits", + &qphy->tune2_efuse_num_of_bits); + } + of_property_read_u32(dev->of_node, + "qcom,tune2-efuse-correction", + &qphy->tune2_efuse_correction); + + if (ret) { + dev_err(dev, "DT Value for tune2 efuse is invalid.\n"); + return -EINVAL; + } + } + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ref_clk_addr"); + if (res) { + qphy->ref_clk_base = devm_ioremap_nocache(dev, + res->start, resource_size(res)); + if (IS_ERR(qphy->ref_clk_base)) { + dev_dbg(dev, "ref_clk_address is not available.\n"); + return PTR_ERR(qphy->ref_clk_base); + } + + ret = of_property_read_string(dev->of_node, + "qcom,phy-clk-scheme", &phy_type); + if (ret) { + dev_err(dev, "error need qsub_phy_clk_scheme.\n"); + return ret; + } + + if (!strcasecmp(phy_type, "cml")) { + qphy->is_se_clk = false; + } else if (!strcasecmp(phy_type, "cmos")) { + qphy->is_se_clk = true; + } else { + dev_err(dev, "erro invalid qusb_phy_clk_scheme\n"); + return -EINVAL; + } + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "tcsr_clamp_dig_n_1p8"); + if (res) { + qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev, + res->start, resource_size(res)); + if (IS_ERR(qphy->tcsr_clamp_dig_n)) { + dev_err(dev, "err reading tcsr_clamp_dig_n\n"); + qphy->tcsr_clamp_dig_n = NULL; + } + } + + qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src"); + if (IS_ERR(qphy->ref_clk_src)) + dev_dbg(dev, "clk get failed for ref_clk_src\n"); + + qphy->ref_clk = devm_clk_get(dev, "ref_clk"); + if (IS_ERR(qphy->ref_clk)) + dev_dbg(dev, "clk get failed for ref_clk\n"); + else + clk_set_rate(qphy->ref_clk, 19200000); + + qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk"); + if (IS_ERR(qphy->cfg_ahb_clk)) + return PTR_ERR(qphy->cfg_ahb_clk); + + qphy->phy_reset = devm_reset_control_get(dev, "phy_reset"); + if (IS_ERR(qphy->phy_reset)) + return PTR_ERR(qphy->phy_reset); + + if (of_property_match_string(dev->of_node, + "clock-names", "iface_clk") >= 0) { + qphy->iface_clk = devm_clk_get(dev, "iface_clk"); + if (IS_ERR(qphy->iface_clk)) { + ret = PTR_ERR(qphy->iface_clk); + qphy->iface_clk = NULL; + if (ret == -EPROBE_DEFER) + return ret; + dev_err(dev, "couldn't get iface_clk(%d)\n", ret); + } + } + + if (of_property_match_string(dev->of_node, + "clock-names", "core_clk") >= 0) { + qphy->core_clk = devm_clk_get(dev, "core_clk"); + if (IS_ERR(qphy->core_clk)) { + ret = PTR_ERR(qphy->core_clk); + qphy->core_clk = NULL; + if (ret == -EPROBE_DEFER) + return ret; + dev_err(dev, "couldn't get core_clk(%d)\n", ret); + } + } + + qphy->gdsc = devm_regulator_get(dev, "USB3_GDSC"); + if (IS_ERR(qphy->gdsc)) + qphy->gdsc = NULL; + + qphy->emulation = of_property_read_bool(dev->of_node, + "qcom,emulation"); + + of_get_property(dev->of_node, "qcom,emu-init-seq", &size); + if (size) { + qphy->emu_init_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (qphy->emu_init_seq) { + qphy->emu_init_seq_len = + (size / sizeof(*qphy->emu_init_seq)); + if (qphy->emu_init_seq_len % 2) { + dev_err(dev, "invalid emu_init_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,emu-init-seq", + qphy->emu_init_seq, + qphy->emu_init_seq_len); + } else { + dev_dbg(dev, "error allocating memory for emu_init_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size); + if (size) { + qphy->phy_pll_reset_seq = devm_kzalloc(dev, + 
size, GFP_KERNEL); + if (qphy->phy_pll_reset_seq) { + qphy->phy_pll_reset_seq_len = + (size / sizeof(*qphy->phy_pll_reset_seq)); + if (qphy->phy_pll_reset_seq_len % 2) { + dev_err(dev, "invalid phy_pll_reset_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,phy-pll-reset-seq", + qphy->phy_pll_reset_seq, + qphy->phy_pll_reset_seq_len); + } else { + dev_dbg(dev, "error allocating memory for phy_pll_reset_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size); + if (size) { + qphy->emu_dcm_reset_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (qphy->emu_dcm_reset_seq) { + qphy->emu_dcm_reset_seq_len = + (size / sizeof(*qphy->emu_dcm_reset_seq)); + if (qphy->emu_dcm_reset_seq_len % 2) { + dev_err(dev, "invalid emu_dcm_reset_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,emu-dcm-reset-seq", + qphy->emu_dcm_reset_seq, + qphy->emu_dcm_reset_seq_len); + } else { + dev_dbg(dev, "error allocating memory for emu_dcm_reset_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size); + if (size) { + qphy->qusb_phy_init_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (qphy->qusb_phy_init_seq) { + qphy->init_seq_len = + (size / sizeof(*qphy->qusb_phy_init_seq)); + if (qphy->init_seq_len % 2) { + dev_err(dev, "invalid init_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,qusb-phy-init-seq", + qphy->qusb_phy_init_seq, + qphy->init_seq_len); + } else { + dev_err(dev, "error allocating memory for phy_init_seq\n"); + } + } + + qphy->ulpi_mode = false; + ret = of_property_read_string(dev->of_node, "phy_type", &phy_type); + + if (!ret) { + if (!strcasecmp(phy_type, "ulpi")) + qphy->ulpi_mode = true; + } else { + dev_err(dev, "error reading phy_type property\n"); + return ret; + } + + hold_phy_reset = of_property_read_bool(dev->of_node, "qcom,hold-reset"); + + /* use default major revision as 2 */ + qphy->major_rev = 2; + ret = of_property_read_u32(dev->of_node, "qcom,major-rev", + &qphy->major_rev); + + ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level", + (u32 *) qphy->vdd_levels, + ARRAY_SIZE(qphy->vdd_levels)); + if (ret) { + dev_err(dev, "error reading qcom,vdd-voltage-level property\n"); + return ret; + } + + qphy->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(qphy->vdd)) { + dev_err(dev, "unable to get vdd supply\n"); + return PTR_ERR(qphy->vdd); + } + + qphy->vdda33 = devm_regulator_get(dev, "vdda33"); + if (IS_ERR(qphy->vdda33)) { + dev_err(dev, "unable to get vdda33 supply\n"); + return PTR_ERR(qphy->vdda33); + } + + qphy->vdda18 = devm_regulator_get(dev, "vdda18"); + if (IS_ERR(qphy->vdda18)) { + dev_err(dev, "unable to get vdda18 supply\n"); + return PTR_ERR(qphy->vdda18); + } + + mutex_init(&qphy->phy_lock); + platform_set_drvdata(pdev, qphy); + + qphy->phy.label = "msm-qusb-phy"; + qphy->phy.init = qusb_phy_init; + qphy->phy.set_suspend = qusb_phy_set_suspend; + qphy->phy.shutdown = qusb_phy_shutdown; + qphy->phy.type = USB_PHY_TYPE_USB2; + qphy->phy.notify_connect = qusb_phy_notify_connect; + qphy->phy.notify_disconnect = qusb_phy_notify_disconnect; + + /* + * On some platforms multiple QUSB PHYs are available. If QUSB PHY is + * not used, there is leakage current seen with QUSB PHY related voltage + * rail. Hence keep QUSB PHY into reset state explicitly here. 
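+	 * The reset is asserted only when the "qcom,hold-reset" DT property
+	 * is present; qusb_phy_init() de-asserts it again when this PHY is
+	 * actually brought up.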
+ */ + if (hold_phy_reset) { + ret = reset_control_assert(qphy->phy_reset); + if (ret) + dev_err(dev, "%s:phy_reset assert failed\n", __func__); + } + + ret = usb_add_phy_dev(&qphy->phy); + if (ret) + return ret; + + ret = qusb_phy_regulator_init(qphy); + if (ret) + usb_remove_phy(&qphy->phy); + + /* de-assert clamp dig n to reduce leakage on 1p8 upon boot up */ + if (qphy->tcsr_clamp_dig_n) + writel_relaxed(0x0, qphy->tcsr_clamp_dig_n); + + return ret; +} + +static int qusb_phy_remove(struct platform_device *pdev) +{ + struct qusb_phy *qphy = platform_get_drvdata(pdev); + + usb_remove_phy(&qphy->phy); + + if (qphy->clocks_enabled) { + clk_disable_unprepare(qphy->cfg_ahb_clk); + clk_disable_unprepare(qphy->ref_clk); + clk_disable_unprepare(qphy->ref_clk_src); + qphy->clocks_enabled = false; + } + + qusb_phy_enable_power(qphy, false); + + return 0; +} + +static const struct of_device_id qusb_phy_id_table[] = { + { .compatible = "qcom,qusb2phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qusb_phy_id_table); + +static struct platform_driver qusb_phy_driver = { + .probe = qusb_phy_probe, + .remove = qusb_phy_remove, + .driver = { + .name = "msm-qusb-phy", + .of_match_table = of_match_ptr(qusb_phy_id_table), + }, +}; + +module_platform_driver(qusb_phy_driver); + +MODULE_DESCRIPTION("MSM QUSB2 PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c index a3624a15b1259a72953dd212dca9941416f2fede..3b67f831c340ce7f5ae255d0f876329adc93e97c 100644 --- a/drivers/usb/phy/phy-msm-snps-hs.c +++ b/drivers/usb/phy/phy-msm-snps-hs.c @@ -682,9 +682,6 @@ static int msm_hsphy_remove(struct platform_device *pdev) msm_hsphy_enable_clocks(phy, false); msm_hsphy_enable_power(phy, false); - - kfree(phy); - return 0; } diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 9321891e4533825e3531ea05668c8c5e6052d3da..b57ad1e25f1c2377d6f858cdcbed601b69ec5b0f 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -1091,7 +1091,6 @@ static int msm_ssphy_qmp_remove(struct platform_device *pdev) usb_remove_phy(&phy->phy); msm_ssphy_qmp_enable_clks(phy, false); msm_ssusb_qmp_ldo_enable(phy, 0); - kfree(phy); return 0; } diff --git a/drivers/usb/phy/phy-msm-ssusb.c b/drivers/usb/phy/phy-msm-ssusb.c index 91217e4092f7be0581bdc7b9439e3f3b2788af5a..d54cef45d87913be79bfe091a4a58e779347401d 100644 --- a/drivers/usb/phy/phy-msm-ssusb.c +++ b/drivers/usb/phy/phy-msm-ssusb.c @@ -59,15 +59,16 @@ MODULE_PARM_DESC(ss_phy_override_deemphasis, "Override SSPHY demphasis value"); struct msm_ssphy { struct usb_phy phy; void __iomem *base; - struct clk *core_clk; /* USB3 master clock */ + struct clk *ref_clk; struct reset_control *phy_com_reset; struct reset_control *phy_reset; struct regulator *vdd; struct regulator *vdda18; - atomic_t active_count; /* num of active instances */ bool suspended; int vdd_levels[3]; /* none, low, high */ int deemphasis_val; + + int power_enabled; }; static int msm_ssusb_config_vdd(struct msm_ssphy *phy, int high) @@ -93,47 +94,80 @@ static int msm_ssusb_ldo_enable(struct msm_ssphy *phy, int on) dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM"); + if (phy->power_enabled == on) { + dev_dbg(phy->phy.dev, "LDOs are already %s\n", + on ? 
"ON" : "OFF"); + return 0; + } + if (!on) goto disable_regulators; + rc = msm_ssusb_config_vdd(phy, 1); + if (rc) { + dev_err(phy->phy.dev, "Unable to config vdd: %d\n", rc); + return rc; + } + + rc = regulator_enable(phy->vdd); + if (rc) { + dev_err(phy->phy.dev, "Unable to enable vdd: %d\n", rc); + goto unset_vdd; + } rc = regulator_set_load(phy->vdda18, USB_SSPHY_1P8_HPM_LOAD); if (rc < 0) { - dev_err(phy->phy.dev, "Unable to set HPM of vdda18\n"); - return rc; + dev_err(phy->phy.dev, "Unable to set HPM of vdda18: %d\n", rc); + goto disable_vdd; } rc = regulator_set_voltage(phy->vdda18, USB_SSPHY_1P8_VOL_MIN, USB_SSPHY_1P8_VOL_MAX); if (rc) { - dev_err(phy->phy.dev, "unable to set voltage for vdda18\n"); + dev_err(phy->phy.dev, "unable to set voltage for vdda18: %d\n", + rc); goto put_vdda18_lpm; } rc = regulator_enable(phy->vdda18); if (rc) { - dev_err(phy->phy.dev, "Unable to enable vdda18\n"); + dev_err(phy->phy.dev, "Unable to enable vdda18: %d\n", rc); goto unset_vdda18; } + phy->power_enabled = 1; + return 0; disable_regulators: rc = regulator_disable(phy->vdda18); if (rc) - dev_err(phy->phy.dev, "Unable to disable vdda18\n"); + dev_err(phy->phy.dev, "Unable to disable vdda18: %d\n", rc); unset_vdda18: rc = regulator_set_voltage(phy->vdda18, 0, USB_SSPHY_1P8_VOL_MAX); if (rc) - dev_err(phy->phy.dev, "unable to set voltage for vdda18\n"); + dev_err(phy->phy.dev, "unable to set min voltage for vdda18: %d\n", + rc); put_vdda18_lpm: rc = regulator_set_load(phy->vdda18, 0); if (rc < 0) - dev_err(phy->phy.dev, "Unable to set LPM of vdda18\n"); + dev_err(phy->phy.dev, "Unable to set LPM of vdda18: %d\n", rc); + +disable_vdd: + rc = regulator_disable(phy->vdd); + if (rc) + dev_err(phy->phy.dev, "Unable to disable vdd: %d\n", rc); +unset_vdd: + rc = msm_ssusb_config_vdd(phy, 0); + if (rc) + dev_err(phy->phy.dev, "unable to set min voltage for vdd: %d\n", + rc); + + phy->power_enabled = 0; - return rc < 0 ? rc : 0; + return rc; } static void msm_usb_write_readback(void *base, u32 offset, @@ -285,8 +319,9 @@ static int msm_ssphy_init(struct usb_phy *uphy) struct msm_ssphy *phy = container_of(uphy, struct msm_ssphy, phy); u32 val; - /* Ensure clock is on before accessing QSCRATCH registers */ - clk_prepare_enable(phy->core_clk); + msm_ssusb_ldo_enable(phy, 1); + + clk_prepare_enable(phy->ref_clk); /* read initial value */ val = readl_relaxed(phy->base + SS_PHY_CTRL_REG); @@ -320,8 +355,6 @@ static int msm_ssphy_init(struct usb_phy *uphy) */ msm_ssphy_set_params(uphy); - clk_disable_unprepare(phy->core_clk); - return 0; } @@ -329,24 +362,17 @@ static int msm_ssphy_set_suspend(struct usb_phy *uphy, int suspend) { struct msm_ssphy *phy = container_of(uphy, struct msm_ssphy, phy); void __iomem *base = phy->base; - int count; - /* Ensure clock is on before accessing QSCRATCH registers */ - clk_prepare_enable(phy->core_clk); + dev_dbg(uphy->dev, "%s: phy->suspended:%d suspend:%d", __func__, + phy->suspended, suspend); - if (suspend) { - count = atomic_dec_return(&phy->active_count); - if (count > 0 || phy->suspended) { - dev_dbg(uphy->dev, "Skipping suspend, active_count=%d phy->suspended=%d\n", - count, phy->suspended); - goto done; - } + if (phy->suspended == suspend) { + dev_dbg(uphy->dev, "PHY is already %s\n", + suspend ? "suspended" : "resumed"); + return 0; + } - if (count < 0) { - dev_WARN(uphy->dev, "Suspended too many times! 
active_count=%d\n", - count); - atomic_set(&phy->active_count, 0); - } + if (suspend) { /* Clear REF_SS_PHY_EN */ msm_usb_write_readback(base, SS_PHY_CTRL_REG, REF_SS_PHY_EN, 0); @@ -362,24 +388,17 @@ static int msm_ssphy_set_suspend(struct usb_phy *uphy, int suspend) reset_control_assert(phy->phy_reset); } + clk_disable_unprepare(phy->ref_clk); msm_ssusb_ldo_enable(phy, 0); - msm_ssusb_config_vdd(phy, 0); phy->suspended = true; } else { - count = atomic_inc_return(&phy->active_count); - if (count > 1 || !phy->suspended) { - dev_dbg(uphy->dev, "Skipping resume, active_count=%d phy->suspended=%d\n", - count, phy->suspended); - goto done; - } - phy->suspended = false; - msm_ssusb_config_vdd(phy, 1); msm_ssusb_ldo_enable(phy, 1); + clk_prepare_enable(phy->ref_clk); if (phy->phy.flags & ENABLE_SECONDARY_PHY) { dev_err(uphy->dev, "secondary PHY, skipping reset\n"); - goto done; + return 0; } if (phy->phy_com_reset) { @@ -405,10 +424,10 @@ static int msm_ssphy_set_suspend(struct usb_phy *uphy, int suspend) msm_usb_write_readback(base, SS_PHY_CTRL_REG, SS_PHY_RESET, 0); } + + phy->suspended = false; } -done: - clk_disable_unprepare(phy->core_clk); return 0; } @@ -467,10 +486,10 @@ static int msm_ssphy_probe(struct platform_device *pdev) return -ENODEV; } - phy->core_clk = devm_clk_get(dev, "core_clk"); - if (IS_ERR(phy->core_clk)) { - dev_err(dev, "unable to get core_clk\n"); - return PTR_ERR(phy->core_clk); + phy->ref_clk = devm_clk_get(dev, "ref_clk"); + if (IS_ERR(phy->ref_clk)) { + dev_err(dev, "unable to get ref_clk\n"); + return PTR_ERR(phy->ref_clk); } phy->phy_com_reset = devm_reset_control_get(dev, "phy_com_reset"); @@ -513,24 +532,6 @@ static int msm_ssphy_probe(struct platform_device *pdev) return PTR_ERR(phy->vdda18); } - ret = msm_ssusb_config_vdd(phy, 1); - if (ret) { - dev_err(dev, "ssusb vdd_dig configuration failed\n"); - return ret; - } - - ret = regulator_enable(phy->vdd); - if (ret) { - dev_err(dev, "unable to enable the ssusb vdd_dig\n"); - goto unconfig_ss_vdd; - } - - ret = msm_ssusb_ldo_enable(phy, 1); - if (ret) { - dev_err(dev, "ssusb vreg enable failed\n"); - goto disable_ss_vdd; - } - platform_set_drvdata(pdev, phy); if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override")) @@ -548,18 +549,9 @@ static int msm_ssphy_probe(struct platform_device *pdev) ret = usb_add_phy_dev(&phy->phy); if (ret) - goto disable_ss_ldo; + return ret; return 0; - -disable_ss_ldo: - msm_ssusb_ldo_enable(phy, 0); -disable_ss_vdd: - regulator_disable(phy->vdd); -unconfig_ss_vdd: - msm_ssusb_config_vdd(phy, 0); - - return ret; } static int msm_ssphy_remove(struct platform_device *pdev) @@ -569,10 +561,9 @@ static int msm_ssphy_remove(struct platform_device *pdev) if (!phy) return 0; + msm_ssphy_set_suspend(&phy->phy, 0); usb_remove_phy(&phy->phy); - msm_ssusb_ldo_enable(phy, 0); - regulator_disable(phy->vdd); - msm_ssusb_config_vdd(phy, 0); + msm_ssphy_set_suspend(&phy->phy, 1); kfree(phy); return 0; diff --git a/drivers/usb/phy/phy-qcom-snps-28nm-hs.c b/drivers/usb/phy/phy-qcom-snps-28nm-hs.c new file mode 100644 index 0000000000000000000000000000000000000000..f6540313a9a7faec8e5c10dced0773005ae46237 --- /dev/null +++ b/drivers/usb/phy/phy-qcom-snps-28nm-hs.c @@ -0,0 +1,660 @@ +/* Copyright (c) 2009-2018, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define MSM_USB_PHY_CSR_BASE phy->phy_csr_regs +#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */ +#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */ +#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */ + +#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */ +#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */ +#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */ + +#define USB_HSPHY_MAX_REGULATORS 3 + +enum regulators { + MSM_HSPHY_DVDD_REGULATOR, + MSM_HSPHY_1P8_REGULATOR, + MSM_HSPHY_3P3_REGULATOR, + MSM_HSPHY_MAX_REGULATORS = USB_HSPHY_MAX_REGULATORS +}; + +struct msm_snps_hsphy { + struct usb_phy phy; + void __iomem *phy_csr_regs; + + struct clk *phy_csr_clk; + struct clk *ref_clk; + struct reset_control *phy_reset; + struct reset_control *phy_por_reset; + + bool dpdm_enable; + struct regulator_dev *dpdm_rdev; + struct regulator_desc dpdm_rdesc; + + int *phy_init_seq; + + bool suspended; + bool cable_connected; + bool clocks_enabled; + + int voltage_levels[USB_HSPHY_MAX_REGULATORS][3]; + struct regulator_bulk_data regulator[USB_HSPHY_MAX_REGULATORS]; + + struct mutex phy_lock; +}; + +static char *override_phy_init; +module_param(override_phy_init, charp, 0644); +MODULE_PARM_DESC(override_phy_init, + "Override SNPS HS PHY Init Settings"); + +struct hsphy_reg_val { + u32 offset; + u32 val; + u32 delay; +}; + +static void msm_snps_hsphy_disable_clocks(struct msm_snps_hsphy *phy) +{ + dev_dbg(phy->phy.dev, "%s: clocks_enabled:%d\n", + __func__, phy->clocks_enabled); + + if (!phy->clocks_enabled) + return; + + clk_disable_unprepare(phy->phy_csr_clk); + clk_disable_unprepare(phy->ref_clk); + + phy->clocks_enabled = false; +} + +static void msm_snps_hsphy_enable_clocks(struct msm_snps_hsphy *phy) +{ + dev_dbg(phy->phy.dev, "%s: clocks_enabled:%d\n", + __func__, phy->clocks_enabled); + + if (phy->clocks_enabled) + return; + + clk_prepare_enable(phy->ref_clk); + clk_prepare_enable(phy->phy_csr_clk); + + phy->clocks_enabled = true; +} + +static int msm_snps_hsphy_config_regulators(struct msm_snps_hsphy *phy, + int high) +{ + int min, ret, i; + + min = high ? 1 : 0; /* low or none? 
*/ + + for (i = 0; i < USB_HSPHY_MAX_REGULATORS; i++) { + ret = regulator_set_voltage(phy->regulator[i].consumer, + phy->voltage_levels[i][min], + phy->voltage_levels[i][2]); + if (ret) { + dev_err(phy->phy.dev, "%s: unable to set voltage for hsusb %s regulator\n", + __func__, phy->regulator[i].supply); + return ret; + } + dev_dbg(phy->phy.dev, "%s: min_vol:%d max_vol:%d\n", + phy->regulator[i].supply, + phy->voltage_levels[i][min], + phy->voltage_levels[i][2]); + } + + return 0; +} +static int msm_snps_hsphy_disable_regulators(struct msm_snps_hsphy *phy) +{ + int ret = 0; + + dev_dbg(phy->phy.dev, "%s turn off regulators\n", __func__); + + mutex_lock(&phy->phy_lock); + ret = regulator_bulk_disable(USB_HSPHY_MAX_REGULATORS, phy->regulator); + + ret = regulator_set_load( + phy->regulator[MSM_HSPHY_1P8_REGULATOR].consumer, 0); + if (ret) + dev_err(phy->phy.dev, "Unable to set (0) HPM for vdda18\n"); + + ret = regulator_set_load( + phy->regulator[MSM_HSPHY_3P3_REGULATOR].consumer, 0); + if (ret) + dev_err(phy->phy.dev, "Unable to set (0) HPM for vdda33\n"); + + ret = msm_snps_hsphy_config_regulators(phy, 0); + + mutex_unlock(&phy->phy_lock); + + return ret; +} + +static int msm_snps_hsphy_enable_regulators(struct msm_snps_hsphy *phy) +{ + int ret = 0; + + dev_dbg(phy->phy.dev, "%s turn on regulators.\n", __func__); + + mutex_lock(&phy->phy_lock); + ret = msm_snps_hsphy_config_regulators(phy, 1); + if (ret) { + mutex_unlock(&phy->phy_lock); + return ret; + } + + ret = regulator_set_load( + phy->regulator[MSM_HSPHY_1P8_REGULATOR].consumer, + USB_HSPHY_1P8_HPM_LOAD); + if (ret < 0) { + dev_err(phy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret); + goto unconfig_regulators; + } + + ret = regulator_set_load( + phy->regulator[MSM_HSPHY_3P3_REGULATOR].consumer, + USB_HSPHY_3P3_HPM_LOAD); + if (ret < 0) { + dev_err(phy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret); + goto unset_1p8_load; + } + + ret = regulator_bulk_enable(USB_HSPHY_MAX_REGULATORS, phy->regulator); + if (ret) + goto unset_3p3_load; + + mutex_unlock(&phy->phy_lock); + + dev_dbg(phy->phy.dev, "%s(): HSUSB PHY's regulators are turned ON.\n", + __func__); + return 0; + +unset_3p3_load: + regulator_set_load(phy->regulator[MSM_HSPHY_3P3_REGULATOR].consumer, 0); +unset_1p8_load: + regulator_set_load(phy->regulator[MSM_HSPHY_1P8_REGULATOR].consumer, 0); +unconfig_regulators: + msm_snps_hsphy_config_regulators(phy, 0); + mutex_unlock(&phy->phy_lock); + + return ret; +} + +static void msm_snps_hsphy_enter_retention(struct msm_snps_hsphy *phy) +{ + u32 val; + + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0); + val |= SIDDQ; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0); + + dev_dbg(phy->phy.dev, "PHY is in retention"); +} + +static void msm_snps_hsphy_exit_retention(struct msm_snps_hsphy *phy) +{ + u32 val; + + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0); + val &= ~SIDDQ; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0); + + dev_dbg(phy->phy.dev, "PHY is out of retention"); +} + +static int msm_snps_phy_block_reset(struct msm_snps_hsphy *phy) +{ + int ret; + + msm_snps_hsphy_disable_clocks(phy); + + ret = reset_control_assert(phy->phy_reset); + if (ret) { + dev_err(phy->phy.dev, "phy_reset_clk assert failed %d\n", + ret); + return ret; + } + + usleep_range(10, 15); + + ret = reset_control_deassert(phy->phy_reset); + if (ret) { + dev_err(phy->phy.dev, "phy_reset_clk deassert failed %d\n", + ret); + return ret; + } + + usleep_range(80, 100); + + msm_snps_hsphy_enable_clocks(phy); + + return 0; +} + +static void 
msm_snps_hsphy_por(struct msm_snps_hsphy *phy) +{ + struct hsphy_reg_val *reg = NULL; + u32 aseq[20]; + u32 *seq, tmp; + + if (override_phy_init) { + dev_dbg(phy->phy.dev, "Override HS PHY Init:%s\n", + override_phy_init); + get_options(override_phy_init, ARRAY_SIZE(aseq), aseq); + seq = &aseq[1]; + } else { + seq = phy->phy_init_seq; + } + + reg = (struct hsphy_reg_val *)seq; + if (!reg) + return; + + while (reg->offset != -1) { + writeb_relaxed(reg->val, + phy->phy_csr_regs + reg->offset); + + tmp = readb_relaxed(phy->phy_csr_regs + reg->offset); + if (tmp != reg->val) + dev_err(phy->phy.dev, "write:%x to: %x failed\n", + reg->val, reg->offset); + if (reg->delay) + usleep_range(reg->delay, reg->delay + 10); + reg++; + } + + /* Ensure that the above parameter overrides is successful. */ + mb(); +} + +static int msm_snps_hsphy_reset(struct msm_snps_hsphy *phy) +{ + int ret; + + ret = reset_control_assert(phy->phy_por_reset); + if (ret) { + dev_err(phy->phy.dev, "phy_por_clk assert failed %d\n", ret); + return ret; + } + /* + * The Femto PHY is POR reset in the following scenarios. + * + * 1. After overriding the parameter registers. + * 2. Low power mode exit from PHY retention. + * + * Ensure that SIDDQ is cleared before bringing the PHY + * out of reset. + * + */ + msm_snps_hsphy_exit_retention(phy); + + /* + * As per databook, 10 usec delay is required between + * PHY POR assert and de-assert. + */ + usleep_range(10, 20); + ret = reset_control_deassert(phy->phy_por_reset); + if (ret) { + pr_err("phy_por_clk de-assert failed %d\n", ret); + return ret; + } + /* + * As per databook, it takes 75 usec for PHY to stabilize + * after the reset. + */ + usleep_range(80, 100); + + /* Ensure that RESET operation is completed. */ + mb(); + + return 0; +} + +static int msm_snps_hsphy_init(struct usb_phy *uphy) +{ + struct msm_snps_hsphy *phy = + container_of(uphy, struct msm_snps_hsphy, phy); + int ret; + + dev_dbg(phy->phy.dev, "%s: Initialize HS PHY\n", __func__); + ret = msm_snps_hsphy_enable_regulators(phy); + if (ret) + return ret; + + ret = msm_snps_phy_block_reset(phy); + if (ret) + return ret; + + msm_snps_hsphy_por(phy); + + ret = msm_snps_hsphy_reset(phy); + + return ret; +} + +static void msm_snps_hsphy_enable_hv_interrupts(struct msm_snps_hsphy *phy) +{ + u32 val; + + dev_dbg(phy->phy.dev, "%s\n", __func__); + val = readl_relaxed(USB_PHY_CSR_PHY_CTRL3); + val |= CLAMP_MPM_DPSE_DMSE_EN_N; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3); +} + +static void msm_snps_hsphy_disable_hv_interrupts(struct msm_snps_hsphy *phy) +{ + u32 val; + + dev_dbg(phy->phy.dev, "%s\n", __func__); + val = readl_relaxed(USB_PHY_CSR_PHY_CTRL3); + val &= ~CLAMP_MPM_DPSE_DMSE_EN_N; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3); +} + +static int msm_snps_hsphy_set_suspend(struct usb_phy *uphy, int suspend) +{ + struct msm_snps_hsphy *phy = container_of(uphy, + struct msm_snps_hsphy, phy); + + dev_dbg(phy->phy.dev, "%s: suspend:%d with phy->suspended:%d\n", + __func__, suspend, phy->suspended); + if (phy->suspended == suspend) { + dev_info(phy->phy.dev, "PHY is already suspended\n"); + return 0; + } + + if (suspend) { + if (phy->cable_connected) { + msm_snps_hsphy_enable_hv_interrupts(phy); + msm_snps_hsphy_disable_clocks(phy); + } else { + msm_snps_hsphy_enter_retention(phy); + msm_snps_hsphy_disable_clocks(phy); + msm_snps_hsphy_disable_regulators(phy); + } + + phy->suspended = true; + } else { + if (phy->cable_connected) { + msm_snps_hsphy_enable_clocks(phy); + msm_snps_hsphy_disable_hv_interrupts(phy); + } else { + 
msm_snps_hsphy_enable_regulators(phy); + msm_snps_hsphy_enable_clocks(phy); + msm_snps_hsphy_exit_retention(phy); + } + + phy->suspended = false; + } + + return 0; +} + +static int msm_snps_dpdm_regulator_enable(struct regulator_dev *rdev) +{ + int ret = 0; + struct msm_snps_hsphy *phy = rdev_get_drvdata(rdev); + + if (phy->dpdm_enable) { + dev_dbg(phy->phy.dev, "%s: DP DM regulator already enabled\n", + __func__); + return 0; + } + + msm_snps_hsphy_enable_regulators(phy); + phy->dpdm_enable = true; + + return ret; +} + +static int msm_snps_dpdm_regulator_disable(struct regulator_dev *rdev) +{ + int ret = 0; + struct msm_snps_hsphy *phy = rdev_get_drvdata(rdev); + + if (!phy->dpdm_enable) { + dev_dbg(phy->phy.dev, "%s: DP DM regulator already enabled\n", + __func__); + return 0; + } + + msm_snps_hsphy_disable_regulators(phy); + phy->dpdm_enable = false; + + return ret; +} + +static int msm_snps_dpdm_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct msm_snps_hsphy *phy = rdev_get_drvdata(rdev); + + return phy->dpdm_enable; +} + +static struct regulator_ops msm_snps_dpdm_regulator_ops = { + .enable = msm_snps_dpdm_regulator_enable, + .disable = msm_snps_dpdm_regulator_disable, + .is_enabled = msm_snps_dpdm_regulator_is_enabled, +}; + +static int msm_snps_dpdm_regulator_register(struct msm_snps_hsphy *phy) +{ + struct device *dev = phy->phy.dev; + struct regulator_config cfg = {}; + struct regulator_init_data *init_data; + + init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL); + if (!init_data) + return -ENOMEM; + + init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS; + phy->dpdm_rdesc.owner = THIS_MODULE; + phy->dpdm_rdesc.type = REGULATOR_VOLTAGE; + phy->dpdm_rdesc.ops = &msm_snps_dpdm_regulator_ops; + phy->dpdm_rdesc.name = kbasename(dev->of_node->full_name); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.driver_data = phy; + cfg.of_node = dev->of_node; + + phy->dpdm_rdev = devm_regulator_register(dev, &phy->dpdm_rdesc, &cfg); + if (IS_ERR(phy->dpdm_rdev)) + return PTR_ERR(phy->dpdm_rdev); + + return 0; +} + +static int msm_snps_hsphy_notify_connect(struct usb_phy *uphy, + enum usb_device_speed speed) +{ + struct msm_snps_hsphy *phy = container_of(uphy, + struct msm_snps_hsphy, phy); + + phy->cable_connected = true; + + dev_dbg(phy->phy.dev, "PHY: connect notification cable_connected=%d\n", + phy->cable_connected); + return 0; +} + +static int msm_snps_hsphy_notify_disconnect(struct usb_phy *uphy, + enum usb_device_speed speed) +{ + struct msm_snps_hsphy *phy = container_of(uphy, + struct msm_snps_hsphy, phy); + + phy->cable_connected = false; + + dev_dbg(phy->phy.dev, "PHY: connect notification cable_connected=%d\n", + phy->cable_connected); + return 0; +} + +static int msm_snps_hsphy_probe(struct platform_device *pdev) +{ + struct msm_snps_hsphy *phy; + struct device *dev = &pdev->dev; + struct resource *res; + int ret = 0; + int len = 0; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + dev_dbg(dev, "%s: probe\n", __func__); + phy->phy_reset = devm_reset_control_get(dev, "phy_reset"); + if (IS_ERR(phy->phy_reset)) { + dev_err(dev, "%s failed to get phy_reset %d\n", + __func__, ret); + return PTR_ERR(phy->phy_reset); + } + + phy->phy_por_reset = devm_reset_control_get(dev, "phy_por_reset"); + if (IS_ERR(phy->phy_por_reset)) { + dev_err(dev, "%s failed to get phy_por_reset %d\n", + __func__, ret); + return PTR_ERR(phy->phy_por_reset); + } + + phy->ref_clk = devm_clk_get(dev, "ref_clk"); + if 
(IS_ERR(phy->ref_clk)) { + dev_err(dev, "%s failed to get ref_clk %d\n", + __func__, ret); + return PTR_ERR(phy->ref_clk); + } + + phy->phy_csr_clk = devm_clk_get(dev, "phy_csr_clk"); + if (IS_ERR(phy->phy_csr_clk)) { + dev_err(dev, "%s failed to get phy_csr_clk %d\n", + __func__, ret); + return PTR_ERR(phy->phy_csr_clk); + } + + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "phy_csr"); + + phy->phy_csr_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(phy->phy_csr_regs)) { + dev_err(dev, "%s: PHY CSR ioremap failed!\n", __func__); + return PTR_ERR(phy->phy_csr_regs); + } + + of_get_property(dev->of_node, "qcom,snps-hs-phy-init-seq", &len); + if (len) { + phy->phy_init_seq = devm_kzalloc(dev, len, GFP_KERNEL); + if (!phy->phy_init_seq) + return -ENOMEM; + of_property_read_u32_array(dev->of_node, + "qcom,snps-hs-phy-init-seq", phy->phy_init_seq, + (len/sizeof(*phy->phy_init_seq))); + } + + phy->regulator[MSM_HSPHY_DVDD_REGULATOR].supply = "vdd"; + phy->regulator[MSM_HSPHY_1P8_REGULATOR].supply = "vdda18"; + phy->regulator[MSM_HSPHY_3P3_REGULATOR].supply = "vdda33"; + + ret = devm_regulator_bulk_get(dev, USB_HSPHY_MAX_REGULATORS, + phy->regulator); + if (ret) + return ret; + + ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level", + (u32 *) phy->voltage_levels[MSM_HSPHY_DVDD_REGULATOR], + ARRAY_SIZE(phy->voltage_levels[MSM_HSPHY_DVDD_REGULATOR])); + if (ret) { + dev_err(dev, "%s: error reading qcom,vdd-voltage-level property\n", + __func__); + return ret; + } + + phy->voltage_levels[MSM_HSPHY_1P8_REGULATOR][0] = 0; + phy->voltage_levels[MSM_HSPHY_1P8_REGULATOR][1] = USB_HSPHY_1P8_VOL_MIN; + phy->voltage_levels[MSM_HSPHY_1P8_REGULATOR][2] = USB_HSPHY_1P8_VOL_MAX; + + phy->voltage_levels[MSM_HSPHY_3P3_REGULATOR][0] = 0; + phy->voltage_levels[MSM_HSPHY_3P3_REGULATOR][1] = USB_HSPHY_3P3_VOL_MIN; + phy->voltage_levels[MSM_HSPHY_3P3_REGULATOR][2] = USB_HSPHY_3P3_VOL_MAX; + + platform_set_drvdata(pdev, phy); + + phy->phy.dev = dev; + phy->phy.init = msm_snps_hsphy_init; + phy->phy.set_suspend = msm_snps_hsphy_set_suspend; + phy->phy.notify_connect = msm_snps_hsphy_notify_connect; + phy->phy.notify_disconnect = msm_snps_hsphy_notify_disconnect; + + mutex_init(&phy->phy_lock); + ret = msm_snps_dpdm_regulator_register(phy); + if (ret) + return ret; + + ret = usb_add_phy_dev(&phy->phy); + + return ret; +} + +static int msm_snps_hsphy_remove(struct platform_device *pdev) +{ + struct msm_snps_hsphy *phy = platform_get_drvdata(pdev); + + usb_remove_phy(&phy->phy); + + msm_snps_hsphy_disable_clocks(phy); + msm_snps_hsphy_disable_regulators(phy); + + return 0; +} + +static const struct of_device_id msm_usb_hsphy_match[] = { + { + .compatible = "qcom,usb-snps-hsphy", + }, + { }, +}; + +static struct platform_driver msm_snps_hsphy_driver = { + .probe = msm_snps_hsphy_probe, + .remove = msm_snps_hsphy_remove, + .driver = { + .name = "msm-usb-snps-hsphy", + .of_match_table = msm_usb_hsphy_match, + }, +}; + +module_platform_driver(msm_snps_hsphy_driver); + +MODULE_DESCRIPTION("MSM USB SNPS HS PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 25a281f876b56ec3d74eb64fc83bbfbe4147ce99..33a6d624c8438b50e29c23bbf86af1cdf20d91d8 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev) if (devinfo->flags & US_FL_BROKEN_FUA) sdev->broken_fua = 1; + /* UAS also needs to support FL_ALWAYS_SYNC */ + if (devinfo->flags & 
US_FL_ALWAYS_SYNC) { + sdev->skip_ms_page_3f = 1; + sdev->skip_ms_page_8 = 1; + sdev->wce_default_on = 1; + } scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 52b3e6da0745ce34dd613bef2fd086b70fb18929..d100290628bd26beb11991ebbcd8e6c84d6d719a 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2340,6 +2340,15 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100, "Micro Mini 1GB", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), +/* "G-DRIVE" external HDD hangs on write without these. + * Patch submitted by Alexander Kappner + */ +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999, + "SimpleTech", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_ALWAYS_SYNC), + /* * Nick Bowler * SCSI stack spams (otherwise harmless) error messages. diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 719ec68ae3099e78b1c11554884f87e43b5c1cde..f15aa47c54a9dccc6238faed23d6de6b8f43cfb5 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -183,3 +183,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, "External HDD", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES), + +/* "G-DRIVE" external HDD hangs on write without these. + * Patch submitted by Alexander Kappner + */ +UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999, + "SimpleTech", + "External HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_ALWAYS_SYNC), diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index b57891c1fd31a7e70b14ada774f824d30b36196e..7afbea5122077b3dd0cbe217ad7c839837f499b4 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o typec_ucsi-y := ucsi.o -typec_ucsi-$(CONFIG_FTRACE) += trace.o +typec_ucsi-$(CONFIG_TRACING) += trace.o obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 84df63e3130d28c250dfe214cff14231e0a375c8..4a22a9f06d964e83f2459ce7989b64404b3cdefd 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -24,6 +24,9 @@ #include #include +/* Hardening for Spectre-v1 */ +#include + #include "usbip_common.h" #include "vhci.h" @@ -219,16 +222,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) return 0; } -static int valid_port(__u32 pdev_nr, __u32 rhport) +static int valid_port(__u32 *pdev_nr, __u32 *rhport) { - if (pdev_nr >= vhci_num_controllers) { - pr_err("pdev %u\n", pdev_nr); + if (*pdev_nr >= vhci_num_controllers) { + pr_err("pdev %u\n", *pdev_nr); return 0; } - if (rhport >= VHCI_HC_PORTS) { - pr_err("rhport %u\n", rhport); + *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers); + + if (*rhport >= VHCI_HC_PORTS) { + pr_err("rhport %u\n", *rhport); return 0; } + *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS); + return 1; } @@ -246,7 +253,7 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr, pdev_nr = port_to_pdev_nr(port); rhport = port_to_rhport(port); - if (!valid_port(pdev_nr, rhport)) + if (!valid_port(&pdev_nr, &rhport)) return -EINVAL; hcd = platform_get_drvdata(vhcis[pdev_nr].pdev); @@ -272,7 +279,8 @@ static ssize_t store_detach(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach); -static int valid_args(__u32 pdev_nr, __u32 rhport, enum 
usb_device_speed speed) +static int valid_args(__u32 *pdev_nr, __u32 *rhport, + enum usb_device_speed speed) { if (!valid_port(pdev_nr, rhport)) { return 0; @@ -336,7 +344,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, sockfd, devid, speed); /* check received parameters */ - if (!valid_args(pdev_nr, rhport, speed)) + if (!valid_args(&pdev_nr, &rhport, speed)) return -EINVAL; hcd = platform_get_drvdata(vhcis[pdev_nr].pdev); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 8e3ca44007664ac1bc56e91f00e15f0ee9ffffdb..244e5256c526f876b3afd2c41cdd333e7183dd4a 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -993,6 +993,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, { int ret = 0; + mutex_lock(&dev->mutex); vhost_dev_lock_vqs(dev); switch (msg->type) { case VHOST_IOTLB_UPDATE: @@ -1024,6 +1025,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, } vhost_dev_unlock_vqs(dev); + mutex_unlock(&dev->mutex); + return ret; } ssize_t vhost_chr_write_iter(struct vhost_dev *dev, @@ -2379,6 +2382,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); if (!node) return NULL; + + /* Make sure all padding within the structure is initialized. */ + memset(&node->msg, 0, sizeof node->msg); node->vq = vq; node->msg.type = type; return node; diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c index 734a9158946b1f805a3738d9e9c31f25cf8b3314..e55304d5cf0716a231c5674e856a9ae205b78c65 100644 --- a/drivers/video/backlight/as3711_bl.c +++ b/drivers/video/backlight/as3711_bl.c @@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev, static int as3711_backlight_parse_dt(struct device *dev) { struct as3711_bl_pdata *pdata = dev_get_platdata(dev); - struct device_node *bl = - of_find_node_by_name(dev->parent->of_node, "backlight"), *fb; + struct device_node *bl, *fb; int ret; + bl = of_get_child_by_name(dev->parent->of_node, "backlight"); if (!bl) { dev_dbg(dev, "backlight node not found\n"); return -ENODEV; @@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev) if (pdata->su1_max_uA <= 0) ret = -EINVAL; if (ret < 0) - return ret; + goto err_put_bl; } fb = of_parse_phandle(bl, "su2-dev", 0); @@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev) if (pdata->su2_max_uA <= 0) ret = -EINVAL; if (ret < 0) - return ret; + goto err_put_bl; if (of_find_property(bl, "su2-feedback-voltage", NULL)) { pdata->su2_feedback = AS3711_SU2_VOLTAGE; @@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev) pdata->su2_feedback = AS3711_SU2_CURR_AUTO; count++; } - if (count != 1) - return -EINVAL; + if (count != 1) { + ret = -EINVAL; + goto err_put_bl; + } count = 0; if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) { @@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev) pdata->su2_fbprot = AS3711_SU2_GPIO4; count++; } - if (count != 1) - return -EINVAL; + if (count != 1) { + ret = -EINVAL; + goto err_put_bl; + } count = 0; if (of_find_property(bl, "su2-auto-curr1", NULL)) { @@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev) * At least one su2-auto-curr* must be specified iff * AS3711_SU2_CURR_AUTO is used */ - if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) - return -EINVAL; + if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) { + ret = -EINVAL; + goto err_put_bl; + } 
} + of_node_put(bl); + return 0; + +err_put_bl: + of_node_put(bl); + + return ret; } static int as3711_backlight_probe(struct platform_device *pdev) diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index 7b738d60ecc22e27560e42c9cef0669628e4aa28..f3aa6088f1d97805f23b9780a1db893cf1a46b00 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev) if (!pdata) return; - np = of_find_node_by_name(nproot, "backlight"); + np = of_get_child_by_name(nproot, "backlight"); if (!np) { dev_err(&pdev->dev, "failed to find backlight node\n"); return; @@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev) if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val)) pdata->dual_string = val; + of_node_put(np); + pdev->dev.platform_data = pdata; } diff --git a/drivers/video/backlight/qcom-spmi-wled.c b/drivers/video/backlight/qcom-spmi-wled.c index 46c77a83b6f170eee319eab3432f56a2aa46e2a4..b5123b7c7c32825da82e5d98af3cb95abb330924 100644 --- a/drivers/video/backlight/qcom-spmi-wled.c +++ b/drivers/video/backlight/qcom-spmi-wled.c @@ -1527,6 +1527,7 @@ int wled_flash_led_prepare(struct led_trigger *trig, int options, led_cdev->name); return rc; } + break; case QUERY_MAX_AVAIL_CURRENT: rc = wled_get_max_avail_current(led_cdev, max_current); if (rc < 0) { diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c index fd524ad860a57bd4ac11166eeef278ef306358a4..f45d0c9467dbf1d909d59db5ead8ada41a225b60 100644 --- a/drivers/video/backlight/tps65217_bl.c +++ b/drivers/video/backlight/tps65217_bl.c @@ -184,11 +184,11 @@ static struct tps65217_bl_pdata * tps65217_bl_parse_dt(struct platform_device *pdev) { struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); - struct device_node *node = of_node_get(tps->dev->of_node); + struct device_node *node; struct tps65217_bl_pdata *pdata, *err; u32 val; - node = of_find_node_by_name(node, "backlight"); + node = of_get_child_by_name(tps->dev->of_node, "backlight"); if (!node) return ERR_PTR(-ENODEV); diff --git a/drivers/video/fbdev/msm/dsi_host_v2.c b/drivers/video/fbdev/msm/dsi_host_v2.c index 58927a6d26c69859a6dd6db53bd014c2cc5744ef..61ce052a365e3250645355387556ac23b379f7ec 100644 --- a/drivers/video/fbdev/msm/dsi_host_v2.c +++ b/drivers/video/fbdev/msm/dsi_host_v2.c @@ -1114,7 +1114,7 @@ static int msm_dsi_on(struct mdss_panel_data *pdata) if (!pdata->panel_info.dynamic_switch_pending) { for (i = 0; !ret && (i < DSI_MAX_PM); i++) { - ret = msm_mdss_enable_vreg( + ret = msm_dss_enable_vreg( ctrl_pdata->power_data[i].vreg_config, ctrl_pdata->power_data[i].num_vreg, 1); if (ret) { @@ -1215,7 +1215,7 @@ static int msm_dsi_on(struct mdss_panel_data *pdata) error_vreg: if (ret) { for (; i >= 0; i--) - msm_mdss_enable_vreg( + msm_dss_enable_vreg( ctrl_pdata->power_data[i].vreg_config, ctrl_pdata->power_data[i].num_vreg, 0); } @@ -1250,7 +1250,7 @@ static int msm_dsi_off(struct mdss_panel_data *pdata) if (!pdata->panel_info.dynamic_switch_pending) { for (i = DSI_MAX_PM - 1; i >= 0; i--) { - ret = msm_mdss_enable_vreg( + ret = msm_dss_enable_vreg( ctrl_pdata->power_data[i].vreg_config, ctrl_pdata->power_data[i].num_vreg, 0); if (ret) @@ -1287,7 +1287,7 @@ static int msm_dsi_cont_on(struct mdss_panel_data *pdata) pinfo = &pdata->panel_info; mutex_lock(&ctrl_pdata->mutex); for (i = 0; !ret && (i < DSI_MAX_PM); i++) { - ret = msm_mdss_enable_vreg( + ret = 
msm_dss_enable_vreg( ctrl_pdata->power_data[i].vreg_config, ctrl_pdata->power_data[i].num_vreg, 1); if (ret) { @@ -1314,7 +1314,7 @@ static int msm_dsi_cont_on(struct mdss_panel_data *pdata) error_vreg: if (ret) { for (; i >= 0; i--) - msm_mdss_enable_vreg( + msm_dss_enable_vreg( ctrl_pdata->power_data[i].vreg_config, ctrl_pdata->power_data[i].num_vreg, 0); } diff --git a/drivers/video/fbdev/msm/dsi_io_v2.c b/drivers/video/fbdev/msm/dsi_io_v2.c index 71c1d1d7c2a7dfab453acbeec3c4463a55e96aaa..a5e36612d5707e12168908c87523916193821ad8 100644 --- a/drivers/video/fbdev/msm/dsi_io_v2.c +++ b/drivers/video/fbdev/msm/dsi_io_v2.c @@ -50,7 +50,7 @@ void msm_dsi_ahb_ctrl(int enable) } } -int msm_dsi_io_init(struct platform_device *pdev, struct mdss_module_power *mp) +int msm_dsi_io_init(struct platform_device *pdev, struct dss_module_power *mp) { int rc; @@ -67,7 +67,7 @@ int msm_dsi_io_init(struct platform_device *pdev, struct mdss_module_power *mp) return rc; } - rc = msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, + rc = msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 1); if (rc) { pr_err("fail to initialize DSI regulator\n"); @@ -78,11 +78,11 @@ int msm_dsi_io_init(struct platform_device *pdev, struct mdss_module_power *mp) } void msm_dsi_io_deinit(struct platform_device *pdev, - struct mdss_module_power *mp) + struct dss_module_power *mp) { if (dsi_io_private) { msm_dsi_clk_deinit(); - msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, + msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0); kfree(dsi_io_private); dsi_io_private = NULL; diff --git a/drivers/video/fbdev/msm/dsi_io_v2.h b/drivers/video/fbdev/msm/dsi_io_v2.h index d0227ecc7b810a06c147daf6d578e441d05c7389..dd9adf90abf344afa28aace42ecee34e51c589b3 100644 --- a/drivers/video/fbdev/msm/dsi_io_v2.h +++ b/drivers/video/fbdev/msm/dsi_io_v2.h @@ -18,10 +18,10 @@ void msm_dsi_ahb_ctrl(int enable); int msm_dsi_io_init(struct platform_device *dev, - struct mdss_module_power *mp); + struct dss_module_power *mp); void msm_dsi_io_deinit(struct platform_device *dev, - struct mdss_module_power *mp); + struct dss_module_power *mp); int msm_dsi_clk_init(struct platform_device *dev); diff --git a/drivers/video/fbdev/msm/dsi_v2.c b/drivers/video/fbdev/msm/dsi_v2.c index bfd29416cd1b06bd1877defd77f5e0adae1813d8..60c57e45212da91c0dae4e829384a719d021efcd 100644 --- a/drivers/video/fbdev/msm/dsi_v2.c +++ b/drivers/video/fbdev/msm/dsi_v2.c @@ -236,7 +236,7 @@ static int dsi_parse_gpio(struct platform_device *pdev, } static void mdss_dsi_put_dt_vreg_data(struct device *dev, - struct mdss_module_power *module_power) + struct dss_module_power *module_power) { if (!module_power) { pr_err("%s: invalid input\n", __func__); @@ -251,7 +251,7 @@ static void mdss_dsi_put_dt_vreg_data(struct device *dev, } static int mdss_dsi_get_dt_vreg_data(struct device *dev, - struct mdss_module_power *mp, enum dsi_pm_type module) + struct dss_module_power *mp, enum dsi_pm_type module) { int i = 0, rc = 0; u32 tmp = 0; @@ -286,7 +286,7 @@ static int mdss_dsi_get_dt_vreg_data(struct device *dev, pr_debug("%s: vreg found. 
count=%d\n", __func__, mp->num_vreg); } - mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) * + mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL); if (!mp->vreg_config) { rc = -ENOMEM; diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h index 22844df4882e450878b65cd0e34dab3f7d9d3f93..d0b5ddfa2851824f838fce8470e4f73f7bef9390 100644 --- a/drivers/video/fbdev/msm/mdss.h +++ b/drivers/video/fbdev/msm/mdss.h @@ -213,7 +213,7 @@ struct reg_bus_client { struct mdss_smmu_client { struct device *dev; struct dma_iommu_mapping *mmu_mapping; - struct mdss_module_power mp; + struct dss_module_power mp; struct reg_bus_client *reg_bus_clt; bool domain_attached; bool handoff_pending; @@ -295,9 +295,9 @@ struct mdss_data_type { unsigned long mdp_clk_rate; struct platform_device *pdev; - struct mdss_io_data mdss_io; - struct mdss_io_data vbif_io; - struct mdss_io_data vbif_nrt_io; + struct dss_io_data mdss_io; + struct dss_io_data vbif_io; + struct dss_io_data vbif_nrt_io; char __iomem *mdp_base; struct mdss_smmu_client mdss_smmu[MDSS_IOMMU_MAX_DOMAIN]; @@ -604,14 +604,14 @@ static inline bool mdss_has_quirk(struct mdss_data_type *mdata, } #define MDSS_VBIF_WRITE(mdata, offset, value, nrt_vbif) \ - (nrt_vbif ? mdss_reg_w(&mdata->vbif_nrt_io, offset, value, 0) :\ - mdss_reg_w(&mdata->vbif_io, offset, value, 0)) + (nrt_vbif ? dss_reg_w(&mdata->vbif_nrt_io, offset, value, 0) :\ + dss_reg_w(&mdata->vbif_io, offset, value, 0)) #define MDSS_VBIF_READ(mdata, offset, nrt_vbif) \ - (nrt_vbif ? mdss_reg_r(&mdata->vbif_nrt_io, offset, 0) :\ - mdss_reg_r(&mdata->vbif_io, offset, 0)) + (nrt_vbif ? dss_reg_r(&mdata->vbif_nrt_io, offset, 0) :\ + dss_reg_r(&mdata->vbif_io, offset, 0)) #define MDSS_REG_WRITE(mdata, offset, value) \ - mdss_reg_w(&mdata->mdss_io, offset, value, 0) + dss_reg_w(&mdata->mdss_io, offset, value, 0) #define MDSS_REG_READ(mdata, offset) \ - mdss_reg_r(&mdata->mdss_io, offset, 0) + dss_reg_r(&mdata->mdss_io, offset, 0) #endif /* MDSS_H */ diff --git a/drivers/video/fbdev/msm/mdss_debug.h b/drivers/video/fbdev/msm/mdss_debug.h index 8858209ad35ee321f81ebe6de12c6adbe8fecb9b..c920418eb977b703f3e8c417a6b5b495ac0536bd 100644 --- a/drivers/video/fbdev/msm/mdss_debug.h +++ b/drivers/video/fbdev/msm/mdss_debug.h @@ -227,7 +227,7 @@ void mdss_mdp_debug_mid(u32 mid); int mdss_dump_misr_data(char **buf, u32 size); static inline int mdss_debug_register_io(const char *name, - struct mdss_io_data *io_data, struct mdss_debug_base **dbg_blk) + struct dss_io_data *io_data, struct mdss_debug_base **dbg_blk) { return mdss_debug_register_base(name, io_data->base, io_data->len, dbg_blk); diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index 47aa2c433fe9fcb65653baae4281f05a1bd4822f..5bb6575bd05351fccdf8a0050f6be922fb88dbbe 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -253,14 +253,14 @@ static int mdss_dsi_regulator_init(struct platform_device *pdev, } for (i = DSI_CORE_PM; !rc && (i < DSI_MAX_PM); i++) { - rc = msm_mdss_config_vreg(&pdev->dev, + rc = msm_dss_config_vreg(&pdev->dev, sdata->power_data[i].vreg_config, sdata->power_data[i].num_vreg, 1); if (rc) { pr_err("%s: failed to init vregs for %s\n", __func__, __mdss_dsi_pm_name(i)); for (j = i-1; j >= DSI_CORE_PM; j--) { - msm_mdss_config_vreg(&pdev->dev, + msm_dss_config_vreg(&pdev->dev, sdata->power_data[j].vreg_config, sdata->power_data[j].num_vreg, 0); } @@ -293,7 +293,7 @@ static int 
mdss_dsi_panel_power_off(struct mdss_panel_data *pdata) if (mdss_dsi_pinctrl_set_state(ctrl_pdata, false)) pr_debug("reset disable: pinctrl not enabled\n"); - ret = msm_mdss_enable_vreg( + ret = msm_dss_enable_vreg( ctrl_pdata->panel_power_data.vreg_config, ctrl_pdata->panel_power_data.num_vreg, 0); if (ret) @@ -317,7 +317,7 @@ static int mdss_dsi_panel_power_on(struct mdss_panel_data *pdata) ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, panel_data); - ret = msm_mdss_enable_vreg( + ret = msm_dss_enable_vreg( ctrl_pdata->panel_power_data.vreg_config, ctrl_pdata->panel_power_data.num_vreg, 1); if (ret) { @@ -378,11 +378,11 @@ static int mdss_dsi_panel_power_ulp(struct mdss_panel_data *pdata, if (i == DSI_CORE_PM) continue; if (i == DSI_PANEL_PM) - ret = msm_mdss_config_vreg_opt_mode( + ret = msm_dss_config_vreg_opt_mode( ctrl_pdata->panel_power_data.vreg_config, ctrl_pdata->panel_power_data.num_vreg, mode); else - ret = msm_mdss_config_vreg_opt_mode( + ret = msm_dss_config_vreg_opt_mode( sdata->power_data[i].vreg_config, sdata->power_data[i].num_vreg, mode); if (ret) { @@ -395,7 +395,7 @@ static int mdss_dsi_panel_power_ulp(struct mdss_panel_data *pdata, if (ret) { mode = enable ? DSS_REG_MODE_ENABLE : DSS_REG_MODE_ULP; for (; i >= 0; i--) - msm_mdss_config_vreg_opt_mode( + msm_dss_config_vreg_opt_mode( ctrl_pdata->power_data[i].vreg_config, ctrl_pdata->power_data[i].num_vreg, mode); } @@ -478,7 +478,7 @@ int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, } static void mdss_dsi_put_dt_vreg_data(struct device *dev, - struct mdss_module_power *module_power) + struct dss_module_power *module_power) { if (!module_power) { pr_err("%s: invalid input\n", __func__); @@ -493,7 +493,7 @@ static void mdss_dsi_put_dt_vreg_data(struct device *dev, } static int mdss_dsi_get_dt_vreg_data(struct device *dev, - struct device_node *of_node, struct mdss_module_power *mp, + struct device_node *of_node, struct dss_module_power *mp, enum dsi_pm_type module) { int i = 0, rc = 0; @@ -535,7 +535,7 @@ static int mdss_dsi_get_dt_vreg_data(struct device *dev, pr_debug("%s: vreg found. 
count=%d\n", __func__, mp->num_vreg); } - mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) * + mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) * mp->num_vreg, GFP_KERNEL); if (!mp->vreg_config) { rc = -ENOMEM; @@ -3451,7 +3451,7 @@ static void mdss_dsi_res_deinit(struct platform_device *pdev) goto res_release; for (i = (DSI_MAX_PM - 1); i >= DSI_CORE_PM; i--) { - if (msm_mdss_config_vreg(&pdev->dev, + if (msm_dss_config_vreg(&pdev->dev, sdata->power_data[i].vreg_config, sdata->power_data[i].num_vreg, 1) < 0) pr_err("%s: failed to de-init vregs for %s\n", @@ -3817,7 +3817,7 @@ static int mdss_dsi_ctrl_remove(struct platform_device *pdev) mdss_dsi_pm_qos_remove_request(ctrl_pdata->shared_data); - if (msm_mdss_config_vreg(&pdev->dev, + if (msm_dss_config_vreg(&pdev->dev, ctrl_pdata->panel_power_data.vreg_config, ctrl_pdata->panel_power_data.num_vreg, 1) < 0) pr_err("%s: failed to de-init vregs for %s\n", @@ -3825,9 +3825,9 @@ static int mdss_dsi_ctrl_remove(struct platform_device *pdev) mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->panel_power_data); mfd = platform_get_drvdata(pdev); - msm_mdss_iounmap(&ctrl_pdata->mmss_misc_io); - msm_mdss_iounmap(&ctrl_pdata->phy_io); - msm_mdss_iounmap(&ctrl_pdata->ctrl_io); + msm_dss_iounmap(&ctrl_pdata->mmss_misc_io); + msm_dss_iounmap(&ctrl_pdata->phy_io); + msm_dss_iounmap(&ctrl_pdata->ctrl_io); mdss_dsi_debugfs_cleanup(ctrl_pdata); if (ctrl_pdata->workq) @@ -3870,7 +3870,7 @@ int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode, return -EPERM; } - rc = msm_mdss_ioremap_byname(pdev, &ctrl->ctrl_io, "dsi_ctrl"); + rc = msm_dss_ioremap_byname(pdev, &ctrl->ctrl_io, "dsi_ctrl"); if (rc) { pr_err("%s:%d unable to remap dsi ctrl resources\n", __func__, __LINE__); @@ -3880,14 +3880,14 @@ int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode, ctrl->ctrl_base = ctrl->ctrl_io.base; ctrl->reg_size = ctrl->ctrl_io.len; - rc = msm_mdss_ioremap_byname(pdev, &ctrl->phy_io, "dsi_phy"); + rc = msm_dss_ioremap_byname(pdev, &ctrl->phy_io, "dsi_phy"); if (rc) { pr_err("%s:%d unable to remap dsi phy resources\n", __func__, __LINE__); return rc; } - rc = msm_mdss_ioremap_byname(pdev, &ctrl->phy_regulator_io, + rc = msm_dss_ioremap_byname(pdev, &ctrl->phy_regulator_io, "dsi_phy_regulator"); if (rc) pr_debug("%s:%d unable to remap dsi phy regulator resources\n", @@ -3901,7 +3901,7 @@ int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode, __func__, ctrl->ctrl_base, ctrl->reg_size, ctrl->phy_io.base, ctrl->phy_io.len); - rc = msm_mdss_ioremap_byname(pdev, &ctrl->mmss_misc_io, + rc = msm_dss_ioremap_byname(pdev, &ctrl->mmss_misc_io, "mmss_misc_phys"); if (rc) { pr_debug("%s:%d mmss_misc IO remap failed\n", @@ -4171,7 +4171,7 @@ int dsi_panel_device_register(struct platform_device *ctrl_pdev, return rc; } - rc = msm_mdss_config_vreg(&ctrl_pdev->dev, + rc = msm_dss_config_vreg(&ctrl_pdev->dev, ctrl_pdata->panel_power_data.vreg_config, ctrl_pdata->panel_power_data.num_vreg, 1); if (rc) { @@ -4253,7 +4253,7 @@ int dsi_panel_device_register(struct platform_device *ctrl_pdev, sdata = ctrl_pdata->shared_data; if (pinfo->ulps_suspend_enabled) { - rc = msm_mdss_enable_vreg( + rc = msm_dss_enable_vreg( sdata->power_data[DSI_PHY_PM].vreg_config, sdata->power_data[DSI_PHY_PM].num_vreg, 1); if (rc) { diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h index 058c27a6113064e55e80850eb6cdab6896eb078a..9eb3bc8b87f8b17fd38df389d086200ea201ba41 100644 --- 
a/drivers/video/fbdev/msm/mdss_dsi.h +++ b/drivers/video/fbdev/msm/mdss_dsi.h @@ -272,7 +272,7 @@ struct dsi_shared_data { struct clk *pixel1_parent; /* DSI core regulators */ - struct mdss_module_power power_data[DSI_MAX_PM]; + struct dss_module_power power_data[DSI_MAX_PM]; /* Shared mutex for DSI PHY regulator */ struct mutex phy_reg_lock; @@ -408,10 +408,10 @@ struct mdss_dsi_ctrl_pdata { void (*switch_mode)(struct mdss_panel_data *pdata, int mode); struct mdss_panel_data panel_data; unsigned char *ctrl_base; - struct mdss_io_data ctrl_io; - struct mdss_io_data mmss_misc_io; - struct mdss_io_data phy_io; - struct mdss_io_data phy_regulator_io; + struct dss_io_data ctrl_io; + struct dss_io_data mmss_misc_io; + struct dss_io_data phy_io; + struct dss_io_data phy_regulator_io; int reg_size; u32 flags; struct clk *byte_clk; @@ -460,8 +460,8 @@ struct mdss_dsi_ctrl_pdata { u32 pclk_rate_bkp; u32 byte_clk_rate_bkp; bool refresh_clk_rate; /* flag to recalculate clk_rate */ - struct mdss_module_power panel_power_data; - struct mdss_module_power power_data[DSI_MAX_PM]; /* for 8x10 */ + struct dss_module_power panel_power_data; + struct dss_module_power power_data[DSI_MAX_PM]; /* for 8x10 */ u32 dsi_irq_mask; struct mdss_hw *dsi_hw; struct mdss_intf_recovery *recovery; diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index a122e8ccae38be4767ec83c83a7e565e031def61..b5a858d3fdf400578ebd75e158451cdbaffb6c90 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -1329,7 +1329,10 @@ static int mdss_fb_probe(struct platform_device *pdev) if (!lcd_backlight_registered) { backlight_led.brightness = mfd->panel_info->brightness_max; backlight_led.max_brightness = mfd->panel_info->brightness_max; - lcd_backlight_registered = 1; + if (led_classdev_register(&pdev->dev, &backlight_led)) + pr_err("led_classdev_register failed\n"); + else + lcd_backlight_registered = 1; } mdss_fb_init_panel_modes(mfd, pdata); @@ -1426,6 +1429,7 @@ static int mdss_fb_remove(struct platform_device *pdev) if (lcd_backlight_registered) { lcd_backlight_registered = 0; + led_classdev_unregister(&backlight_led); } return 0; @@ -2136,7 +2140,7 @@ void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd) int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size) { - int rc = 0, fd = 0; + int rc = 0; void *vaddr; int domain; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.c b/drivers/video/fbdev/msm/mdss_hdmi_audio.c index 446e8b4921b61df728da1481adaa54f991c67762..0effbcb00a6b5f3441c56a51e8d6acf802e4193d 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_audio.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.c @@ -63,7 +63,7 @@ enum hdmi_audio_sample_rates { }; struct hdmi_audio { - struct mdss_io_data *io; + struct dss_io_data *io; struct msm_hdmi_audio_setup_params params; struct extcon_dev sdev; u32 pclk; @@ -143,7 +143,7 @@ static void hdmi_audio_get_acr_param(u32 pclk, u32 fs, static void hdmi_audio_acr_enable(struct hdmi_audio *audio) { - struct mdss_io_data *io; + struct dss_io_data *io; struct hdmi_audio_acr acr; struct msm_hdmi_audio_setup_params *params; u32 pclk, layout, multiplier = 1, sample_rate; @@ -260,7 +260,7 @@ static void hdmi_audio_acr_setup(struct hdmi_audio *audio, bool on) static void hdmi_audio_infoframe_setup(struct hdmi_audio *audio, bool enabled) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; u32 channels, channel_allocation, level_shift, down_mix, layout; u32 hdmi_debug_reg = 0, audio_info_0_reg = 
0, audio_info_1_reg = 0; u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.h b/drivers/video/fbdev/msm/mdss_hdmi_audio.h index 2449123922c7c54b80903751d1aaeb95e6384254..7b33cb8d5954670197a9de7ecd9ccb0dbba10274 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_audio.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.h @@ -62,7 +62,7 @@ struct hdmi_audio_ops { * Defines the data needed to be provided while initializing audio module */ struct hdmi_audio_init_data { - struct mdss_io_data *io; + struct dss_io_data *io; struct hdmi_audio_ops *ops; }; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.c b/drivers/video/fbdev/msm/mdss_hdmi_cec.c index f15272e8a5ea090ea6ee6b5bf21823559ce77c3f..f1be3132b607563aa27aa985c2f06bae4b310a7c 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_cec.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.c @@ -52,7 +52,7 @@ static int hdmi_cec_msg_send(void *data, struct cec_msg *msg) u32 frame_retransmit = RETRANSMIT_MAX_NUM; bool frame_type; unsigned long flags; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)data; if (!cec_ctrl || !cec_ctrl->init_data.io || !msg) { @@ -169,7 +169,7 @@ static void hdmi_cec_msg_recv(struct work_struct *work) int i; u32 data; struct hdmi_cec_ctrl *cec_ctrl = NULL; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; struct cec_msg msg; struct cec_cbs *cbs; @@ -262,7 +262,7 @@ int hdmi_cec_isr(void *input) int rc = 0; u32 cec_intr, cec_status; unsigned long flags; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input; if (!cec_ctrl || !cec_ctrl->init_data.io) { @@ -368,7 +368,7 @@ static int hdmi_cec_enable(void *input, bool enable) { int ret = 0; u32 hdmi_hw_version, reg_val; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input; struct mdss_panel_info *pinfo; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.h b/drivers/video/fbdev/msm/mdss_hdmi_cec.h index de4bb356c9e1ee8a4ba50c42959e1703fb4e8394..57a76645e806cc216c88f78bcde1160ea1b31342 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_cec.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.h @@ -30,7 +30,7 @@ */ struct hdmi_cec_init_data { struct workqueue_struct *workq; - struct mdss_io_data *io; + struct dss_io_data *io; struct mdss_panel_info *pinfo; struct cec_cbs *cbs; struct cec_ops *ops; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c index 17b1f60e29a692367de6cae1128fe77f0385f6fc..0c76ad4e91bf67d9272527251bc9c9bb8b7d8445 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.c @@ -87,7 +87,7 @@ static void reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) int hdcp_ddc_status; int failure; int nack0; - struct mdss_io_data *io; + struct dss_io_data *io; if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) { DEV_ERR("%s: invalid input\n", __func__); @@ -166,7 +166,7 @@ static void reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl) static void hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; u32 hdcp_ddc_status, ddc_hw_status; u32 ddc_xfer_done, ddc_xfer_req; u32 ddc_hw_req, ddc_hw_not_idle; @@ -254,8 +254,8 @@ static int hdmi_hdcp_load_keys(void *input) u32 ksv_lsb_addr, ksv_msb_addr; u32 aksv_lsb, aksv_msb; u8 aksv[5]; - struct 
mdss_io_data *io; - struct mdss_io_data *qfprom_io; + struct dss_io_data *io; + struct dss_io_data *qfprom_io; struct hdmi_hdcp_ctrl *hdcp_ctrl = input; if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io || @@ -352,8 +352,8 @@ static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl) u32 link0_an_0, link0_an_1; u32 timeout_count; bool is_match; - struct mdss_io_data *io; - struct mdss_io_data *hdcp_io; + struct dss_io_data *io; + struct dss_io_data *hdcp_io; u8 aksv[5], *bksv = NULL; u8 an[8]; u8 bcaps = 0; @@ -678,7 +678,7 @@ static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl) static int read_write_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl, struct hdmi_tx_ddc_data ddc_data, - struct mdss_io_data *io, int off, char *name, + struct dss_io_data *io, int off, char *name, u32 reg, bool wr) { int rc = 0; @@ -715,7 +715,7 @@ static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) int rc = 0; u8 buf[4]; struct hdmi_tx_ddc_data ddc_data; - struct mdss_io_data *io; + struct dss_io_data *io; struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG]; u32 phy_addr; @@ -774,7 +774,7 @@ static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl) goto error; } } else if (hdcp_ctrl->hdmi_tx_ver_4) { - struct mdss_io_data *hdcp_io = hdcp_ctrl->init_data.hdcp_io; + struct dss_io_data *hdcp_io = hdcp_ctrl->init_data.hdcp_io; /* Read V'.HO 4 Byte at offset 0x20 */ if (read_write_v_h(hdcp_ctrl, ddc_data, hdcp_io, 0x20, "V' H0", @@ -843,7 +843,7 @@ static int hdmi_hdcp_authentication_part2(struct hdmi_hdcp_ctrl *hdcp_ctrl) u16 bstatus, max_devs_exceeded = 0, max_cascade_exceeded = 0; u32 link0_status; u32 ksv_bytes; - struct mdss_io_data *io; + struct dss_io_data *io; struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG]; u32 phy_addr; @@ -1247,7 +1247,7 @@ static void hdmi_hdcp_auth_work(struct work_struct *work) struct delayed_work *dw = to_delayed_work(work); struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(dw, struct hdmi_hdcp_ctrl, hdcp_auth_work); - struct mdss_io_data *io; + struct dss_io_data *io; if (!hdcp_ctrl) { DEV_ERR("%s: invalid input\n", __func__); @@ -1353,7 +1353,7 @@ int hdmi_hdcp_authenticate(void *input) int hdmi_hdcp_reauthenticate(void *input) { struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input; - struct mdss_io_data *io; + struct dss_io_data *io; u32 hdmi_hw_version; u32 ret = 0; @@ -1400,7 +1400,7 @@ int hdmi_hdcp_reauthenticate(void *input) void hdmi_hdcp_off(void *input) { struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input; - struct mdss_io_data *io; + struct dss_io_data *io; int rc = 0; if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) { @@ -1453,7 +1453,7 @@ int hdmi_hdcp_isr(void *input) { struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input; int rc = 0; - struct mdss_io_data *io; + struct dss_io_data *io; u32 hdcp_int_val; if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) { diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h index 2276009133f29cc8fb87f00f80bbca0943dee0de..2098943f4dd7c1c526204181f8602a49a9c9f2e3 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp.h @@ -28,9 +28,9 @@ enum hdmi_hdcp_state { }; struct hdmi_hdcp_init_data { - struct mdss_io_data *core_io; - struct mdss_io_data *qfprom_io; - struct mdss_io_data *hdcp_io; + struct dss_io_data *core_io; + struct dss_io_data *qfprom_io; + struct dss_io_data *hdcp_io; struct mutex *mutex; struct kobject *sysfs_kobj; struct workqueue_struct *workq; diff --git 
a/drivers/video/fbdev/msm/mdss_hdmi_panel.c b/drivers/video/fbdev/msm/mdss_hdmi_panel.c index 3823d3be27f25f12535d7836d506c6a06b368769..9e082b3a0e2475a3d6a6e7f3a99972ed9dc69b03 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_panel.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_panel.c @@ -108,7 +108,7 @@ struct hdmi_video_config { }; struct hdmi_panel { - struct mdss_io_data *io; + struct dss_io_data *io; struct hdmi_util_ds_data *ds_data; struct hdmi_panel_data *data; struct hdmi_video_config vid_cfg; @@ -275,7 +275,7 @@ static int hdmi_panel_setup_video(struct hdmi_panel *panel) u32 total_h, start_h, end_h; u32 total_v, start_v, end_v; u32 div = 0; - struct mdss_io_data *io = panel->io; + struct dss_io_data *io = panel->io; struct msm_hdmi_mode_timing_info *timing; timing = panel->vid_cfg.timing; @@ -342,7 +342,7 @@ static void hdmi_panel_set_avi_infoframe(struct hdmi_panel *panel) u8 avi_iframe[AVI_MAX_DATA_BYTES] = {0}; u8 checksum; u32 sum, reg_val; - struct mdss_io_data *io = panel->io; + struct dss_io_data *io = panel->io; struct hdmi_avi_infoframe_config *avi; struct msm_hdmi_mode_timing_info *timing; @@ -477,7 +477,7 @@ static void hdmi_panel_set_vendor_specific_infoframe(void *input) u32 sum, reg_val; u32 hdmi_vic, hdmi_video_format, s3d_struct = 0; struct hdmi_panel *panel = input; - struct mdss_io_data *io = panel->io; + struct dss_io_data *io = panel->io; /* HDMI Spec 1.4a Table 8-10 */ vs_iframe[0] = 0x81; /* type */ @@ -564,7 +564,7 @@ static void hdmi_panel_set_spd_infoframe(struct hdmi_panel *panel) u32 packet_control = 0; u8 *vendor_name = NULL; u8 *product_description = NULL; - struct mdss_io_data *io = panel->io; + struct dss_io_data *io = panel->io; vendor_name = panel->spd_vendor_name; product_description = panel->spd_product_description; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_panel.h b/drivers/video/fbdev/msm/mdss_hdmi_panel.h index 50e168af0d073abb41f515cee418263ae3c15653..4685b4ee7444498faf524ab9f9cda46dad5544ec 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_panel.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_panel.h @@ -73,7 +73,7 @@ struct hdmi_panel_ops { * @version: hardware version of the hdmi tx */ struct hdmi_panel_init_data { - struct mdss_io_data *io; + struct dss_io_data *io; struct hdmi_util_ds_data *ds_data; struct hdmi_panel_data *panel_data; struct hdmi_tx_ddc_ctrl *ddc; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c index 42e2181c22c95ed8b8eb4636d4efefefe998a21a..bfb5cd9cb06ba253a430d22594f3de5656f142eb 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c @@ -119,23 +119,23 @@ static struct mdss_hw hdmi_tx_hw = { .irq_handler = hdmi_tx_isr, }; -static struct mdss_gpio hpd_gpio_config[] = { +static struct dss_gpio hpd_gpio_config[] = { {0, 1, COMPATIBLE_NAME "-hpd"}, {0, 1, COMPATIBLE_NAME "-mux-en"}, {0, 0, COMPATIBLE_NAME "-mux-sel"}, {0, 1, COMPATIBLE_NAME "-mux-lpm"} }; -static struct mdss_gpio ddc_gpio_config[] = { +static struct dss_gpio ddc_gpio_config[] = { {0, 1, COMPATIBLE_NAME "-ddc-mux-sel"}, {0, 1, COMPATIBLE_NAME "-ddc-clk"}, {0, 1, COMPATIBLE_NAME "-ddc-data"} }; -static struct mdss_gpio core_gpio_config[] = { +static struct dss_gpio core_gpio_config[] = { }; -static struct mdss_gpio cec_gpio_config[] = { +static struct dss_gpio cec_gpio_config[] = { {0, 1, COMPATIBLE_NAME "-cec"} }; @@ -154,7 +154,7 @@ static int hdmi_tx_get_version(struct hdmi_tx_ctrl *hdmi_ctrl) { int rc; int reg_val; - struct mdss_io_data *io; + struct dss_io_data *io; rc = 
hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, true); if (rc) { @@ -394,7 +394,7 @@ static void hdmi_tx_wait_for_audio_engine(struct hdmi_tx_ctrl *hdmi_ctrl) { u64 status = 0; u32 wait_for_vote = 50; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; if (!hdmi_ctrl) { DEV_ERR("%s: invalid input\n", __func__); @@ -485,7 +485,7 @@ EXPORT_SYMBOL(hdmi_get_featuredata_from_sysfs_dev); static int hdmi_tx_config_5v(struct hdmi_tx_ctrl *hdmi_ctrl, bool enable) { - struct mdss_module_power *pd = NULL; + struct dss_module_power *pd = NULL; int ret = 0; if (!hdmi_ctrl) { @@ -646,7 +646,7 @@ static ssize_t hdmi_tx_sysfs_wta_audio_cb(struct device *dev, static int hdmi_tx_update_pixel_clk(struct hdmi_tx_ctrl *hdmi_ctrl) { - struct mdss_module_power *power_data = NULL; + struct dss_module_power *power_data = NULL; struct mdss_panel_info *pinfo; int rc = 0; @@ -677,7 +677,7 @@ static int hdmi_tx_update_pixel_clk(struct hdmi_tx_ctrl *hdmi_ctrl) DEV_DBG("%s: rate %ld\n", __func__, power_data->clk_config->rate); - msm_mdss_clk_set_rate(power_data->clk_config, power_data->num_clk); + msm_dss_clk_set_rate(power_data->clk_config, power_data->num_clk); end: return rc; } @@ -738,7 +738,7 @@ static ssize_t hdmi_tx_sysfs_wta_sim_mode(struct device *dev, { int sim_mode, rc; struct hdmi_tx_ctrl *hdmi_ctrl = NULL; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev); @@ -1224,7 +1224,7 @@ static ssize_t hdmi_tx_sysfs_wta_5v(struct device *dev, { int read, ret; struct hdmi_tx_ctrl *hdmi_ctrl = NULL; - struct mdss_module_power *pd = NULL; + struct dss_module_power *pd = NULL; hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev); if (!hdmi_ctrl) { @@ -1335,7 +1335,7 @@ static void hdmi_tx_sysfs_remove(struct hdmi_tx_ctrl *hdmi_ctrl) static int hdmi_tx_config_avmute(struct hdmi_tx_ctrl *hdmi_ctrl, bool set) { - struct mdss_io_data *io; + struct dss_io_data *io; u32 av_mute_status; bool av_pkt_en = false; @@ -1376,7 +1376,7 @@ static int hdmi_tx_config_avmute(struct hdmi_tx_ctrl *hdmi_ctrl, bool set) static bool hdmi_tx_is_encryption_set(struct hdmi_tx_ctrl *hdmi_ctrl) { - struct mdss_io_data *io; + struct dss_io_data *io; bool enc_en = true; u32 reg_val; @@ -1962,7 +1962,7 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl, static inline u32 hdmi_tx_is_controller_on(struct hdmi_tx_ctrl *hdmi_ctrl) { - struct mdss_io_data *io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO]; + struct dss_io_data *io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO]; return DSS_REG_R_ND(io, HDMI_CTRL) & BIT(0); } /* hdmi_tx_is_controller_on */ @@ -2098,7 +2098,7 @@ static void hdmi_tx_update_hdcp_info(struct hdmi_tx_ctrl *hdmi_ctrl) static void hdmi_tx_hpd_int_work(struct work_struct *work) { struct hdmi_tx_ctrl *hdmi_ctrl = NULL; - struct mdss_io_data *io; + struct dss_io_data *io; int rc = -EINVAL; int retry = MAX_EDID_READ_RETRY; @@ -2151,7 +2151,7 @@ static void hdmi_tx_hpd_int_work(struct work_struct *work) static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl) { u32 hdmi_disabled, hdcp_disabled, reg_val; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; int ret = 0; if (!hdmi_ctrl) { @@ -2210,7 +2210,7 @@ static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl) static void hdmi_tx_set_mode(struct hdmi_tx_ctrl *hdmi_ctrl, u32 power_on) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; /* Defaults: Disable block, HDMI mode */ u32 reg_val = BIT(1); @@ -2262,7 +2262,7 @@ static int 
hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl, { struct pinctrl_state *pin_state = NULL; int rc = -EFAULT; - struct mdss_module_power *power_data = NULL; + struct dss_module_power *power_data = NULL; u64 cur_pin_states; if (!hdmi_ctrl) { @@ -2359,7 +2359,7 @@ static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl, enum hdmi_tx_power_module_type module, int config) { int rc = 0; - struct mdss_module_power *power_data = NULL; + struct dss_module_power *power_data = NULL; char name[MAX_CLIENT_NAME_LEN]; if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) { @@ -2376,7 +2376,7 @@ static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl, } if (config) { - rc = msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev, + rc = msm_dss_config_vreg(&hdmi_ctrl->pdev->dev, power_data->vreg_config, power_data->num_vreg, 1); if (rc) { DEV_ERR("%s: Failed to config %s vreg. Err=%d\n", @@ -2389,13 +2389,13 @@ static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl, mdss_reg_bus_vote_client_create(name); if (IS_ERR(hdmi_ctrl->pdata.reg_bus_clt[module])) { pr_err("reg bus client create failed\n"); - msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev, + msm_dss_config_vreg(&hdmi_ctrl->pdev->dev, power_data->vreg_config, power_data->num_vreg, 0); rc = PTR_ERR(hdmi_ctrl->pdata.reg_bus_clt[module]); goto exit; } - rc = msm_mdss_get_clk(&hdmi_ctrl->pdev->dev, + rc = msm_dss_get_clk(&hdmi_ctrl->pdev->dev, power_data->clk_config, power_data->num_clk); if (rc) { DEV_ERR("%s: Failed to get %s clk. Err=%d\n", @@ -2404,16 +2404,16 @@ static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl, mdss_reg_bus_vote_client_destroy( hdmi_ctrl->pdata.reg_bus_clt[module]); hdmi_ctrl->pdata.reg_bus_clt[module] = NULL; - msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev, + msm_dss_config_vreg(&hdmi_ctrl->pdev->dev, power_data->vreg_config, power_data->num_vreg, 0); } } else { - msm_mdss_put_clk(power_data->clk_config, power_data->num_clk); + msm_dss_put_clk(power_data->clk_config, power_data->num_clk); mdss_reg_bus_vote_client_destroy( hdmi_ctrl->pdata.reg_bus_clt[module]); hdmi_ctrl->pdata.reg_bus_clt[module] = NULL; - rc = msm_mdss_config_vreg(&hdmi_ctrl->pdev->dev, + rc = msm_dss_config_vreg(&hdmi_ctrl->pdev->dev, power_data->vreg_config, power_data->num_vreg, 0); if (rc) DEV_ERR("%s: Fail to deconfig %s vreg. Err=%d\n", @@ -2429,7 +2429,7 @@ static int hdmi_tx_check_clk_state(struct hdmi_tx_ctrl *hdmi_ctrl, { int i; int rc = 0; - struct mdss_module_power *pd = NULL; + struct dss_module_power *pd = NULL; if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) { DEV_ERR("%s: Error: invalid input\n", __func__); @@ -2475,7 +2475,7 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, enum hdmi_tx_power_module_type module, int enable) { int rc = 0; - struct mdss_module_power *power_data = NULL; + struct dss_module_power *power_data = NULL; if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) { DEV_ERR("%s: Error: invalid input\n", __func__); @@ -2497,7 +2497,7 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, } if (enable && !hdmi_ctrl->power_data_enable[module]) { - rc = msm_mdss_enable_vreg(power_data->vreg_config, + rc = msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 1); if (rc) { DEV_ERR("%s: Failed to enable %s vreg. 
Error=%d\n", @@ -2512,7 +2512,7 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, goto error; } - rc = msm_mdss_enable_gpio(power_data->gpio_config, + rc = msm_dss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 1); if (rc) { DEV_ERR("%s: Failed to enable %s gpio. Error=%d\n", @@ -2522,7 +2522,7 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module], VOTE_INDEX_LOW); - rc = msm_mdss_clk_set_rate(power_data->clk_config, + rc = msm_dss_clk_set_rate(power_data->clk_config, power_data->num_clk); if (rc) { DEV_ERR("%s: failed to set clks rate for %s. err=%d\n", @@ -2530,7 +2530,7 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, goto disable_gpio; } - rc = msm_mdss_enable_clk(power_data->clk_config, + rc = msm_dss_enable_clk(power_data->clk_config, power_data->num_clk, 1); if (rc) { DEV_ERR("%s: Failed to enable clks for %s. Error=%d\n", @@ -2541,14 +2541,14 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, } else if (!enable && hdmi_ctrl->power_data_enable[module] && (!hdmi_tx_is_cec_wakeup_en(hdmi_ctrl) || ((module != HDMI_TX_HPD_PM) && (module != HDMI_TX_CEC_PM)))) { - msm_mdss_enable_clk(power_data->clk_config, + msm_dss_enable_clk(power_data->clk_config, power_data->num_clk, 0); mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module], VOTE_INDEX_DISABLE); - msm_mdss_enable_gpio(power_data->gpio_config, + msm_dss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 0); hdmi_tx_pinctrl_set_state(hdmi_ctrl, module, 0); - msm_mdss_enable_vreg(power_data->vreg_config, + msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0); hdmi_ctrl->power_data_enable[module] = false; } @@ -2558,9 +2558,9 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl, disable_gpio: mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module], VOTE_INDEX_DISABLE); - msm_mdss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 0); + msm_dss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 0); disable_vreg: - msm_mdss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0); + msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0); error: return rc; } /* hdmi_tx_enable_power */ @@ -2609,7 +2609,7 @@ static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl) unsigned int phy_reset_polarity = 0x0; unsigned int pll_reset_polarity = 0x0; unsigned int val; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; if (!hdmi_ctrl) { DEV_ERR("%s: invalid input\n", __func__); @@ -2867,7 +2867,7 @@ static inline bool hdmi_tx_hw_is_cable_connected(struct hdmi_tx_ctrl *hdmi_ctrl) static void hdmi_tx_hpd_polarity_setup(struct hdmi_tx_ctrl *hdmi_ctrl, bool polarity) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; bool cable_sense; if (!hdmi_ctrl) { @@ -2915,7 +2915,7 @@ static inline void hdmi_tx_audio_off(struct hdmi_tx_ctrl *hdmi_ctrl) static int hdmi_tx_power_off(struct hdmi_tx_ctrl *hdmi_ctrl) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; void *pdata = NULL; if (!hdmi_ctrl) { @@ -3043,7 +3043,7 @@ static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl) static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl) { int rc = 0; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; unsigned long flags; if (!hdmi_ctrl) { @@ -3093,7 +3093,7 @@ static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl) { u32 reg_val; int rc = 0; - struct mdss_io_data *io = 
NULL; + struct dss_io_data *io = NULL; if (!hdmi_ctrl) { DEV_ERR("%s: invalid input\n", __func__); @@ -3116,7 +3116,7 @@ static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl) return rc; } - mdss_reg_dump(io->base, io->len, "HDMI-INIT: ", REG_DUMP); + dss_reg_dump(io->base, io->len, "HDMI-INIT: ", REG_DUMP); if (!hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) { hdmi_tx_set_mode(hdmi_ctrl, false); @@ -3215,7 +3215,7 @@ static int hdmi_tx_set_mhl_hpd(struct platform_device *pdev, uint8_t on) static irqreturn_t hdmi_tx_isr(int irq, void *data) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)data; unsigned long flags; u32 hpd_current_state; @@ -3856,7 +3856,7 @@ static void hdmi_tx_deinit_resource(struct hdmi_tx_ctrl *hdmi_ctrl) /* IO */ for (i = HDMI_TX_MAX_IO - 1; i >= 0; i--) { if (hdmi_ctrl->pdata.io[i].base) - msm_mdss_iounmap(&hdmi_ctrl->pdata.io[i]); + msm_dss_iounmap(&hdmi_ctrl->pdata.io[i]); } } /* hdmi_tx_deinit_resource */ @@ -3876,7 +3876,7 @@ static int hdmi_tx_init_resource(struct hdmi_tx_ctrl *hdmi_ctrl) /* IO */ for (i = 0; i < HDMI_TX_MAX_IO; i++) { - rc = msm_mdss_ioremap_byname(hdmi_ctrl->pdev, &pdata->io[i], + rc = msm_dss_ioremap_byname(hdmi_ctrl->pdev, &pdata->io[i], hdmi_tx_io_name(i)); if (rc) { DEV_DBG("%s: '%s' remap failed or not available\n", @@ -3905,7 +3905,7 @@ static int hdmi_tx_init_resource(struct hdmi_tx_ctrl *hdmi_ctrl) } /* hdmi_tx_init_resource */ static void hdmi_tx_put_dt_clk_data(struct device *dev, - struct mdss_module_power *module_power) + struct dss_module_power *module_power) { if (!module_power) { DEV_ERR("%s: invalid input\n", __func__); @@ -3921,7 +3921,7 @@ static void hdmi_tx_put_dt_clk_data(struct device *dev, /* todo: once clk are moved to device tree then change this implementation */ static int hdmi_tx_get_dt_clk_data(struct device *dev, - struct mdss_module_power *mp, u32 module_type) + struct dss_module_power *mp, u32 module_type) { int rc = 0; @@ -3935,7 +3935,7 @@ static int hdmi_tx_get_dt_clk_data(struct device *dev, switch (module_type) { case HDMI_TX_HPD_PM: mp->num_clk = 4; - mp->clk_config = devm_kzalloc(dev, sizeof(struct mdss_clk) * + mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL); if (!mp->clk_config) { DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__, @@ -3968,7 +3968,7 @@ static int hdmi_tx_get_dt_clk_data(struct device *dev, case HDMI_TX_CORE_PM: mp->num_clk = 1; - mp->clk_config = devm_kzalloc(dev, sizeof(struct mdss_clk) * + mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL); if (!mp->clk_config) { DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__, @@ -4007,7 +4007,7 @@ static int hdmi_tx_get_dt_clk_data(struct device *dev, } /* hdmi_tx_get_dt_clk_data */ static void hdmi_tx_put_dt_vreg_data(struct device *dev, - struct mdss_module_power *module_power) + struct dss_module_power *module_power) { if (!module_power) { DEV_ERR("%s: invalid input\n", __func__); @@ -4022,7 +4022,7 @@ static void hdmi_tx_put_dt_vreg_data(struct device *dev, } /* hdmi_tx_put_dt_vreg_data */ static int hdmi_tx_get_dt_vreg_data(struct device *dev, - struct mdss_module_power *mp, u32 module_type) + struct dss_module_power *mp, u32 module_type) { int i, j, rc = 0; int dt_vreg_total = 0, mod_vreg_total = 0; @@ -4087,7 +4087,7 @@ static int hdmi_tx_get_dt_vreg_data(struct device *dev, if (mod_vreg_total > 0) { mp->num_vreg = mod_vreg_total; - mp->vreg_config = devm_kzalloc(dev, sizeof(struct 
mdss_vreg) * + mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) * mod_vreg_total, GFP_KERNEL); if (!mp->vreg_config) { DEV_ERR("%s: can't alloc '%s' vreg mem\n", __func__, @@ -4200,7 +4200,7 @@ static int hdmi_tx_get_dt_vreg_data(struct device *dev, } /* hdmi_tx_get_dt_vreg_data */ static void hdmi_tx_put_dt_gpio_data(struct device *dev, - struct mdss_module_power *module_power) + struct dss_module_power *module_power) { if (!module_power) { DEV_ERR("%s: invalid input\n", __func__); @@ -4215,11 +4215,11 @@ static void hdmi_tx_put_dt_gpio_data(struct device *dev, } /* hdmi_tx_put_dt_gpio_data */ static int hdmi_tx_get_dt_gpio_data(struct device *dev, - struct mdss_module_power *mp, u32 module_type) + struct dss_module_power *mp, u32 module_type) { int i, j; int mp_gpio_cnt = 0, gpio_list_size = 0; - struct mdss_gpio *gpio_list = NULL; + struct dss_gpio *gpio_list = NULL; struct device_node *of_node = NULL; DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type)); @@ -4266,7 +4266,7 @@ static int hdmi_tx_get_dt_gpio_data(struct device *dev, DEV_DBG("%s: mp_gpio_cnt = %d\n", __func__, mp_gpio_cnt); mp->num_gpio = mp_gpio_cnt; - mp->gpio_config = devm_kzalloc(dev, sizeof(struct mdss_gpio) * + mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) * mp_gpio_cnt, GFP_KERNEL); if (!mp->gpio_config) { DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__, @@ -4285,7 +4285,7 @@ static int hdmi_tx_get_dt_gpio_data(struct device *dev, continue; } memcpy(&mp->gpio_config[j], &gpio_list[i], - sizeof(struct mdss_gpio)); + sizeof(struct dss_gpio)); mp->gpio_config[j].gpio = (unsigned int)gpio; @@ -4533,17 +4533,17 @@ static int hdmi_tx_probe(struct platform_device *pdev) if (hdmi_ctrl->panel_data.panel_info.cont_splash_enabled) { for (i = 0; i < HDMI_TX_MAX_PM; i++) { - msm_mdss_enable_vreg( + msm_dss_enable_vreg( hdmi_ctrl->pdata.power_data[i].vreg_config, hdmi_ctrl->pdata.power_data[i].num_vreg, 1); hdmi_tx_pinctrl_set_state(hdmi_ctrl, i, 1); - msm_mdss_enable_gpio( + msm_dss_enable_gpio( hdmi_ctrl->pdata.power_data[i].gpio_config, hdmi_ctrl->pdata.power_data[i].num_gpio, 1); - msm_mdss_enable_clk( + msm_dss_enable_clk( hdmi_ctrl->pdata.power_data[i].clk_config, hdmi_ctrl->pdata.power_data[i].num_clk, 1); diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h index 6a13c7529580e28db07a01e540c16e871a30cd33..d09ca3c1b958b302d3aa635a26fa30dc2b093d6d 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h @@ -41,8 +41,8 @@ struct hdmi_tx_platform_data { bool primary; bool cont_splash_enabled; bool cond_power_on; - struct mdss_io_data io[HDMI_TX_MAX_IO]; - struct mdss_module_power power_data[HDMI_TX_MAX_PM]; + struct dss_io_data io[HDMI_TX_MAX_IO]; + struct dss_module_power power_data[HDMI_TX_MAX_PM]; struct reg_bus_client *reg_bus_clt[HDMI_TX_MAX_PM]; /* bitfield representing each module's pin state */ u64 pin_states; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c index 834cc6ffce9bfa85e775be1e27ecc9b6e3ff3cc7..734b3dab36b67ccad4fc0fb3f1b767becf055de6 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_util.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c @@ -186,7 +186,7 @@ static int hdmi_scrambler_status_timer_setup(struct hdmi_tx_ddc_ctrl *ctrl, { u32 reg_val; int rc; - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; if (!ctrl || !ctrl->io) { pr_err("invalid input\n"); @@ -655,7 +655,7 @@ static void hdmi_ddc_trigger(struct 
hdmi_tx_ddc_ctrl *ddc_ctrl, enum trigger_mode mode, bool seg) { struct hdmi_tx_ddc_data *ddc_data = &ddc_ctrl->ddc_data; - struct mdss_io_data *io = ddc_ctrl->io; + struct dss_io_data *io = ddc_ctrl->io; u32 const seg_addr = 0x60, seg_num = 0x01; u32 ddc_ctrl_reg_val; @@ -886,7 +886,7 @@ static void hdmi_hdcp2p2_ddc_clear_status(struct hdmi_tx_ddc_ctrl *ctrl) static int hdmi_ddc_hdcp2p2_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl) { - struct mdss_io_data *io = NULL; + struct dss_io_data *io = NULL; struct hdmi_tx_hdcp2p2_ddc_data *data; u32 intr0, intr2, intr5; u32 msg_size; @@ -1022,7 +1022,7 @@ static int hdmi_ddc_hdcp2p2_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl) static int hdmi_ddc_scrambling_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl) { - struct mdss_io_data *io; + struct dss_io_data *io; bool scrambler_timer_off = false; u32 intr2, intr5; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h index ecab9d5f67027b92d09b54e146eb42dbdddd3379..d26be9997a7cd8392b83ae2d63781d212213f041 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_util.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h @@ -447,7 +447,7 @@ struct hdmi_tx_ddc_ctrl { atomic_t write_busy_wait_done; atomic_t read_busy_wait_done; atomic_t rxstatus_busy_wait_done; - struct mdss_io_data *io; + struct dss_io_data *io; struct completion ddc_sw_done; struct hdmi_tx_ddc_data ddc_data; struct hdmi_tx_hdcp2p2_ddc_data hdcp2p2_ddc_data; diff --git a/drivers/video/fbdev/msm/mdss_io_util.c b/drivers/video/fbdev/msm/mdss_io_util.c index 311779347e086c0325581d61711423eac35674a3..5d344bdd82224efb0d85739364f3595f26b006fb 100644 --- a/drivers/video/fbdev/msm/mdss_io_util.c +++ b/drivers/video/fbdev/msm/mdss_io_util.c @@ -17,7 +17,7 @@ #include #define MAX_I2C_CMDS 16 -void mdss_reg_w(struct mdss_io_data *io, u32 offset, u32 value, u32 debug) +void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug) { u32 in_val; @@ -41,10 +41,10 @@ void mdss_reg_w(struct mdss_io_data *io, u32 offset, u32 value, u32 debug) value, in_val); } -} /* mdss_reg_w */ -EXPORT_SYMBOL(mdss_reg_w); +} /* dss_reg_w */ +EXPORT_SYMBOL(dss_reg_w); -u32 mdss_reg_r(struct mdss_io_data *io, u32 offset, u32 debug) +u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug) { u32 value; @@ -66,17 +66,17 @@ u32 mdss_reg_r(struct mdss_io_data *io, u32 offset, u32 debug) (u32)(unsigned long)(io->base + offset), value); return value; -} /* mdss_reg_r */ -EXPORT_SYMBOL(mdss_reg_r); +} /* dss_reg_r */ +EXPORT_SYMBOL(dss_reg_r); -void mdss_reg_dump(void __iomem *base, u32 length, const char *prefix, +void dss_reg_dump(void __iomem *base, u32 length, const char *prefix, u32 debug) { if (debug) print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4, (void *)base, length, false); -} /* mdss_reg_dump */ -EXPORT_SYMBOL(mdss_reg_dump); +} /* dss_reg_dump */ +EXPORT_SYMBOL(dss_reg_dump); static struct resource *msm_mdss_get_res_byname(struct platform_device *pdev, unsigned int type, const char *name) @@ -91,8 +91,8 @@ static struct resource *msm_mdss_get_res_byname(struct platform_device *pdev, } /* msm_mdss_get_res_byname */ EXPORT_SYMBOL(msm_mdss_get_res_byname); -int msm_mdss_ioremap_byname(struct platform_device *pdev, - struct mdss_io_data *io_data, const char *name) +int msm_dss_ioremap_byname(struct platform_device *pdev, + struct dss_io_data *io_data, const char *name) { struct resource *res = NULL; @@ -118,10 +118,10 @@ int msm_mdss_ioremap_byname(struct platform_device *pdev, } return 0; -} /* msm_mdss_ioremap_byname */ 
-EXPORT_SYMBOL(msm_mdss_ioremap_byname); +} /* msm_dss_ioremap_byname */ +EXPORT_SYMBOL(msm_dss_ioremap_byname); -void msm_mdss_iounmap(struct mdss_io_data *io_data) +void msm_dss_iounmap(struct dss_io_data *io_data) { if (!io_data) { DEV_ERR("%pS->%s: invalid input\n", @@ -134,15 +134,15 @@ void msm_mdss_iounmap(struct mdss_io_data *io_data) io_data->base = NULL; } io_data->len = 0; -} /* msm_mdss_iounmap */ -EXPORT_SYMBOL(msm_mdss_iounmap); +} /* msm_dss_iounmap */ +EXPORT_SYMBOL(msm_dss_iounmap); -int msm_mdss_config_vreg(struct device *dev, struct mdss_vreg *in_vreg, +int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg, int num_vreg, int config) { int i = 0, rc = 0; - struct mdss_vreg *curr_vreg = NULL; - enum mdss_vreg_type type; + struct dss_vreg *curr_vreg = NULL; + enum dss_vreg_type type; if (!in_vreg || !num_vreg) return rc; @@ -210,11 +210,11 @@ if (type == DSS_REG_LDO) goto vreg_unconfig; } return rc; -} /* msm_mdss_config_vreg */ -EXPORT_SYMBOL(msm_mdss_config_vreg); +} /* msm_dss_config_vreg */ +EXPORT_SYMBOL(msm_dss_config_vreg); -int msm_mdss_config_vreg_opt_mode(struct mdss_vreg *in_vreg, int num_vreg, - enum mdss_vreg_mode mode) +int msm_dss_config_vreg_opt_mode(struct dss_vreg *in_vreg, int num_vreg, + enum dss_vreg_mode mode) { int i = 0, rc = 0; @@ -257,9 +257,9 @@ int msm_mdss_config_vreg_opt_mode(struct mdss_vreg *in_vreg, int num_vreg, error: return rc; } -EXPORT_SYMBOL(msm_mdss_config_vreg_opt_mode); +EXPORT_SYMBOL(msm_dss_config_vreg_opt_mode); -int msm_mdss_enable_vreg(struct mdss_vreg *in_vreg, int num_vreg, int enable) +int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable) { int i = 0, rc = 0; bool need_sleep; @@ -332,10 +332,10 @@ int msm_mdss_enable_vreg(struct mdss_vreg *in_vreg, int num_vreg, int enable) } return rc; -} /* msm_mdss_enable_vreg */ -EXPORT_SYMBOL(msm_mdss_enable_vreg); +} /* msm_dss_enable_vreg */ +EXPORT_SYMBOL(msm_dss_enable_vreg); -int msm_mdss_enable_gpio(struct mdss_gpio *in_gpio, int num_gpio, int enable) +int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable) { int i = 0, rc = 0; @@ -372,10 +372,10 @@ int msm_mdss_enable_gpio(struct mdss_gpio *in_gpio, int num_gpio, int enable) gpio_free(in_gpio[i].gpio); return rc; -} /* msm_mdss_enable_gpio */ -EXPORT_SYMBOL(msm_mdss_enable_gpio); +} /* msm_dss_enable_gpio */ +EXPORT_SYMBOL(msm_dss_enable_gpio); -void msm_mdss_put_clk(struct mdss_clk *clk_arry, int num_clk) +void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk) { int i; @@ -384,10 +384,10 @@ void msm_mdss_put_clk(struct mdss_clk *clk_arry, int num_clk) clk_put(clk_arry[i].clk); clk_arry[i].clk = NULL; } -} /* msm_mdss_put_clk */ -EXPORT_SYMBOL(msm_mdss_put_clk); +} /* msm_dss_put_clk */ +EXPORT_SYMBOL(msm_dss_put_clk); -int msm_mdss_get_clk(struct device *dev, struct mdss_clk *clk_arry, int num_clk) +int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk) { int i, rc = 0; @@ -405,13 +405,13 @@ int msm_mdss_get_clk(struct device *dev, struct mdss_clk *clk_arry, int num_clk) return rc; error: - msm_mdss_put_clk(clk_arry, num_clk); + msm_dss_put_clk(clk_arry, num_clk); return rc; -} /* msm_mdss_get_clk */ -EXPORT_SYMBOL(msm_mdss_get_clk); +} /* msm_dss_get_clk */ +EXPORT_SYMBOL(msm_dss_get_clk); -int msm_mdss_clk_set_rate(struct mdss_clk *clk_arry, int num_clk) +int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk) { int i, rc = 0; @@ -442,10 +442,10 @@ int msm_mdss_clk_set_rate(struct mdss_clk *clk_arry, int num_clk) } return rc; -} /* 
msm_mdss_clk_set_rate */ -EXPORT_SYMBOL(msm_mdss_clk_set_rate); +} /* msm_dss_clk_set_rate */ +EXPORT_SYMBOL(msm_dss_clk_set_rate); -int msm_mdss_enable_clk(struct mdss_clk *clk_arry, int num_clk, int enable) +int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable) { int i, rc = 0; @@ -469,7 +469,7 @@ int msm_mdss_enable_clk(struct mdss_clk *clk_arry, int num_clk, int enable) } if (rc) { - msm_mdss_enable_clk(&clk_arry[i], + msm_dss_enable_clk(&clk_arry[i], i, false); break; } @@ -490,11 +490,11 @@ int msm_mdss_enable_clk(struct mdss_clk *clk_arry, int num_clk, int enable) } return rc; -} /* msm_mdss_enable_clk */ -EXPORT_SYMBOL(msm_mdss_enable_clk); +} /* msm_dss_enable_clk */ +EXPORT_SYMBOL(msm_dss_enable_clk); -int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr, +int dss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr, uint8_t reg_offset, uint8_t *read_buf) { struct i2c_msg msgs[2]; @@ -521,9 +521,9 @@ int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr, pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf); return 0; } -EXPORT_SYMBOL(mdss_i2c_byte_read); +EXPORT_SYMBOL(dss_i2c_byte_read); -int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr, +int dss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr, uint8_t reg_offset, uint8_t *value) { struct i2c_msg msgs[1]; @@ -549,4 +549,4 @@ int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr, pr_debug("%s: I2C write status=%x\n", __func__, status); return status; } -EXPORT_SYMBOL(mdss_i2c_byte_write); +EXPORT_SYMBOL(dss_i2c_byte_write); diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index ad87a5eb425b524b6c2fd6ffa4b1416f3cad130a..993446d8fab9334d2698b97dc723b7fb7a171bdf 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -2745,7 +2745,7 @@ static int mdss_mdp_probe(struct platform_device *pdev) mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type; mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status; - rc = msm_mdss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys"); + rc = msm_dss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys"); if (rc) { pr_err("unable to map MDP base\n"); goto probe_done; @@ -2754,7 +2754,7 @@ static int mdss_mdp_probe(struct platform_device *pdev) (int) (unsigned long) mdata->mdss_io.base, mdata->mdss_io.len); - rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys"); + rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys"); if (rc) { pr_err("unable to map MDSS VBIF base\n"); goto probe_done; @@ -2763,7 +2763,7 @@ static int mdss_mdp_probe(struct platform_device *pdev) (int) (unsigned long) mdata->vbif_io.base, mdata->vbif_io.len); - rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_nrt_io, + rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_nrt_io, "vbif_nrt_phys"); if (rc) pr_debug("unable to map MDSS VBIF non-realtime base\n"); @@ -2965,7 +2965,7 @@ static int mdss_mdp_probe(struct platform_device *pdev) } static void mdss_mdp_parse_dt_regs_array(const u32 *arr, - struct mdss_io_data *io, struct mdss_hw_settings *hws, int count) + struct dss_io_data *io, struct mdss_hw_settings *hws, int count) { u32 len, reg; int i; diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 64b420e4445515316dd2eba262ab916118dbe077..44c63aa8f173257daac06df45b989a26b9b43069 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ 
b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -59,6 +59,7 @@ #define DFPS_DATA_MAX_FPS 0x7fffffff #define DFPS_DATA_MAX_CLK_RATE 250000 +struct mdp_overlay tmp; static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd); static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd); static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd); @@ -5354,7 +5355,9 @@ static int __mdss_overlay_map(struct mdp_overlay *ovs, static inline void __overlay_swap_func(void *a, void *b, int size) { - swap(*(struct mdp_overlay *)a, *(struct mdp_overlay *)b); + tmp = *(struct mdp_overlay *)a; + *(struct mdp_overlay *)a = *(struct mdp_overlay *)b; + *(struct mdp_overlay *)b = tmp; } static inline int __zorder_dstx_cmp_func(const void *a, const void *b) diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c index aef51e3e50200115685f31967db5acb9e9dfe98f..b510e916b8d1459f0895d4ffb9af2708aa7c1bb6 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_pp.c +++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c @@ -3132,12 +3132,10 @@ static int pp_ad_calc_bl(struct msm_fb_data_type *mfd, int bl_in, int *bl_out, return -EPERM; } - if (!ad->bl_mfd || !ad->bl_mfd->panel_info || - !ad->bl_att_lut) { - pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n", + if (!ad->bl_mfd || !ad->bl_mfd->panel_info) { + pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK\n", ad->bl_mfd, - (!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info, - ad->bl_att_lut); + (!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info); mutex_unlock(&ad->lock); return -EINVAL; } @@ -6672,7 +6670,7 @@ static int is_valid_calib_dspp_addr(char __iomem *ptr) ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE; break; /* Dither enable/disable */ - } else if ((ptr == base + MDSS_MDP_REG_DSPP_DITHER_DEPTH)) { + } else if (ptr == (base + MDSS_MDP_REG_DSPP_DITHER_DEPTH)) { ret = MDP_PP_OPS_READ | MDP_PP_OPS_WRITE; break; /* Six zone and mem color */ diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c index 3223e7432e3f95596419d74624ec976a43614ae5..f285d793d520aeae3e82d6cefd7eedd0b0c12ada 100644 --- a/drivers/video/fbdev/msm/mdss_rotator.c +++ b/drivers/video/fbdev/msm/mdss_rotator.c @@ -254,7 +254,7 @@ static void mdss_rotator_footswitch_ctrl(struct mdss_rot_mgr *mgr, bool on) } pr_debug("%s: rotator regulators", on ? 
"Enable" : "Disable"); - ret = msm_mdss_enable_vreg(mgr->module_power.vreg_config, + ret = msm_dss_enable_vreg(mgr->module_power.vreg_config, mgr->module_power.num_vreg, on); if (ret) { pr_warn("Rotator regulator failed to %s\n", @@ -2658,14 +2658,14 @@ static int mdss_rotator_parse_dt(struct mdss_rot_mgr *mgr, } static void mdss_rotator_put_dt_vreg_data(struct device *dev, - struct mdss_module_power *mp) + struct dss_module_power *mp) { if (!mp) { DEV_ERR("%s: invalid input\n", __func__); return; } - msm_mdss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0); + msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0); if (mp->vreg_config) { devm_kfree(dev, mp->vreg_config); mp->vreg_config = NULL; @@ -2674,7 +2674,7 @@ static void mdss_rotator_put_dt_vreg_data(struct device *dev, } static int mdss_rotator_get_dt_vreg_data(struct device *dev, - struct mdss_module_power *mp) + struct dss_module_power *mp) { const char *st = NULL; struct device_node *of_node = NULL; @@ -2696,7 +2696,7 @@ static int mdss_rotator_get_dt_vreg_data(struct device *dev, return 0; } mp->num_vreg = dt_vreg_total; - mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) * + mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) * dt_vreg_total, GFP_KERNEL); if (!mp->vreg_config) { DEV_ERR("%s: can't alloc vreg mem\n", __func__); @@ -2714,7 +2714,7 @@ static int mdss_rotator_get_dt_vreg_data(struct device *dev, } snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st); } - msm_mdss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1); + msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1); for (i = 0; i < dt_vreg_total; i++) { DEV_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n", diff --git a/drivers/video/fbdev/msm/mdss_rotator_internal.h b/drivers/video/fbdev/msm/mdss_rotator_internal.h index 88f530addf262c32232e9a5c84e12f09b79c693f..6330582ba7687a2299aa5276ea35a2ee5f6968f7 100644 --- a/drivers/video/fbdev/msm/mdss_rotator_internal.h +++ b/drivers/video/fbdev/msm/mdss_rotator_internal.h @@ -172,7 +172,7 @@ struct mdss_rot_mgr { struct mdss_rot_bus_data_type reg_bus; /* Module power is only used for regulator management */ - struct mdss_module_power module_power; + struct dss_module_power module_power; bool regulator_enable; struct mutex clk_lock; diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c index f7923e1868e4258c2be7d76c3ed0d2bd9f048361..78c7ca31a7e0e19cf08b3134c64e4e492509942e 100644 --- a/drivers/video/fbdev/msm/mdss_smmu.c +++ b/drivers/video/fbdev/msm/mdss_smmu.c @@ -58,7 +58,7 @@ void mdss_iommu_unlock(void) } static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev, - struct mdss_module_power *mp) + struct dss_module_power *mp) { u32 i = 0, rc = 0; const char *clock_name; @@ -74,7 +74,7 @@ static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev, mp->num_clk = num_clk; mp->clk_config = devm_kzalloc(&pdev->dev, - sizeof(struct mdss_clk) * mp->num_clk, GFP_KERNEL); + sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL); if (!mp->clk_config) { rc = -ENOMEM; mp->num_clk = 0; @@ -102,7 +102,7 @@ static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev, } static int mdss_smmu_clk_register(struct platform_device *pdev, - struct mdss_module_power *mp) + struct dss_module_power *mp) { int i, ret; struct clk *clk; @@ -130,7 +130,7 @@ static int mdss_smmu_enable_power(struct mdss_smmu_client *mdss_smmu, bool enable) { int rc = 0; - struct mdss_module_power *mp; + struct dss_module_power *mp; if (!mdss_smmu) return -EINVAL; @@ 
-141,27 +141,27 @@ static int mdss_smmu_enable_power(struct mdss_smmu_client *mdss_smmu, return 0; if (enable) { - rc = msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, true); + rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, true); if (rc) { pr_err("vreg enable failed - rc:%d\n", rc); goto end; } mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt, VOTE_INDEX_LOW); - rc = msm_mdss_enable_clk(mp->clk_config, mp->num_clk, true); + rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); if (rc) { pr_err("clock enable failed - rc:%d\n", rc); mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt, VOTE_INDEX_DISABLE); - msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, + msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false); goto end; } } else { - msm_mdss_enable_clk(mp->clk_config, mp->num_clk, false); + msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt, VOTE_INDEX_DISABLE); - msm_mdss_enable_vreg(mp->vreg_config, mp->num_vreg, false); + msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, false); } end: return rc; @@ -721,7 +721,7 @@ int mdss_smmu_probe(struct platform_device *pdev) int rc = 0; struct mdss_smmu_domain smmu_domain; const struct of_device_id *match; - struct mdss_module_power *mp; + struct dss_module_power *mp; char name[MAX_CLIENT_NAME_LEN]; const __be32 *address = NULL, *size = NULL; @@ -759,13 +759,13 @@ int mdss_smmu_probe(struct platform_device *pdev) mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain]; mp = &mdss_smmu->mp; - memset(mp, 0, sizeof(struct mdss_module_power)); + memset(mp, 0, sizeof(struct dss_module_power)); if (of_find_property(pdev->dev.of_node, "gdsc-mmagic-mdss-supply", NULL)) { mp->vreg_config = devm_kzalloc(&pdev->dev, - sizeof(struct mdss_vreg), GFP_KERNEL); + sizeof(struct dss_vreg), GFP_KERNEL); if (!mp->vreg_config) return -ENOMEM; @@ -774,7 +774,7 @@ int mdss_smmu_probe(struct platform_device *pdev) mp->num_vreg = 1; } - rc = msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, + rc = msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, true); if (rc) { pr_err("vreg config failed rc=%d\n", rc); @@ -785,7 +785,7 @@ int mdss_smmu_probe(struct platform_device *pdev) if (rc) { pr_err("smmu clk register failed for domain[%d] with err:%d\n", smmu_domain.domain, rc); - msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, + msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, false); return rc; } @@ -794,7 +794,7 @@ int mdss_smmu_probe(struct platform_device *pdev) mdss_smmu->reg_bus_clt = mdss_reg_bus_vote_client_create(name); if (IS_ERR(mdss_smmu->reg_bus_clt)) { pr_err("mdss bus client register failed\n"); - msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, + msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, false); return PTR_ERR(mdss_smmu->reg_bus_clt); } @@ -856,7 +856,7 @@ int mdss_smmu_probe(struct platform_device *pdev) bus_client_destroy: mdss_reg_bus_vote_client_destroy(mdss_smmu->reg_bus_clt); mdss_smmu->reg_bus_clt = NULL; - msm_mdss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, + msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, false); return rc; } diff --git a/drivers/video/fbdev/msm/mhl_sii8334.c b/drivers/video/fbdev/msm/mhl_sii8334.c index cf45eb6fcda96ed4cfe3e440b0d5ce4a7e1e38e3..4133b76c62848f5508d32ec4af361e6ec45c10fa 100644 --- a/drivers/video/fbdev/msm/mhl_sii8334.c +++ b/drivers/video/fbdev/msm/mhl_sii8334.c @@ -203,7 +203,7 @@ int mhl_i2c_reg_read(struct i2c_client *client, int rc = -1; uint8_t 
buffer = 0; - rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index], + rc = dss_i2c_byte_read(client, slave_addrs[slave_addr_index], reg_offset, &buffer); if (rc) { pr_err("%s: slave=%x, off=%x\n", @@ -218,7 +218,7 @@ int mhl_i2c_reg_write(struct i2c_client *client, uint8_t slave_addr_index, uint8_t reg_offset, uint8_t value) { - return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index], + return dss_i2c_byte_write(client, slave_addrs[slave_addr_index], reg_offset, &value); } @@ -240,7 +240,7 @@ static int mhl_tx_get_dt_data(struct device *dev, { int i, rc = 0; struct device_node *of_node = NULL; - struct mdss_gpio *temp_gpio = NULL; + struct dss_gpio *temp_gpio = NULL; struct platform_device *hdmi_pdev = NULL; struct device_node *hdmi_tx_node = NULL; int dt_gpio; @@ -262,7 +262,7 @@ static int mhl_tx_get_dt_data(struct device *dev, /* GPIOs */ temp_gpio = NULL; - temp_gpio = devm_kzalloc(dev, sizeof(struct mdss_gpio), GFP_KERNEL); + temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL); pr_debug("%s: gpios allocd\n", __func__); if (!(temp_gpio)) { pr_err("%s: can't alloc %d gpio mem\n", __func__, i); @@ -283,7 +283,7 @@ static int mhl_tx_get_dt_data(struct device *dev, /* PWR */ temp_gpio = NULL; - temp_gpio = devm_kzalloc(dev, sizeof(struct mdss_gpio), GFP_KERNEL); + temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL); pr_debug("%s: gpios allocd\n", __func__); if (!(temp_gpio)) { pr_err("%s: can't alloc %d gpio mem\n", __func__, i); @@ -303,7 +303,7 @@ static int mhl_tx_get_dt_data(struct device *dev, /* INTR */ temp_gpio = NULL; - temp_gpio = devm_kzalloc(dev, sizeof(struct mdss_gpio), GFP_KERNEL); + temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL); pr_debug("%s: gpios allocd\n", __func__); if (!(temp_gpio)) { pr_err("%s: can't alloc %d gpio mem\n", __func__, i); @@ -1716,7 +1716,7 @@ static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on) static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on) { int ret; - struct mdss_gpio *temp_reset_gpio, *temp_intr_gpio; + struct dss_gpio *temp_reset_gpio, *temp_intr_gpio; /* caused too many line spills */ temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]; diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c index 3f189d5fbd7991305ce4acb6ecbe9c18afc05a11..49303625c3d9559c4076790f698e66d37e17ebec 100644 --- a/drivers/video/fbdev/msm/msm_dba/adv7533.c +++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c @@ -125,7 +125,7 @@ struct adv7533 { struct pinctrl_state *pinctrl_state_suspend; bool audio; bool disable_gpios; - struct mdss_module_power power_data; + struct dss_module_power power_data; bool hdcp_enabled; bool cec_enabled; bool is_power_on; @@ -433,7 +433,7 @@ static int adv7533_program_i2c_addr(struct adv7533 *pdata) } static void adv7533_parse_vreg_dt(struct device *dev, - struct mdss_module_power *mp) + struct dss_module_power *mp) { int i, rc = 0; int dt_vreg_total = 0; @@ -449,7 +449,7 @@ static void adv7533_parse_vreg_dt(struct device *dev, goto end; } mp->num_vreg = dt_vreg_total; - mp->vreg_config = devm_kzalloc(dev, sizeof(struct mdss_vreg) * + mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) * dt_vreg_total, GFP_KERNEL); if (!mp->vreg_config) goto end; @@ -1471,7 +1471,7 @@ static void adv7533_video_setup(struct adv7533 *pdata, static int adv7533_config_vreg(struct adv7533 *pdata, int enable) { int rc = 0; - struct mdss_module_power *power_data = NULL; + struct dss_module_power *power_data = 
NULL; if (!pdata) { pr_err("invalid input\n"); @@ -1486,7 +1486,7 @@ static int adv7533_config_vreg(struct adv7533 *pdata, int enable) } if (enable) { - rc = msm_mdss_config_vreg(&pdata->i2c_client->dev, + rc = msm_dss_config_vreg(&pdata->i2c_client->dev, power_data->vreg_config, power_data->num_vreg, 1); if (rc) { @@ -1495,7 +1495,7 @@ static int adv7533_config_vreg(struct adv7533 *pdata, int enable) goto exit; } } else { - rc = msm_mdss_config_vreg(&pdata->i2c_client->dev, + rc = msm_dss_config_vreg(&pdata->i2c_client->dev, power_data->vreg_config, power_data->num_vreg, 0); if (rc) { @@ -1512,7 +1512,7 @@ static int adv7533_config_vreg(struct adv7533 *pdata, int enable) static int adv7533_enable_vreg(struct adv7533 *pdata, int enable) { int rc = 0; - struct mdss_module_power *power_data = NULL; + struct dss_module_power *power_data = NULL; if (!pdata) { pr_err("invalid input\n"); @@ -1527,7 +1527,7 @@ static int adv7533_enable_vreg(struct adv7533 *pdata, int enable) } if (enable) { - rc = msm_mdss_enable_vreg(power_data->vreg_config, + rc = msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 1); if (rc) { pr_err("%s: Failed to enable vreg. Err=%d\n", @@ -1535,7 +1535,7 @@ static int adv7533_enable_vreg(struct adv7533 *pdata, int enable) goto exit; } } else { - rc = msm_mdss_enable_vreg(power_data->vreg_config, + rc = msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0); if (rc) { pr_err("%s: Failed to disable vreg. Err=%d\n", diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c index 5051d1290d3ef867953e35c956872a1705c26e4f..5736e09b32cb2cc6d6d5352af3853660ac413eea 100644 --- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c +++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c @@ -2343,7 +2343,7 @@ int mdss_dsi_post_clkoff_cb(void *priv, if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) && (i != DSI_CORE_PM)) continue; - rc = msm_mdss_enable_vreg( + rc = msm_dss_enable_vreg( sdata->power_data[i].vreg_config, sdata->power_data[i].num_vreg, 0); if (rc) { @@ -2404,7 +2404,7 @@ int mdss_dsi_pre_clkon_cb(void *priv, (!pdata->panel_info.cont_splash_enabled) && (i != DSI_CORE_PM)) continue; - rc = msm_mdss_enable_vreg( + rc = msm_dss_enable_vreg( sdata->power_data[i].vreg_config, sdata->power_data[i].num_vreg, 1); if (rc) { diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index 73676eb0244a7bd63d294619563d723992a93183..c592ca513115c060e37133c87411efad58bbc1ad 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1044,7 +1044,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info) info->cmap.len || cmap->start < info->cmap.start) return -EINVAL; - entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL); + entries = kmalloc_array(cmap->len, sizeof(*entries), + GFP_KERNEL); if (!entries) return -ENOMEM; diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c index 74f2e6e6202a98eba4882f1f9b62ea0705e6451b..8851d441e5fd16df4eb25784c3b16cf418a18681 100644 --- a/drivers/w1/masters/mxc_w1.c +++ b/drivers/w1/masters/mxc_w1.c @@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev) if (IS_ERR(mdev->clk)) return PTR_ERR(mdev->clk); + err = clk_prepare_enable(mdev->clk); + if (err) + return err; + clkrate = clk_get_rate(mdev->clk); if (clkrate < 10000000) dev_warn(&pdev->dev, @@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mdev->regs = 
devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mdev->regs)) - return PTR_ERR(mdev->regs); - - err = clk_prepare_enable(mdev->clk); - if (err) - return err; + if (IS_ERR(mdev->regs)) { + err = PTR_ERR(mdev->regs); + goto out_disable_clk; + } /* Software reset 1-Wire module */ writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET); @@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev) err = w1_add_master_device(&mdev->bus_master); if (err) - clk_disable_unprepare(mdev->clk); + goto out_disable_clk; + return 0; + +out_disable_clk: + clk_disable_unprepare(mdev->clk); return err; } diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 0c2a5a8327bd0c38b514c000ed9827656359f193..6f9e9505b34ced6e1bdc14ba3193db916e58b244 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -750,7 +750,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) /* slave modules need to be loaded in a context with unlocked mutex */ mutex_unlock(&dev->mutex); - request_module("w1-family-0x%02x", rn->family); + request_module("w1-family-0x%02X", rn->family); mutex_lock(&dev->mutex); spin_lock(&w1_flock); diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 762378f1811cc9069dc6171edb55aaa3610b82fa..08e4af04d6f2c32850a049a83721933a82883b8c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq) xen_irq_info_cleanup(info); } - BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND); - xen_free_irq(irq); } diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index d2edbc79384a56691be87380a019c74d28beae44..83243af22d510cc8a7cd9e8b247895eeb9fcfa27 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, { struct { struct xsd_sockmsg hdr; - const char body[16]; + char body[16]; } msg; int rc; @@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u, msg.hdr.len = strlen(reply) + 1; if (msg.hdr.len > sizeof(msg.body)) return -E2BIG; + memcpy(&msg.body, reply, msg.hdr.len); mutex_lock(&u->reply_mutex); rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len); diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 9f715c3edcf967eeb9dd437f23174d838fefa42c..ccc9c708a860a5d5c405a7198e9a29110f4dc390 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -55,6 +55,7 @@ int afs_open_socket(void) { struct sockaddr_rxrpc srx; struct socket *socket; + unsigned int min_level; int ret; _enter(""); @@ -80,6 +81,12 @@ int afs_open_socket(void) memset(&srx.transport.sin.sin_addr, 0, sizeof(srx.transport.sin.sin_addr)); + min_level = RXRPC_SECURITY_ENCRYPT; + ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL, + (void *)&min_level, sizeof(min_level)); + if (ret < 0) + goto error_2; + ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); if (ret < 0) goto error_2; diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index a7c5a9861bef5a73b8f7377dfa9e045427b915c7..8311e8ed76de37542d35dffc90c039452572f50e 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count) s = strchr(p, del); if (!s) goto einval; - *s++ = '\0'; - e->offset = simple_strtoul(p, &p, 10); + *s = '\0'; + if (p != s) { + int r = kstrtoint(p, 10, &e->offset); + if (r != 0 || e->offset < 0) + goto einval; + } + p = s; if 
(*p++) goto einval; pr_debug("register: offset: %#x\n", e->offset); @@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count) if (e->mask && string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size) goto einval; - if (e->size + e->offset > BINPRM_BUF_SIZE) + if (e->size > BINPRM_BUF_SIZE || + BINPRM_BUF_SIZE - e->size < e->offset) goto einval; pr_debug("register: magic/mask length: %i\n", e->size); if (USE_DEBUG) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 27d59cf36341c69e1dc680e3ddf9f9004f8eb267..b475d1ebbbbf02117a45fc26f5652e907538e7d6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -59,7 +59,8 @@ BTRFS_HEADER_FLAG_RELOC |\ BTRFS_SUPER_FLAG_ERROR |\ BTRFS_SUPER_FLAG_SEEDING |\ - BTRFS_SUPER_FLAG_METADUMP) + BTRFS_SUPER_FLAG_METADUMP |\ + BTRFS_SUPER_FLAG_METADUMP_V2) static const struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8ecbac3b862ed6b4d1f94b969202601a7cba2a5a..f5b90dc137ec5c80fdb54258db9faae38554f5e6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1027,8 +1027,10 @@ static noinline int cow_file_range(struct inode *inode, ram_size, /* ram_bytes */ BTRFS_COMPRESS_NONE, /* compress_type */ BTRFS_ORDERED_REGULAR /* type */); - if (IS_ERR(em)) + if (IS_ERR(em)) { + ret = PTR_ERR(em); goto out_reserve; + } free_extent_map(em); ret = btrfs_add_ordered_extent(inode, start, ins.objectid, @@ -9767,6 +9769,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, u64 new_idx = 0; u64 root_objectid; int ret; + int ret2; bool root_log_pinned = false; bool dest_log_pinned = false; @@ -9963,7 +9966,8 @@ static int btrfs_rename_exchange(struct inode *old_dir, dest_log_pinned = false; } } - ret = btrfs_end_transaction(trans); + ret2 = btrfs_end_transaction(trans); + ret = ret ? 
ret : ret2; out_notrans: if (new_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&fs_info->subvol_sem); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2763f3184ac5b0ac1950f996ec83318cd8449b2a..7303ba108112215da25b906bf8145bd7a9bea087 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2682,8 +2682,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) } /* Check for compatibility reject unknown flags */ - if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) - return -EOPNOTSUPP; + if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) { + ret = -EOPNOTSUPP; + goto out; + } if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; @@ -3861,11 +3863,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, src->i_sb != inode->i_sb) return -EXDEV; - /* don't make the dst file partly checksummed */ - if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != - (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) - return -EINVAL; - if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) return -EISDIR; @@ -3875,6 +3872,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, inode_lock(src); } + /* don't make the dst file partly checksummed */ + if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != + (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { + ret = -EINVAL; + goto out_unlock; + } + /* determine range to clone */ ret = -EINVAL; if (off + len > src->i_size || off + len < off) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index e3f6c49e5c4d8aad323b77f72ed6e12da26bee72..936d58ca2b4914a7215920832d88fa5d151f00ec 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -301,6 +301,11 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_put_ctx(struct scrub_ctx *sctx); +static inline int scrub_is_page_on_raid56(struct scrub_page *page) +{ + return page->recover && + (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); +} static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { @@ -1323,15 +1328,34 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) * could happen otherwise that a correct page would be * overwritten by a bad one). 
*/ - for (mirror_index = 0; - mirror_index < BTRFS_MAX_MIRRORS && - sblocks_for_recheck[mirror_index].page_count > 0; - mirror_index++) { + for (mirror_index = 0; ;mirror_index++) { struct scrub_block *sblock_other; if (mirror_index == failed_mirror_index) continue; - sblock_other = sblocks_for_recheck + mirror_index; + + /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */ + if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) { + if (mirror_index >= BTRFS_MAX_MIRRORS) + break; + if (!sblocks_for_recheck[mirror_index].page_count) + break; + + sblock_other = sblocks_for_recheck + mirror_index; + } else { + struct scrub_recover *r = sblock_bad->pagev[0]->recover; + int max_allowed = r->bbio->num_stripes - + r->bbio->num_tgtdevs; + + if (mirror_index >= max_allowed) + break; + if (!sblocks_for_recheck[1].page_count) + break; + + ASSERT(failed_mirror_index == 0); + sblock_other = sblocks_for_recheck + 1; + sblock_other->pagev[0]->mirror_num = 1 + mirror_index; + } /* build and submit the bios, check checksums */ scrub_recheck_block(fs_info, sblock_other, 0); @@ -1679,18 +1703,13 @@ static void scrub_bio_wait_endio(struct bio *bio) complete(&ret->event); } -static inline int scrub_is_page_on_raid56(struct scrub_page *page) -{ - return page->recover && - (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); -} - static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, struct bio *bio, struct scrub_page *page) { struct scrub_bio_ret done; int ret; + int mirror_num; init_completion(&done.event); done.status = 0; @@ -1698,9 +1717,10 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, bio->bi_private = &done; bio->bi_end_io = scrub_bio_wait_endio; + mirror_num = page->sblock->pagev[0]->mirror_num; ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, page->recover->map_length, - page->mirror_num, 0); + mirror_num, 0); if (ret) return ret; @@ -2755,7 +2775,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, have_csum = scrub_find_csum(sctx, logical, csum); if (have_csum == 0) ++sctx->stat.no_csum; - if (sctx->is_dev_replace && !have_csum) { + if (0 && sctx->is_dev_replace && !have_csum) { ret = copy_nocow_pages(sctx, logical, l, mirror_num, physical_for_dev_replace); diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h index 4f3884835267162a55224921f20fd70a7ef2dd5d..dd95a6fa24bf41f177e3f78a78a46acc58b8813a 100644 --- a/fs/cifs/cifsacl.h +++ b/fs/cifs/cifsacl.h @@ -98,4 +98,18 @@ struct cifs_ace { struct cifs_sid sid; /* ie UUID of user or group who gets these perms */ } __attribute__((packed)); +/* + * Minimum security identifier can be one for system defined Users + * and Groups such as NULL SID and World or Built-in accounts such + * as Administrator and Guest and consists of + * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority) + */ +#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */ + +/* + * Minimum security descriptor can be one without any SACL and DACL and can + * consist of revision, type, and two sids of minimum size for owner and group + */ +#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN)) + #endif /* _CIFSACL_H */ diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index eef875da7c0b8ce68b43e783e9339aed324af1e8..36bc9a7eb8ea5fbb93d43512e5e54bc836c209ab 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -570,9 +570,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); + /* + * If ea_name is 
NULL (listxattr) and there are no EAs, return 0 as it's + * not an error. Otherwise, the specified ea_name was not found. + */ if (!rc) rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data, SMB2_MAX_EA_BUF, ea_name); + else if (!ea_name && rc == -ENODATA) + rc = 0; kfree(smb2_data); return rc; @@ -1250,10 +1256,11 @@ smb2_is_session_expired(char *buf) { struct smb2_sync_hdr *shdr = get_sync_hdr(buf); - if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED) + if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED && + shdr->Status != STATUS_USER_SESSION_DELETED) return false; - cifs_dbg(FYI, "Session expired\n"); + cifs_dbg(FYI, "Session expired or deleted\n"); return true; } @@ -1565,8 +1572,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb, oparms.create_options = 0; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); - if (!utf16_path) - return ERR_PTR(-ENOMEM); + if (!utf16_path) { + rc = -ENOMEM; + free_xid(xid); + return ERR_PTR(rc); + } oparms.tcon = tcon; oparms.desired_access = READ_CONTROL; @@ -1624,8 +1634,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen, access_flags = WRITE_DAC; utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); - if (!utf16_path) - return -ENOMEM; + if (!utf16_path) { + rc = -ENOMEM; + free_xid(xid); + return rc; + } oparms.tcon = tcon; oparms.desired_access = access_flags; @@ -1685,15 +1698,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) - if (keep_size == false) - return -EOPNOTSUPP; + if (keep_size == false) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } /* * Must check if file sparse since fallocate -z (zero range) assumes * non-sparse allocation */ - if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) - return -EOPNOTSUPP; + if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } /* * need to make sure we are not asked to extend the file since the SMB3 @@ -1702,8 +1721,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, * which for a non sparse file would zero the newly extended range */ if (keep_size == false) - if (i_size_read(inode) < offset + len) - return -EOPNOTSUPP; + if (i_size_read(inode) < offset + len) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } cifs_dbg(FYI, "offset %lld len %lld", offset, len); @@ -1737,8 +1759,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, /* Need to make file sparse, if not already, before freeing range. 
*/ /* Consider adding equivalent for compressed since it could also work */ - if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) - return -EOPNOTSUPP; + if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } cifs_dbg(FYI, "offset %lld len %lld", offset, len); @@ -1770,8 +1795,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) - if (keep_size == false) - return -EOPNOTSUPP; + if (keep_size == false) { + free_xid(xid); + return rc; + } /* * Files are non-sparse by default so falloc may be a no-op @@ -1780,14 +1807,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, */ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) { if (keep_size == true) - return 0; + rc = 0; /* check if extending file */ else if (i_size_read(inode) >= off + len) /* not extending file and already not sparse */ - return 0; + rc = 0; /* BB: in future add else clause to extend file */ else - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; } if ((keep_size == true) || (i_size_read(inode) >= off + len)) { @@ -1799,8 +1828,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon, * ie potentially making a few extra pages at the beginning * or end of the file non-sparse via set_sparse is harmless. */ - if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) - return -EOPNOTSUPP; + if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) { + rc = -EOPNOTSUPP; + free_xid(xid); + return rc; + } rc = smb2_set_sparse(xid, tcon, cfile, inode, false); } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 49779d952cd55cde658fe5cbb49c2fb38ae0bb7b..5247b40e57f671dfd37c1c28a3ca2a4fb58b3b7a 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1182,6 +1182,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, sess_data->ses = ses; sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->nls_cp = (struct nls_table *) nls_cp; + sess_data->previous_session = ses->Suid; while (sess_data->func) sess_data->func(sess_data); @@ -2278,8 +2279,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon, return query_info(xid, tcon, persistent_fid, volatile_fid, 0, SMB2_O_INFO_SECURITY, additional_info, - SMB2_MAX_BUFFER_SIZE, - sizeof(struct smb2_file_all_info), data, plen); + SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen); } int diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index cb496989a6b693fe0b13cb2c737a7643288d5bc9..e7bee887b605244a95e018727cef195d2548283f 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -2,3 +2,5 @@ obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypto-$(CONFIG_BLOCK) += bio.o +ccflags-y += -Ifs/ext4 +fscrypto-$(CONFIG_EXT4_FS_ICE_ENCRYPTION) += ext4_ice.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 0959044c5ceecb37168743784df8d6d269f6301b..d32a5c69ca38fd7a0222e0fbd5dc81afa5cae64f 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -25,6 +25,7 @@ #include #include #include "fscrypt_private.h" +#include "ext4_ice.h" static void __fscrypt_decrypt_bio(struct bio *bio, bool done) { @@ -33,14 +34,18 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done) bio_for_each_segment_all(bv, bio, i) { struct page *page = bv->bv_page; - int ret = fscrypt_decrypt_page(page->mapping->host, page, - PAGE_SIZE, 0, page->index); - - if (ret) { - 
WARN_ON_ONCE(1); - SetPageError(page); - } else if (done) { + if (ext4_should_be_processed_by_ice(page->mapping->host)) { SetPageUptodate(page); + } else { + int ret = fscrypt_decrypt_page(page->mapping->host, + page, PAGE_SIZE, 0, page->index); + + if (ret) { + WARN_ON_ONCE(1); + SetPageError(page); + } else if (done) { + SetPageUptodate(page); + } } if (done) unlock_page(page); diff --git a/fs/crypto/ext4_ice.c b/fs/crypto/ext4_ice.c new file mode 100644 index 0000000000000000000000000000000000000000..a8098e338f29be990024168f63ef8b7ae7075364 --- /dev/null +++ b/fs/crypto/ext4_ice.c @@ -0,0 +1,108 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "ext4_ice.h" +#include "fscrypt_private.h" + +/* + * Retrieves encryption key from the inode + */ +char *ext4_get_ice_encryption_key(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + + if (!inode) + return NULL; + + ci = inode->i_crypt_info; + if (!ci) + return NULL; + + return &(ci->ci_raw_key[0]); +} + +/* + * Retrieves encryption salt from the inode + */ +char *ext4_get_ice_encryption_salt(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + + if (!inode) + return NULL; + + ci = inode->i_crypt_info; + if (!ci) + return NULL; + + return &(ci->ci_raw_key[ext4_get_ice_encryption_key_size(inode)]); +} + +/* + * returns true if the cipher mode in inode is AES XTS + */ +int ext4_is_aes_xts_cipher(const struct inode *inode) +{ + struct fscrypt_info *ci = NULL; + + ci = inode->i_crypt_info; + if (!ci) + return 0; + + return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE); +} + +/* + * returns true if encryption info in both inodes is equal + */ +int ext4_is_ice_encryption_info_equal(const struct inode *inode1, + const struct inode *inode2) +{ + char *key1 = NULL; + char *key2 = NULL; + char *salt1 = NULL; + char *salt2 = NULL; + + if (!inode1 || !inode2) + return 0; + + if (inode1 == inode2) + return 1; + + /* both do not belong to ice, so we don't care, they are equal for us */ + if (!ext4_should_be_processed_by_ice(inode1) && + !ext4_should_be_processed_by_ice(inode2)) + return 1; + + /* one belongs to ice, the other does not -> not equal */ + if (ext4_should_be_processed_by_ice(inode1) ^ + ext4_should_be_processed_by_ice(inode2)) + return 0; + + key1 = ext4_get_ice_encryption_key(inode1); + key2 = ext4_get_ice_encryption_key(inode2); + salt1 = ext4_get_ice_encryption_salt(inode1); + salt2 = ext4_get_ice_encryption_salt(inode2); + + /* key and salt should not be null by this point */ + if (!key1 || !key2 || !salt1 || !salt2 || + (ext4_get_ice_encryption_key_size(inode1) != + ext4_get_ice_encryption_key_size(inode2)) || + (ext4_get_ice_encryption_salt_size(inode1) != + ext4_get_ice_encryption_salt_size(inode2))) + return 0; + + return ((memcmp(key1, key2, + ext4_get_ice_encryption_key_size(inode1)) == 0) && + (memcmp(salt1, salt2, + ext4_get_ice_encryption_salt_size(inode1)) == 0)); +} diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 
073165db5641c96918361c1bcbf0124eaace8f83..c9ca9e79411d1cb6e66a6bba7dfb7a7c0b3f0abd 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -12,7 +12,9 @@ #ifndef _FSCRYPT_PRIVATE_H #define _FSCRYPT_PRIVATE_H +#ifndef __FS_HAS_ENCRYPTION #define __FS_HAS_ENCRYPTION 1 +#endif #include #include @@ -70,6 +72,7 @@ struct fscrypt_info { struct crypto_skcipher *ci_ctfm; struct crypto_cipher *ci_essiv_tfm; u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; + u8 ci_raw_key[FS_MAX_KEY_SIZE]; }; typedef enum { @@ -95,9 +98,19 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode, filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS) return true; + if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE && + filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) + return true; + return false; } +static inline bool is_private_mode(struct fscrypt_info *ci) +{ + return ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE; +} + + /* crypto.c */ extern struct kmem_cache *fscrypt_info_cachep; extern int fscrypt_initialize(unsigned int cop_flags); diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 6e1b0281dabaea11b0d07de062feab4eef028e09..27edc5b9eb66d80800865478ab136b39ada99287 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -16,6 +16,7 @@ #include #include #include "fscrypt_private.h" +#include "ext4_ice.h" static struct crypto_shash *essiv_hash_tfm; @@ -116,7 +117,25 @@ static int validate_user_key(struct fscrypt_info *crypt_info, res = -ENOKEY; goto out; } - res = derive_key_aes(ctx->nonce, master_key, raw_key); + /* + * If we don't need to derive, we still want to do everything + * up until now to validate the key. It's cleaner to fail now + * than to fail in block I/O. + */ + if (!is_private_mode(crypt_info)) { + res = derive_key_aes(ctx->nonce, master_key, + crypt_info->ci_raw_key); + } else { + /* + * Inline encryption: no key derivation required because IVs are + * assigned based on iv_sector. 
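As a side note on the private-mode branch above: the copy just below places the raw master key into ci_raw_key, and the ext4_ice helpers added in fs/crypto/ext4_ice.c treat its first half as the ICE key and its second half as the salt. A minimal standalone sketch of that layout, assuming FS_AES_256_XTS_KEY_SIZE is 64 bytes as in the fscrypt UAPI (the struct and function names here are illustrative, not part of the patch):

#include <string.h>

#define FS_AES_256_XTS_KEY_SIZE 64

/* Mirrors what ext4_get_ice_encryption_key()/_salt() return: two
 * 32-byte halves of the copied master key. */
struct ice_raw_key_layout {
	unsigned char key[FS_AES_256_XTS_KEY_SIZE / 2];
	unsigned char salt[FS_AES_256_XTS_KEY_SIZE / 2];
};

/* In FS_ENCRYPTION_MODE_PRIVATE the master key bytes are used as-is;
 * derive_key_aes() is skipped because the per-block IVs come from the
 * sector number handled by the inline crypto engine. */
static void ice_fill_raw_key(struct ice_raw_key_layout *out,
			     const unsigned char *master_key)
{
	memcpy(out->key, master_key, sizeof(out->key));
	memcpy(out->salt, master_key + sizeof(out->key), sizeof(out->salt));
}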
+ */ + if (sizeof(crypt_info->ci_raw_key) != sizeof(master_key->raw)) + goto out; + memcpy(crypt_info->ci_raw_key, + master_key->raw, sizeof(crypt_info->ci_raw_key)); + res = 0; + } out: up_read(&keyring_key->sem); key_put(keyring_key); @@ -137,33 +156,42 @@ static const struct { FS_AES_128_CTS_KEY_SIZE }, [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = { "xts(speck128)", 64 }, [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = { "cts(cbc(speck128))", 32 }, + [FS_ENCRYPTION_MODE_PRIVATE] = { "bugon", FS_AES_256_XTS_KEY_SIZE }, }; static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode, - const char **cipher_str_ret, int *keysize_ret) + const char **cipher_str_ret, int *keysize_ret, int *fname) { - u32 mode; - - if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) { - pr_warn_ratelimited("fscrypt: inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)\n", - inode->i_ino, - ci->ci_data_mode, ci->ci_filename_mode); - return -EINVAL; + if (S_ISREG(inode->i_mode)) { + if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) { + *cipher_str_ret = "xts(aes)"; + *keysize_ret = FS_AES_256_XTS_KEY_SIZE; + return 0; + } else if (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE) { + *cipher_str_ret = "bugon"; + *keysize_ret = FS_AES_256_XTS_KEY_SIZE; + return 0; + } + pr_warn_once("fscrypto: unsupported contents encryption mode %d for inode %lu\n", + ci->ci_data_mode, inode->i_ino); + return -ENOKEY; } - if (S_ISREG(inode->i_mode)) { - mode = ci->ci_data_mode; - } else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { - mode = ci->ci_filename_mode; - } else { - WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n", - inode->i_ino, (inode->i_mode & S_IFMT)); - return -EINVAL; + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { + if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) { + *cipher_str_ret = "cts(cbc(aes))"; + *keysize_ret = FS_AES_256_CTS_KEY_SIZE; + *fname = 1; + return 0; + } + pr_warn_once("fscrypto: unsupported filenames encryption mode %d for inode %lu\n", + ci->ci_filename_mode, inode->i_ino); + return -ENOKEY; } - *cipher_str_ret = available_modes[mode].cipher_str; - *keysize_ret = available_modes[mode].keysize; - return 0; + pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n", + (inode->i_mode & S_IFMT), inode->i_ino); + return -ENOKEY; } static void put_crypt_info(struct fscrypt_info *ci) @@ -173,6 +201,7 @@ static void put_crypt_info(struct fscrypt_info *ci) crypto_free_skcipher(ci->ci_ctfm); crypto_free_cipher(ci->ci_essiv_tfm); + memset(ci, 0, sizeof(*ci)); /* sanitizes ->ci_raw_key */ kmem_cache_free(fscrypt_info_cachep, ci); } @@ -242,6 +271,21 @@ void __exit fscrypt_essiv_cleanup(void) crypto_free_shash(essiv_hash_tfm); } +static int fs_data_encryption_mode(void) +{ + return ext4_is_ice_enabled() ? 
FS_ENCRYPTION_MODE_PRIVATE : + FS_ENCRYPTION_MODE_AES_256_XTS; +} + +int fs_using_hardware_encryption(struct inode *inode) +{ + struct fscrypt_info *ci = inode->i_crypt_info; + + return S_ISREG(inode->i_mode) && ci && + ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE; +} +EXPORT_SYMBOL(fs_using_hardware_encryption); + int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *crypt_info; @@ -249,8 +293,8 @@ int fscrypt_get_encryption_info(struct inode *inode) struct crypto_skcipher *ctfm; const char *cipher_str; int keysize; - u8 *raw_key = NULL; int res; + int fname = 0; if (inode->i_crypt_info) return 0; @@ -267,7 +311,7 @@ int fscrypt_get_encryption_info(struct inode *inode) /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; - ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; + ctx.contents_encryption_mode = fs_data_encryption_mode(); ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); } else if (res != sizeof(ctx)) { @@ -292,7 +336,8 @@ int fscrypt_get_encryption_info(struct inode *inode) memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, sizeof(crypt_info->ci_master_key)); - res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize); + res = determine_cipher_type(crypt_info, inode, &cipher_str, + &keysize, &fname); if (res) goto out; @@ -301,45 +346,50 @@ int fscrypt_get_encryption_info(struct inode *inode) * crypto API as part of key derivation. */ res = -ENOMEM; - raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); - if (!raw_key) - goto out; - res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX, - keysize); + if (fscrypt_dummy_context_enabled(inode)) { + memset(crypt_info->ci_raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); + goto got_key; + } + res = validate_user_key(crypt_info, &ctx, crypt_info->ci_raw_key, + FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE); if (res && inode->i_sb->s_cop->key_prefix) { - int res2 = validate_user_key(crypt_info, &ctx, raw_key, - inode->i_sb->s_cop->key_prefix, - keysize); + int res2 = validate_user_key(crypt_info, &ctx, + crypt_info->ci_raw_key, + inode->i_sb->s_cop->key_prefix, keysize); if (res2) { if (res2 == -ENOKEY) res = -ENOKEY; goto out; } + res = 0; } else if (res) { goto out; } - ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); - if (!ctfm || IS_ERR(ctfm)) { - res = ctfm ? PTR_ERR(ctfm) : -ENOMEM; - pr_debug("%s: error %d (inode %lu) allocating crypto tfm\n", - __func__, res, inode->i_ino); - goto out; - } - crypt_info->ci_ctfm = ctfm; - crypto_skcipher_clear_flags(ctfm, ~0); - crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); - /* - * if the provided key is longer than keysize, we use the first - * keysize bytes of the derived key only - */ - res = crypto_skcipher_setkey(ctfm, raw_key, keysize); - if (res) - goto out; - - if (S_ISREG(inode->i_mode) && +got_key: + if (crypt_info->ci_data_mode != FS_ENCRYPTION_MODE_PRIVATE || fname) { + ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); + if (!ctfm || IS_ERR(ctfm)) { + res = ctfm ? 
PTR_ERR(ctfm) : -ENOMEM; + pr_err("%s: error %d inode %u allocating crypto tfm\n", + __func__, res, (unsigned int) inode->i_ino); + goto out; + } + crypt_info->ci_ctfm = ctfm; + crypto_skcipher_clear_flags(ctfm, ~0); + crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); + /* + * if the provided key is longer than keysize, we use the first + * keysize bytes of the derived key only + */ + res = crypto_skcipher_setkey(ctfm, crypt_info->ci_raw_key, + keysize); + if (res) + goto out; + } else if (S_ISREG(inode->i_mode) && crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) { - res = init_essiv_generator(crypt_info, raw_key, keysize); + res = init_essiv_generator(crypt_info, crypt_info->ci_raw_key, + keysize); if (res) { pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n", __func__, res, inode->i_ino); @@ -352,7 +402,6 @@ int fscrypt_get_encryption_info(struct inode *inode) if (res == -ENOKEY) res = 0; put_crypt_info(crypt_info); - kzfree(raw_key); return res; } EXPORT_SYMBOL(fscrypt_get_encryption_info); diff --git a/fs/direct-io.c b/fs/direct-io.c index 625a84aa6484f4db92261c376f3b9db008ccb6b9..96a103249a0b21981fc89316394d0443f576e157 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -452,6 +452,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) bio_set_pages_dirty(bio); dio->bio_disk = bio->bi_disk; + bio->bi_dio_inode = dio->inode; if (sdio->submit_io) { sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); @@ -464,6 +465,18 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) sdio->logical_offset_in_bio = 0; } +struct inode *dio_bio_get_inode(struct bio *bio) +{ + struct inode *inode = NULL; + + if (bio == NULL) + return NULL; + + inode = bio->bi_dio_inode; + + return inode; +} +EXPORT_SYMBOL(dio_bio_get_inode); /* * Release any resources in case of a failure */ diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index e5e29f8c920b18bc6959cdb16d2ee57fec7d45a6..9d1823efff343fdffc2a5dbede46e0285e8df9a2 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -2026,6 +2026,16 @@ int ecryptfs_encrypt_and_encode_filename( return rc; } +static bool is_dot_dotdot(const char *name, size_t name_size) +{ + if (name_size == 1 && name[0] == '.') + return true; + else if (name_size == 2 && name[0] == '.' 
&& name[1] == '.') + return true; + + return false; +} + /** * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext * @plaintext_name: The plaintext name @@ -2050,13 +2060,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, size_t packet_size; int rc = 0; - if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) - && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) - && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) - && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, - ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) { - const char *orig_name = name; - size_t orig_name_size = name_size; + if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && + !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) { + if (is_dot_dotdot(name, name_size)) { + rc = ecryptfs_copy_filename(plaintext_name, + plaintext_name_size, + name, name_size); + goto out; + } + + if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE || + strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX, + ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) { + rc = -EINVAL; + goto out; + } name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE; @@ -2079,12 +2097,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name, decoded_name, decoded_name_size); if (rc) { - printk(KERN_INFO "%s: Could not parse tag 70 packet " - "from filename; copying through filename " - "as-is\n", __func__); - rc = ecryptfs_copy_filename(plaintext_name, - plaintext_name_size, - orig_name, orig_name_size); + ecryptfs_printk(KERN_DEBUG, + "%s: Could not parse tag 70 packet from filename\n", + __func__); goto out_free; } } else { diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c74ed3ca3372f8b8e91ed9da35de617c7b25ce08..b76a9853325e7034a0c40f4d6099bfb83ef21c94 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name, buf->sb, lower_name, lower_namelen); if (rc) { - printk(KERN_ERR "%s: Error attempting to decode and decrypt " - "filename [%s]; rc = [%d]\n", __func__, lower_name, - rc); - goto out; + if (rc != -EINVAL) { + ecryptfs_printk(KERN_DEBUG, + "%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n", + __func__, lower_name, rc); + return rc; + } + + /* Mask -EINVAL errors as these are most likely due to a plaintext + * filename present in the lower filesystem despite filename + * encryption being enabled. One unavoidable example would be + * the "lost+found" dentry in the root directory of an Ext4 + * filesystem. 
+ */ + return 0; } + buf->caller->pos = buf->ctx.pos; rc = !dir_emit(buf->caller, name, name_size, ino, d_type); kfree(name); if (!rc) buf->entries_written++; -out: + return rc; } diff --git a/fs/exec.c b/fs/exec.c index 062159d2297614bcdf8ddd66ea463c99b4e1fad4..f6f4ed966c73f20b3f6613dbae6760af701d14bf 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -311,7 +311,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - INIT_LIST_HEAD(&vma->anon_vma_chain); + INIT_VMA(vma); err = insert_vm_struct(mm, vma); if (err) diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index e38039fd96ff59ab59ce17407abcf26de4c5a950..e9232a09af5de92def359af3486001b9c76c863f 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -109,10 +109,16 @@ config EXT4_ENCRYPTION decrypted pages in the page cache. config EXT4_FS_ENCRYPTION - bool - default y + bool "Ext4 FS Encryption" + default n depends on EXT4_ENCRYPTION +config EXT4_FS_ICE_ENCRYPTION + bool "Ext4 Encryption with ICE support" + default n + depends on EXT4_FS_ENCRYPTION + depends on PFK + config EXT4_DEBUG bool "EXT4 debugging support" depends on EXT4_FS diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index 8fdfcd3c3e04373b913a6f3bf54807286dfebc0b..7cf69c14f796b436e0366882bdbad5aabeddf502 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -2,6 +2,7 @@ # # Makefile for the linux ext4-filesystem routines. # +ccflags-y += -Ifs/crypto obj-$(CONFIG_EXT4_FS) += ext4.o diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 27f38bb5046d6a7cf9d016be023083373ec34a54..f727e10b3522304d2179c39d95bce682ff494a51 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -40,7 +40,9 @@ #include #endif +#ifndef __FS_HAS_ENCRYPTION #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) +#endif #include /* @@ -2355,6 +2357,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir, } static inline void ext4_fname_free_filename(struct ext4_filename *fname) { } +#define fscrypt_set_d_op(i) #endif /* dir.c */ diff --git a/fs/ext4/ext4_ice.h b/fs/ext4/ext4_ice.h new file mode 100644 index 0000000000000000000000000000000000000000..b0149dd7bad4100492107a4ea8073d58d1b41b0f --- /dev/null +++ b/fs/ext4/ext4_ice.h @@ -0,0 +1,104 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _EXT4_ICE_H +#define _EXT4_ICE_H + +#include "ext4.h" +#include + +#ifdef CONFIG_EXT4_FS_ICE_ENCRYPTION +static inline int ext4_should_be_processed_by_ice(const struct inode *inode) +{ + if (!ext4_encrypted_inode((struct inode *)inode)) + return 0; + + return fs_using_hardware_encryption((struct inode *)inode); +} + +static inline int ext4_is_ice_enabled(void) +{ + return 1; +} + +int ext4_is_aes_xts_cipher(const struct inode *inode); + +char *ext4_get_ice_encryption_key(const struct inode *inode); +char *ext4_get_ice_encryption_salt(const struct inode *inode); + +int ext4_is_ice_encryption_info_equal(const struct inode *inode1, + const struct inode *inode2); + +static inline size_t ext4_get_ice_encryption_key_size( + const struct inode *inode) +{ + return FS_AES_256_XTS_KEY_SIZE / 2; +} + +static inline size_t ext4_get_ice_encryption_salt_size( + const struct inode *inode) +{ + return FS_AES_256_XTS_KEY_SIZE / 2; +} + +#else +static inline int ext4_should_be_processed_by_ice(const struct inode *inode) +{ + return 0; +} +static inline int ext4_is_ice_enabled(void) +{ + return 0; +} + +static inline char *ext4_get_ice_encryption_key(const struct inode *inode) +{ + return NULL; +} + +static inline char *ext4_get_ice_encryption_salt(const struct inode *inode) +{ + return NULL; +} + +static inline size_t ext4_get_ice_encryption_key_size( + const struct inode *inode) +{ + return 0; +} + +static inline size_t ext4_get_ice_encryption_salt_size( + const struct inode *inode) +{ + return 0; +} + +static inline int ext4_is_xts_cipher(const struct inode *inode) +{ + return 0; +} + +static inline int ext4_is_ice_encryption_info_equal( + const struct inode *inode1, + const struct inode *inode2) +{ + return 0; +} + +static inline int ext4_is_aes_xts_cipher(const struct inode *inode) +{ + return 0; +} + +#endif + +#endif /* _EXT4_ICE_H */ diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index c32802c956d5ebac9f264b95ec3f77c78137e4ca..bf7fa1507e811221523a785cee1ed41159d7a0ca 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, unsigned epb = inode->i_sb->s_blocksize / sizeof(u32); int i; - /* Count number blocks in a subtree under 'partial' */ - count = 1; - for (i = 0; partial + i != chain + depth - 1; i++) - count *= epb; + /* + * Count number blocks in a subtree under 'partial'. At each + * level we count number of complete empty subtrees beyond + * current offset and then descend into the subtree only + * partially beyond current offset. 
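The comment above describes the recurrence that the loop added just below implements. A rough standalone sketch of the same calculation (function and parameter names are invented for illustration; 'level' stands in for partial - chain, and offsets[] are the per-level offsets already computed by ext4_block_to_path()):

/* Blocks covered by the hole found under 'partial': at each deeper
 * level, add the completely empty subtrees to the right of the current
 * offset, then descend into the partially empty subtree. */
static unsigned long hole_blocks_under_partial(unsigned int epb,
					       const int *offsets,
					       int level, int depth)
{
	unsigned long count = 0;
	int i;

	for (i = level + 1; i < depth; i++)
		count = count * epb + (epb - offsets[i] - 1);
	return count + 1;
}

The code being removed multiplied epb once per remaining level, which reports a hole as large as a completely empty subtree even when the lookup had already advanced partway into each level; the new recurrence subtracts that consumed part.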
+ */ + count = 0; + for (i = partial - chain + 1; i < depth; i++) + count = count * epb + (epb - offsets[i] - 1); + count++; /* Fill in size of a hole we found */ map->m_pblk = 0; map->m_len = min_t(unsigned int, map->m_len, count); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 4e63507f5ce101caa6d488c37ca14653e1c50511..54e4ad43d7c22ca733ea29fe228b76024f855d7e 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -151,6 +151,12 @@ int ext4_find_inline_data_nolock(struct inode *inode) goto out; if (!is.s.not_found) { + if (is.s.here->e_value_inum) { + EXT4_ERROR_INODE(inode, "inline data xattr refers " + "to an external xattr inode"); + error = -EFSCORRUPTED; + goto out; + } EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here - (void *)ext4_raw_inode(&is.iloc)); EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE + diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3a1bd461ceb14d58ad3c17e98bb68d36011e8f63..f834ea9f5ee9c1101e2b1f032d214048e516f86d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -44,6 +44,7 @@ #include "xattr.h" #include "acl.h" #include "truncate.h" +#include "ext4_ice.h" #include #include @@ -1218,7 +1219,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++ = bh; decrypt = ext4_encrypted_inode(inode) && - S_ISREG(inode->i_mode); + S_ISREG(inode->i_mode) && + !ext4_should_be_processed_by_ice(inode); } } /* @@ -3714,6 +3716,12 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) get_block_func = ext4_dio_get_block_unwritten_async; dio_flags = DIO_LOCKING; } + +#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \ +!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION) + if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) + return 0; +#endif ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, get_block_func, ext4_end_io_dio, NULL, dio_flags); @@ -3822,7 +3830,8 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret; int rw = iov_iter_rw(iter); -#ifdef CONFIG_EXT4_FS_ENCRYPTION +#if defined(CONFIG_EXT4_FS_ENCRYPTION) && \ +!defined(CONFIG_EXT4_FS_ICE_ENCRYPTION) if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) return 0; #endif @@ -4032,7 +4041,8 @@ static int __ext4_block_zero_page_range(handle_t *handle, if (!buffer_uptodate(bh)) goto unlock; if (S_ISREG(inode->i_mode) && - ext4_encrypted_inode(inode)) { + ext4_encrypted_inode(inode) && + !ext4_should_be_processed_by_ice(inode)) { /* We expect the key to be set. 
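The condition extended here follows the same pattern as the ext4_block_write_begin() and page-io hunks earlier in this patch: software fscrypt processing is applied only when the inode is encrypted and not already handled by the inline crypto engine. A small sketch of that predicate, assuming the ext4 and ext4_ice declarations above are in scope (this helper itself is not part of the patch):

/* True when page contents still need software en/decryption via
 * fscrypt, i.e. an encrypted regular file that ICE is not handling. */
static inline bool ext4_needs_sw_crypt(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
	       ext4_encrypted_inode(inode) &&
	       !ext4_should_be_processed_by_ice(inode);
}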
*/ BUG_ON(!fscrypt_has_encryption_key(inode)); BUG_ON(blocksize != PAGE_SIZE); @@ -4306,28 +4316,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) EXT4_BLOCK_SIZE_BITS(sb); stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); - /* If there are no blocks to remove, return now */ - if (first_block >= stop_block) - goto out_stop; + /* If there are blocks to remove, do it */ + if (stop_block > first_block) { - down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + down_write(&EXT4_I(inode)->i_data_sem); + ext4_discard_preallocations(inode); - ret = ext4_es_remove_extent(inode, first_block, - stop_block - first_block); - if (ret) { - up_write(&EXT4_I(inode)->i_data_sem); - goto out_stop; - } + ret = ext4_es_remove_extent(inode, first_block, + stop_block - first_block); + if (ret) { + up_write(&EXT4_I(inode)->i_data_sem); + goto out_stop; + } - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) - ret = ext4_ext_remove_space(inode, first_block, - stop_block - 1); - else - ret = ext4_ind_remove_space(handle, inode, first_block, - stop_block); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) + ret = ext4_ext_remove_space(inode, first_block, + stop_block - 1); + else + ret = ext4_ind_remove_space(handle, inode, first_block, + stop_block); - up_write(&EXT4_I(inode)->i_data_sem); + up_write(&EXT4_I(inode)->i_data_sem); + } if (IS_SYNC(inode)) ext4_handle_sync(handle); @@ -4697,19 +4707,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, } } -static inline void ext4_iget_extra_inode(struct inode *inode, +static inline int ext4_iget_extra_inode(struct inode *inode, struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { __le32 *magic = (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= EXT4_INODE_SIZE(inode->i_sb) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { ext4_set_inode_state(inode, EXT4_STATE_XATTR); - ext4_find_inline_data_nolock(inode); + return ext4_find_inline_data_nolock(inode); } else EXT4_I(inode)->i_inline_off = 0; + return 0; } int ext4_get_projid(struct inode *inode, kprojid_t *projid) @@ -4889,7 +4901,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) ei->i_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; } else { - ext4_iget_extra_inode(inode, raw_inode, ei); + ret = ext4_iget_extra_inode(inode, raw_inode, ei); + if (ret) + goto bad_inode; } } diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 1eb68e62693132a264005b6db50513497a2abe82..53bd5d893a586815eedc647224d10d8d8acbfa34 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -939,11 +939,13 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case EXT4_IOC_PRECACHE_EXTENTS: return ext4_ext_precache(inode); - case EXT4_IOC_SET_ENCRYPTION_POLICY: - if (!ext4_has_feature_encrypt(sb)) - return -EOPNOTSUPP; + case EXT4_IOC_SET_ENCRYPTION_POLICY: { +#ifdef CONFIG_EXT4_FS_ENCRYPTION return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); - +#else + return -EOPNOTSUPP; +#endif + } case EXT4_IOC_GET_ENCRYPTION_PWSALT: { #ifdef CONFIG_EXT4_FS_ENCRYPTION int err, err2; diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index db7590178dfcf1a4b59ee3c44deaa89a21de8ca6..bc475426f0c3c6af26e609c6b54510608dbc6d89 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -29,6 +29,7 @@ #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" +#include "ext4_ice.h" static struct kmem_cache *io_end_cachep; @@ 
-482,8 +483,9 @@ int ext4_bio_write_page(struct ext4_io_submit *io, gfp_t gfp_flags = GFP_NOFS; retry_encrypt: - data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0, - page->index, gfp_flags); + if (!ext4_should_be_processed_by_ice(inode)) + data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, + 0, page->index, gfp_flags); if (IS_ERR(data_page)) { ret = PTR_ERR(data_page); if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) { diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 1dac59c247920a0fad6b3d2cc06d5a9f72fc15fd..823c0b82dfeb006ea546caeaa38e7435a1baca3b 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1905,7 +1905,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) return 0; n_group = ext4_get_group_number(sb, n_blocks_count - 1); - if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { + if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { ext4_warning(sb, "resize would cause inodes_count overflow"); return -EINVAL; } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 1718354e6322e524f58a83d3d317fc4876973fb0..ed1cf24a78319fd00cbb364b4d283c3bea5fd5f5 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1687,7 +1687,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, /* No failures allowed past this point. */ - if (!s->not_found && here->e_value_offs) { + if (!s->not_found && here->e_value_size && here->e_value_offs) { /* Remove the old value. */ void *first_val = s->base + min_offs; size_t offs = le16_to_cpu(here->e_value_offs); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index bef74d628f66990853632041b5b54b460bb71219..b1e58b10425fe0f2deaa20ec552cff35792f3494 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2439,9 +2439,18 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen); __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block); - trimmed = __wait_discard_cmd_range(sbi, &dpolicy, + + /* + * We filed discard candidates, but actually we don't need to wait for + * all of them, since they'll be issued in idle time along with runtime + * discard option. User configuration looks like using runtime discard + * or periodic fstrim instead of it. 
+ */ + if (!test_opt(sbi, DISCARD)) { + trimmed = __wait_discard_cmd_range(sbi, &dpolicy, start_block, end_block); - range->len = F2FS_BLK_TO_BYTES(trimmed); + range->len = F2FS_BLK_TO_BYTES(trimmed); + } out: return err; } diff --git a/fs/fuse/control.c b/fs/fuse/control.c index b9ea99c5b5b31948882d37113d520ab9cdcf9564..5be0339dcceb2f97715f710f4651c084a5f8349d 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, if (!dentry) return NULL; - fc->ctl_dentry[fc->ctl_ndents++] = dentry; inode = new_inode(fuse_control_sb); - if (!inode) + if (!inode) { + dput(dentry); return NULL; + } inode->i_ino = get_next_ino(); inode->i_mode = mode; @@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, set_nlink(inode, nlink); inode->i_private = fc; d_add(dentry, inode); + + fc->ctl_dentry[fc->ctl_ndents++] = dentry; + return dentry; } @@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc) for (i = fc->ctl_ndents - 1; i >= 0; i--) { struct dentry *dentry = fc->ctl_dentry[i]; d_inode(dentry)->i_private = NULL; - d_drop(dentry); + if (!i) { + /* Get rid of submounts: */ + d_invalidate(dentry); + } dput(dentry); } drop_nlink(d_inode(fuse_control_sb->s_root)); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 968b798235021947469ad6c49a61f17ae8931f1e..f4d6c279922e205666104fa372806a45d11f8585 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -383,8 +383,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) if (!fc->blocked && waitqueue_active(&fc->blocked_waitq)) wake_up(&fc->blocked_waitq); - if (fc->num_background == fc->congestion_threshold && - fc->connected && fc->sb) { + if (fc->num_background == fc->congestion_threshold && fc->sb) { clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC); clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC); } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index cebd108dc3ce21ce62fd57dd8dddb9a9a551136c..556053b6d001d61e452f4acf3c0783bf9188074c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1675,8 +1675,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, return err; if (attr->ia_valid & ATTR_OPEN) { - if (fc->atomic_o_trunc) + /* This is coming from open(..., ... | O_TRUNC); */ + WARN_ON(!(attr->ia_valid & ATTR_SIZE)); + WARN_ON(attr->ia_size != 0); + if (fc->atomic_o_trunc) { + /* + * No need to send request to userspace, since actual + * truncation has already been done by OPEN. But still + * need to truncate page cache. 
+ */ + i_size_write(inode, 0); + truncate_pagecache(inode, 0); return 0; + } file = NULL; } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index a79e320349cd79b8032feb20cd17c4fd5f25fd64..cd2327f5306946ecfbdcb28f2e8fce69bd483ccb 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1176,6 +1176,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fuse_dev_free(fud); err_put_conn: fuse_conn_put(fc); + sb->s_fs_info = NULL; err_fput: fput(file); err: diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 447a24d77b894ef733412ba201cadcaa9a226f7e..ed4edcd2bc56db126064864c625f7455e070aee3 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt) break; #ifdef CONFIG_JOLIET case Opt_iocharset: + kfree(popt->iocharset); popt->iocharset = match_strdup(&args[0]); + if (!popt->iocharset) + return 0; break; #endif case Opt_map_a: diff --git a/fs/namei.c b/fs/namei.c index 54fb30f14c3da4e8364cc30a5ae6409c5279241e..055c6c40901b6b6d03de187d0f92bafb4a565c76 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2913,6 +2913,11 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, if (error) return error; error = dir->i_op->create(dir, dentry, mode, want_excl); + if (error) + return error; + error = security_inode_post_create(dir, dentry, mode); + if (error) + return error; if (!error) fsnotify_create(dir, dentry); return error; @@ -3735,6 +3740,13 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u return error; error = dir->i_op->mknod(dir, dentry, mode, dev); + if (error) + return error; + + error = security_inode_post_create(dir, dentry, mode); + if (error) + return error; + if (!error) fsnotify_create(dir, dentry); return error; diff --git a/fs/namespace.c b/fs/namespace.c index 3dc434888f7df75f0b152ce7bf636a196a3ef335..5660c128bc843a08f74e670ad8492111d50e9e9f 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2837,7 +2837,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, mnt_flags |= MNT_NODIRATIME; if (flags & MS_STRICTATIME) mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME); - if (flags & SB_RDONLY) + if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; /* The default atime for remount is preservation */ diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 19151f6c0e97e32d4f58245a75c0b237194911b0..516b2248cafe8ad5f074be1af0c7ad52d1fffd3e 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -420,11 +420,8 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, return htonl(NFS4ERR_SEQ_FALSE_RETRY); } - /* Wraparound */ - if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) { - if (args->csa_sequenceid == 1) - return htonl(NFS4_OK); - } else if (likely(args->csa_sequenceid == slot->seq_nr + 1)) + /* Note: wraparound relies on seq_nr being of type u32 */ + if (likely(args->csa_sequenceid == slot->seq_nr + 1)) return htonl(NFS4_OK); /* Misordered request */ diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index dcfcf7fd7438f9a3248d110d149de8f7e652cf07..a73144b3cb8c8aaeeaddc9b12713620d22380843 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -465,7 +465,7 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid); extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); extern void nfs_release_seqid(struct nfs_seqid *seqid); extern void nfs_free_seqid(struct nfs_seqid *seqid); -extern int nfs4_setup_sequence(const struct nfs_client *client, 
+extern int nfs4_setup_sequence(struct nfs_client *client, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 22dc30a679a03615e480b993b76482f8536bcf19..b6f9d84ba19b1a1002cbc22760ab4d84bd396006 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf, int id_len; ssize_t ret; - id_len = snprintf(id_str, sizeof(id_str), "%u", id); + id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str)); ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap); if (ret < 0) return -EINVAL; @@ -627,7 +627,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, if (strcmp(upcall->im_name, im->im_name) != 0) break; /* Note: here we store the NUL terminator too */ - len = sprintf(id_str, "%d", im->im_id) + 1; + len = 1 + nfs_map_numeric_to_string(im->im_id, id_str, + sizeof(id_str)); ret = nfs_idmap_instantiate(key, authkey, id_str, len); break; case IDMAP_CONV_IDTONAME: diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ae8f43d270d6dc6a0ce76bce9f51ebb3208d7e46..928bbc397818ad252eb64e4ceadbe9464d57ca97 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -96,6 +96,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, struct nfs_open_context *ctx, struct nfs4_label *ilabel, struct nfs4_label *olabel); #ifdef CONFIG_NFS_V4_1 +static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, + struct rpc_cred *cred, + struct nfs4_slot *slot, + bool is_privileged); static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, struct rpc_cred *); static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, @@ -641,13 +645,14 @@ static int nfs40_sequence_done(struct rpc_task *task, #if defined(CONFIG_NFS_V4_1) -static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) +static void nfs41_release_slot(struct nfs4_slot *slot) { struct nfs4_session *session; struct nfs4_slot_table *tbl; - struct nfs4_slot *slot = res->sr_slot; bool send_new_highest_used_slotid = false; + if (!slot) + return; tbl = slot->table; session = tbl->session; @@ -673,13 +678,18 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) send_new_highest_used_slotid = false; out_unlock: spin_unlock(&tbl->slot_tbl_lock); - res->sr_slot = NULL; if (send_new_highest_used_slotid) nfs41_notify_server(session->clp); if (waitqueue_active(&tbl->slot_waitq)) wake_up_all(&tbl->slot_waitq); } +static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) +{ + nfs41_release_slot(res->sr_slot); + res->sr_slot = NULL; +} + static int nfs41_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) { @@ -707,13 +717,6 @@ static int nfs41_sequence_process(struct rpc_task *task, /* Check the SEQUENCE operation status */ switch (res->sr_status) { case 0: - /* If previous op on slot was interrupted and we reused - * the seq# and got a reply from the cache, then retry - */ - if (task->tk_status == -EREMOTEIO && interrupted) { - ++slot->seq_nr; - goto retry_nowait; - } /* Update the slot's sequence and clientid lease timer */ slot->seq_done = 1; clp = session->clp; @@ -747,16 +750,16 @@ static int nfs41_sequence_process(struct rpc_task *task, * The slot id we used was probably retired. Try again * using a different slot id. 
*/ + if (slot->slot_nr < slot->table->target_highest_slotid) + goto session_recover; goto retry_nowait; case -NFS4ERR_SEQ_MISORDERED: /* * Was the last operation on this sequence interrupted? * If so, retry after bumping the sequence number. */ - if (interrupted) { - ++slot->seq_nr; - goto retry_nowait; - } + if (interrupted) + goto retry_new_seq; /* * Could this slot have been previously retired? * If so, then the server may be expecting seq_nr = 1! @@ -765,10 +768,11 @@ static int nfs41_sequence_process(struct rpc_task *task, slot->seq_nr = 1; goto retry_nowait; } - break; + goto session_recover; case -NFS4ERR_SEQ_FALSE_RETRY: - ++slot->seq_nr; - goto retry_nowait; + if (interrupted) + goto retry_new_seq; + goto session_recover; default: /* Just update the slot sequence no. */ slot->seq_done = 1; @@ -778,6 +782,11 @@ static int nfs41_sequence_process(struct rpc_task *task, dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); out_noaction: return ret; +session_recover: + nfs4_schedule_session_recovery(session, res->sr_status); + goto retry_nowait; +retry_new_seq: + ++slot->seq_nr; retry_nowait: if (rpc_restart_call_prepare(task)) { nfs41_sequence_free_slot(res); @@ -854,6 +863,17 @@ static const struct rpc_call_ops nfs41_call_sync_ops = { .rpc_call_done = nfs41_call_sync_done, }; +static void +nfs4_sequence_process_interrupted(struct nfs_client *client, + struct nfs4_slot *slot, struct rpc_cred *cred) +{ + struct rpc_task *task; + + task = _nfs41_proc_sequence(client, cred, slot, true); + if (!IS_ERR(task)) + rpc_put_task_async(task); +} + #else /* !CONFIG_NFS_V4_1 */ static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) @@ -874,9 +894,34 @@ int nfs4_sequence_done(struct rpc_task *task, } EXPORT_SYMBOL_GPL(nfs4_sequence_done); +static void +nfs4_sequence_process_interrupted(struct nfs_client *client, + struct nfs4_slot *slot, struct rpc_cred *cred) +{ + WARN_ON_ONCE(1); + slot->interrupted = 0; +} + #endif /* !CONFIG_NFS_V4_1 */ -int nfs4_setup_sequence(const struct nfs_client *client, +static +void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + struct nfs4_slot *slot) +{ + if (!slot) + return; + slot->privileged = args->sa_privileged ? 
1 : 0; + args->sa_slot = slot; + + res->sr_slot = slot; + res->sr_timestamp = jiffies; + res->sr_status_flags = 0; + res->sr_status = 1; + +} + +int nfs4_setup_sequence(struct nfs_client *client, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task) @@ -894,29 +939,28 @@ int nfs4_setup_sequence(const struct nfs_client *client, task->tk_timeout = 0; } - spin_lock(&tbl->slot_tbl_lock); - /* The state manager will wait until the slot table is empty */ - if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) - goto out_sleep; + for (;;) { + spin_lock(&tbl->slot_tbl_lock); + /* The state manager will wait until the slot table is empty */ + if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) + goto out_sleep; + + slot = nfs4_alloc_slot(tbl); + if (IS_ERR(slot)) { + /* Try again in 1/4 second */ + if (slot == ERR_PTR(-ENOMEM)) + task->tk_timeout = HZ >> 2; + goto out_sleep; + } + spin_unlock(&tbl->slot_tbl_lock); - slot = nfs4_alloc_slot(tbl); - if (IS_ERR(slot)) { - /* Try again in 1/4 second */ - if (slot == ERR_PTR(-ENOMEM)) - task->tk_timeout = HZ >> 2; - goto out_sleep; + if (likely(!slot->interrupted)) + break; + nfs4_sequence_process_interrupted(client, + slot, task->tk_msg.rpc_cred); } - spin_unlock(&tbl->slot_tbl_lock); - - slot->privileged = args->sa_privileged ? 1 : 0; - args->sa_slot = slot; - res->sr_slot = slot; - if (session) { - res->sr_timestamp = jiffies; - res->sr_status_flags = 0; - res->sr_status = 1; - } + nfs4_sequence_attach_slot(args, res, slot); trace_nfs4_setup_sequence(session, args); out_start: @@ -8151,6 +8195,7 @@ static const struct rpc_call_ops nfs41_sequence_ops = { static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred, + struct nfs4_slot *slot, bool is_privileged) { struct nfs4_sequence_data *calldata; @@ -8164,15 +8209,18 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, .callback_ops = &nfs41_sequence_ops, .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, }; + struct rpc_task *ret; + ret = ERR_PTR(-EIO); if (!atomic_inc_not_zero(&clp->cl_count)) - return ERR_PTR(-EIO); + goto out_err; + + ret = ERR_PTR(-ENOMEM); calldata = kzalloc(sizeof(*calldata), GFP_NOFS); - if (calldata == NULL) { - nfs_put_client(clp); - return ERR_PTR(-ENOMEM); - } + if (calldata == NULL) + goto out_put_clp; nfs4_init_sequence(&calldata->args, &calldata->res, 0); + nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); if (is_privileged) nfs4_set_sequence_privileged(&calldata->args); msg.rpc_argp = &calldata->args; @@ -8180,7 +8228,15 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, calldata->clp = clp; task_setup_data.callback_data = calldata; - return rpc_run_task(&task_setup_data); + ret = rpc_run_task(&task_setup_data); + if (IS_ERR(ret)) + goto out_err; + return ret; +out_put_clp: + nfs_put_client(clp); +out_err: + nfs41_release_slot(slot); + return ret; } static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) @@ -8190,7 +8246,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) return -EAGAIN; - task = _nfs41_proc_sequence(clp, cred, false); + task = _nfs41_proc_sequence(clp, cred, NULL, false); if (IS_ERR(task)) ret = PTR_ERR(task); else @@ -8204,7 +8260,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) struct rpc_task *task; int ret; - task = _nfs41_proc_sequence(clp, cred, true); + task = 
_nfs41_proc_sequence(clp, cred, NULL, true); if (IS_ERR(task)) { ret = PTR_ERR(task); goto out; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index df2b8849a63bfb404dcef3feae8bb851615a5f93..f6588cc6816c885ff8625ba33db318a85cdbe0b1 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3645,7 +3645,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 nfserr = nfserr_resource; goto err_no_verf; } - maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX); + maxcount = svc_max_payload(resp->rqstp); + maxcount = min_t(u32, readdir->rd_maxcount, maxcount); /* * Note the rfc defines rd_maxcount as the size of the * READDIR4resok structure, which includes the verifier above @@ -3659,7 +3660,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */ if (!readdir->rd_dircount) - readdir->rd_dircount = INT_MAX; + readdir->rd_dircount = svc_max_payload(resp->rqstp); readdir->xdr = xdr; readdir->rd_maxcount = maxcount; diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 0747162938296d4baa60f28f73b3080de869a6bb..d76c81323dc167281b5bf10156875fd1c0744c30 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; - __u32 inode_test_mask = 0; - __u32 vfsmount_test_mask = 0; + __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD); + __u32 marks_mask = 0; + __u32 marks_ignored_mask = 0; if (unlikely(!inode_mark && !vfsmount_mark)) { BUG(); @@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell, /* does the inode mark tell us to do something? */ if (inode_mark) { group = inode_mark->group; - inode_test_mask = (mask & ~FS_EVENT_ON_CHILD); - inode_test_mask &= inode_mark->mask; - inode_test_mask &= ~inode_mark->ignored_mask; + marks_mask |= inode_mark->mask; + marks_ignored_mask |= inode_mark->ignored_mask; } /* does the vfsmount_mark tell us to do something? 
*/ if (vfsmount_mark) { - vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD); group = vfsmount_mark->group; - vfsmount_test_mask &= vfsmount_mark->mask; - vfsmount_test_mask &= ~vfsmount_mark->ignored_mask; - if (inode_mark) - vfsmount_test_mask &= ~inode_mark->ignored_mask; + marks_mask |= vfsmount_mark->mask; + marks_ignored_mask |= vfsmount_mark->ignored_mask; } pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p" - " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" + " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x" " data=%p data_is=%d cookie=%d\n", - __func__, group, to_tell, mask, inode_mark, - inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, + __func__, group, to_tell, mask, inode_mark, vfsmount_mark, + marks_mask, marks_ignored_mask, data, data_is, cookie); - if (!inode_test_mask && !vfsmount_test_mask) + if (!(test_mask & marks_mask & ~marks_ignored_mask)) return 0; return group->ops->handle_event(group, to_tell, inode_mark, diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index ab156e35ec00c462d69a4566ba3475cd39d5d176..1b1283f07941bc625008ee0352b8a20afecff163 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4250,10 +4250,11 @@ static int __ocfs2_reflink(struct dentry *old_dentry, static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, bool preserve) { - int error; + int error, had_lock; struct inode *inode = d_inode(old_dentry); struct buffer_head *old_bh = NULL; struct inode *new_orphan_inode = NULL; + struct ocfs2_lock_holder oh; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; @@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, goto out; } + had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1, + &oh); + if (had_lock < 0) { + error = had_lock; + mlog_errno(error); + goto out; + } + /* If the security isn't preserved, we need to re-initialize them. */ if (!preserve) { error = ocfs2_init_security_and_acl(dir, new_orphan_inode, @@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, if (error) mlog_errno(error); } -out: if (!error) { error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, new_dentry); if (error) mlog_errno(error); } + ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock); +out: if (new_orphan_inode) { /* * We need to open_unlock the inode no matter whether we diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 28825a5b6d098f5fbfad1741e0476f7601f41a56..902b72dac41a611397ce04d39d2ab96f5c733602 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat, else stat->result_mask = STATX_BASIC_STATS & ~STATX_SIZE; + + stat->attributes_mask = STATX_ATTR_IMMUTABLE | + STATX_ATTR_APPEND; + if (inode->i_flags & S_IMMUTABLE) + stat->attributes |= STATX_ATTR_IMMUTABLE; + if (inode->i_flags & S_APPEND) + stat->attributes |= STATX_ATTR_APPEND; } return ret; } diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c index f8f3c73d266476eab2298a15a807d2829398b0ff..05b3abbdbc4bf9eee2c6c1738132ad6508015d4e 100644 --- a/fs/orangefs/namei.c +++ b/fs/orangefs/namei.c @@ -314,6 +314,13 @@ static int orangefs_symlink(struct inode *dir, ret = PTR_ERR(inode); goto out; } + /* + * This is necessary because orangefs_inode_getattr will not + * re-read symlink size as it is impossible for it to change. + * Invalidating the cache does not help. 
orangefs_new_inode + * does not set the correct size (it does not know symname). + */ + inode->i_size = strlen(symname); gossip_debug(GOSSIP_NAME_DEBUG, "Assigned symlink inode new number of %pU\n", diff --git a/fs/proc/base.c b/fs/proc/base.c index 292cf94949cf260a56bbf9f222795a445e3d03d6..d1ab37c8a52c5f24ae64bc60f087b0b14a27012c 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1893,6 +1893,12 @@ void task_dump_owner(struct task_struct *task, mode_t mode, kuid_t uid; kgid_t gid; + if (unlikely(task->flags & PF_KTHREAD)) { + *ruid = GLOBAL_ROOT_UID; + *rgid = GLOBAL_ROOT_GID; + return; + } + /* Default to the tasks effective ownership */ rcu_read_lock(); cred = __task_cred(task); @@ -3162,6 +3168,9 @@ static const struct pid_entry tgid_base_stuff[] = { REG("mounts", S_IRUGO, proc_mounts_operations), REG("mountinfo", S_IRUGO, proc_mountinfo_operations), REG("mountstats", S_IRUSR, proc_mountstats_operations), +#ifdef CONFIG_PROCESS_RECLAIM + REG("reclaim", 0200, proc_reclaim_operations), +#endif #ifdef CONFIG_PROC_PAGE_MONITOR REG("clear_refs", S_IWUSR, proc_clear_refs_operations), REG("smaps", S_IRUGO, proc_pid_smaps_operations), diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 5e55186858c962ebde07bce79e5478b799d41c43..c5d76327262b42a1c90219e470ce40402c88c498 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -199,6 +199,7 @@ struct pde_opener { extern const struct inode_operations proc_link_inode_operations; extern const struct inode_operations proc_pid_link_inode_operations; +extern const struct file_operations proc_reclaim_operations; extern void proc_init_inodecache(void); void set_proc_pid_nlink(void); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index d1e82761de813abb95af0faac99194dba5821538..e64ecb9f272090bf6b23772a0d36e56b63b8106a 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg) { struct list_head *head = (struct list_head *)arg; struct kcore_list *ent; + struct page *p; + + if (!pfn_valid(pfn)) + return 1; + + p = pfn_to_page(pfn); + if (!memmap_valid_within(pfn, p, page_zone(p))) + return 1; ent = kmalloc(sizeof(*ent), GFP_KERNEL); if (!ent) return -ENOMEM; - ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT)); + ent->addr = (unsigned long)page_to_virt(p); ent->size = nr_pages << PAGE_SHIFT; - /* Sanity check: Can happen in 32bit arch...maybe */ - if (ent->addr < (unsigned long) __va(0)) + if (!virt_addr_valid(ent->addr)) goto free_out; /* cut not-mapped area. ....from ppc-32 code. 
*/ if (ULONG_MAX - ent->addr < ent->size) ent->size = ULONG_MAX - ent->addr; - /* cut when vmalloc() area is higher than direct-map area */ - if (VMALLOC_START > (unsigned long)__va(0)) { - if (ent->addr > VMALLOC_START) - goto free_out; + /* + * We've already checked virt_addr_valid so we know this address + * is a valid pointer, therefore we can check against it to determine + * if we need to trim + */ + if (VMALLOC_START > ent->addr) { if (VMALLOC_START - ent->addr < ent->size) ent->size = VMALLOC_START - ent->addr; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 0db728ae1ff2afc484cc5a6a6db003ac2c4298e3..8450f848f857b58b496df1fd1613e6b8aa47ee79 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -1221,8 +1223,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, goto out_mm; } for (vma = mm->mmap; vma; vma = vma->vm_next) { - vma->vm_flags &= ~VM_SOFTDIRTY; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, + vma->vm_flags & ~VM_SOFTDIRTY); vma_set_page_prot(vma); + vm_write_end(vma); } downgrade_write(&mm->mmap_sem); break; @@ -1395,9 +1400,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION else if (is_swap_pmd(pmd)) { swp_entry_t entry = pmd_to_swp_entry(pmd); + unsigned long offset = swp_offset(entry); + offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; frame = swp_type(entry) | - (swp_offset(entry) << MAX_SWAPFILES_SHIFT); + (offset << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; @@ -1417,6 +1424,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, break; if (pm->show_pfn && (flags & PM_PRESENT)) frame++; + else if (flags & PM_SWAP) + frame += (1 << MAX_SWAPFILES_SHIFT); } spin_unlock(ptl); return err; @@ -1639,6 +1648,238 @@ const struct file_operations proc_pagemap_operations = { }; #endif /* CONFIG_PROC_PAGE_MONITOR */ +#ifdef CONFIG_PROCESS_RECLAIM +static int reclaim_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct reclaim_param *rp = walk->private; + struct vm_area_struct *vma = rp->vma; + pte_t *pte, ptent; + spinlock_t *ptl; + struct page *page; + LIST_HEAD(page_list); + int isolated; + int reclaimed; + + split_huge_pmd(vma, addr, pmd); + if (pmd_trans_unstable(pmd) || !rp->nr_to_reclaim) + return 0; +cont: + isolated = 0; + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) { + ptent = *pte; + if (!pte_present(ptent)) + continue; + + page = vm_normal_page(vma, addr, ptent); + if (!page) + continue; + + if (isolate_lru_page(page)) + continue; + + list_add(&page->lru, &page_list); + inc_node_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); + isolated++; + rp->nr_scanned++; + if ((isolated >= SWAP_CLUSTER_MAX) || !rp->nr_to_reclaim) + break; + } + pte_unmap_unlock(pte - 1, ptl); + reclaimed = reclaim_pages_from_list(&page_list, vma); + rp->nr_reclaimed += reclaimed; + rp->nr_to_reclaim -= reclaimed; + if (rp->nr_to_reclaim < 0) + rp->nr_to_reclaim = 0; + + if (rp->nr_to_reclaim && (addr != end)) + goto cont; + + cond_resched(); + return 0; +} + +enum reclaim_type { + RECLAIM_FILE, + RECLAIM_ANON, + RECLAIM_ALL, + RECLAIM_RANGE, +}; + +struct reclaim_param reclaim_task_anon(struct task_struct *task, + int nr_to_reclaim) +{ + struct mm_struct *mm; + struct vm_area_struct *vma; + struct 
mm_walk reclaim_walk = {}; + struct reclaim_param rp = { + .nr_to_reclaim = nr_to_reclaim, + }; + + get_task_struct(task); + mm = get_task_mm(task); + if (!mm) + goto out; + + reclaim_walk.mm = mm; + reclaim_walk.pmd_entry = reclaim_pte_range; + + reclaim_walk.private = &rp; + + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (is_vm_hugetlb_page(vma)) + continue; + + if (vma->vm_file) + continue; + + if (!rp.nr_to_reclaim) + break; + + rp.vma = vma; + walk_page_range(vma->vm_start, vma->vm_end, + &reclaim_walk); + } + + flush_tlb_mm(mm); + up_read(&mm->mmap_sem); + mmput(mm); +out: + put_task_struct(task); + return rp; +} + +static ssize_t reclaim_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task; + char buffer[200]; + struct mm_struct *mm; + struct vm_area_struct *vma; + enum reclaim_type type; + char *type_buf; + struct mm_walk reclaim_walk = {}; + unsigned long start = 0; + unsigned long end = 0; + struct reclaim_param rp; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + + if (copy_from_user(buffer, buf, count)) + return -EFAULT; + + type_buf = strstrip(buffer); + if (!strcmp(type_buf, "file")) + type = RECLAIM_FILE; + else if (!strcmp(type_buf, "anon")) + type = RECLAIM_ANON; + else if (!strcmp(type_buf, "all")) + type = RECLAIM_ALL; + else if (isdigit(*type_buf)) + type = RECLAIM_RANGE; + else + goto out_err; + + if (type == RECLAIM_RANGE) { + char *token; + unsigned long long len, len_in, tmp; + + token = strsep(&type_buf, " "); + if (!token) + goto out_err; + tmp = memparse(token, &token); + if (tmp & ~PAGE_MASK || tmp > ULONG_MAX) + goto out_err; + start = tmp; + + token = strsep(&type_buf, " "); + if (!token) + goto out_err; + len_in = memparse(token, &token); + len = (len_in + ~PAGE_MASK) & PAGE_MASK; + if (len > ULONG_MAX) + goto out_err; + /* + * Check to see whether len was rounded up from small -ve + * to zero. 
+ */ + if (len_in && !len) + goto out_err; + + end = start + len; + if (end < start) + goto out_err; + } + + task = get_proc_task(file->f_path.dentry->d_inode); + if (!task) + return -ESRCH; + + mm = get_task_mm(task); + if (!mm) + goto out; + + reclaim_walk.mm = mm; + reclaim_walk.pmd_entry = reclaim_pte_range; + + rp.nr_to_reclaim = INT_MAX; + rp.nr_reclaimed = 0; + reclaim_walk.private = &rp; + + down_read(&mm->mmap_sem); + if (type == RECLAIM_RANGE) { + vma = find_vma(mm, start); + while (vma) { + if (vma->vm_start > end) + break; + if (is_vm_hugetlb_page(vma)) + continue; + + rp.vma = vma; + walk_page_range(max(vma->vm_start, start), + min(vma->vm_end, end), + &reclaim_walk); + vma = vma->vm_next; + } + } else { + for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (is_vm_hugetlb_page(vma)) + continue; + + if (type == RECLAIM_ANON && vma->vm_file) + continue; + + if (type == RECLAIM_FILE && !vma->vm_file) + continue; + + rp.vma = vma; + walk_page_range(vma->vm_start, vma->vm_end, + &reclaim_walk); + } + } + + flush_tlb_mm(mm); + up_read(&mm->mmap_sem); + mmput(mm); +out: + put_task_struct(task); + return count; + +out_err: + return -EINVAL; +} + +const struct file_operations proc_reclaim_operations = { + .write = reclaim_write, + .llseek = noop_llseek, +}; +#endif + #ifdef CONFIG_NUMA struct numa_maps { diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c index b43258684fb933202d40cb9635589731304f6eb2..2de5a4dffa22ce71bf5bd406bb4c3927227fc1da 100644 --- a/fs/sdcardfs/inode.c +++ b/fs/sdcardfs/inode.c @@ -270,6 +270,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode struct dentry *lower_dentry; struct vfsmount *lower_mnt; struct dentry *lower_parent_dentry = NULL; + struct dentry *parent_dentry = NULL; struct path lower_path; struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb); const struct cred *saved_cred = NULL; @@ -289,11 +290,14 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir)); /* check disk space */ - if (!check_min_free_space(dentry, 0, 1)) { + parent_dentry = dget_parent(dentry); + if (!check_min_free_space(parent_dentry, 0, 1)) { pr_err("sdcardfs: No minimum free space.\n"); err = -ENOSPC; + dput(parent_dentry); goto out_revert; } + dput(parent_dentry); /* the lower_dentry is negative here */ sdcardfs_get_lower_path(dentry, &lower_path); diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 04c4ec6483e5208b043724a6f8d5fefb85976977..8ae1cd8611cc4c719a9470dbd4585b1b630d4a5e 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -1283,10 +1283,11 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in int *new_len) { void *buf; - int err, dlen, compr_type, out_len, old_dlen; + int err, compr_type; + u32 dlen, out_len, old_dlen; out_len = le32_to_cpu(dn->size); - buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS); + buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS); if (!buf) return -ENOMEM; diff --git a/fs/udf/directory.c b/fs/udf/directory.c index 7aa48bd7cbaf317b2301305137d7d9687bd211e8..a636b3b172199c6530675881c8e6ce2f7baf2e49 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c @@ -151,6 +151,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, sizeof(struct fileIdentDesc)); } } + /* Got last entry outside of dir size - fs is corrupted! 
*/ + if (*nf_pos > dir->i_size) + return NULL; return fi; } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 5811bb06a0cc5d27d5106104fbc4c6408851c64c..6b2d138a4923e0dda5d55b2abbb89b07750025b0 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -656,8 +656,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { + vm_write_begin(vma); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + WRITE_ONCE(vma->vm_flags, + vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING)); + vm_write_end(vma); return 0; } @@ -883,8 +886,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file) vma = prev; else prev = vma; - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + vm_write_end(vma); } up_write(&mm->mmap_sem); mmput(mm); @@ -1443,8 +1448,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, * the next vma was merged into the current one and * the current one has not been updated yet. */ - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); vma->vm_userfaultfd_ctx.ctx = ctx; + vm_write_end(vma); skip: prev = vma; @@ -1602,8 +1609,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, * the next vma was merged into the current one and * the current one has not been updated yet. */ - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + vm_write_end(vma); skip: prev = vma; diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 3c2024d8e6f5f35989d39d798a686861d3dd3433..328f232f33eef1cb5bd4c8c68c5f1fe9f8aa5bc9 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -32,6 +32,7 @@ struct mipi_dsi_device; * @type: payload data type * @flags: flags controlling this message transmission * @ctrl: ctrl index to transmit on + * @wait_ms: duration in ms to wait after message transmission * @tx_len: length of @tx_buf * @tx_buf: data to be written * @rx_len: length of @rx_buf @@ -42,6 +43,7 @@ struct mipi_dsi_msg { u8 type; u16 flags; u32 ctrl; + u32 wait_ms; size_t tx_len; const void *tx_buf; diff --git a/include/dt-bindings/clock/mdss-28nm-pll-clk.h b/include/dt-bindings/clock/mdss-28nm-pll-clk.h new file mode 100644 index 0000000000000000000000000000000000000000..45e12ca5f923271600b5006c2d493d850ecfefca --- /dev/null +++ b/include/dt-bindings/clock/mdss-28nm-pll-clk.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MDSS_28NM_PLL_CLK_H +#define __MDSS_28NM_PLL_CLK_H + +/* DSI PLL clocks */ +#define VCO_CLK_0 0 +#define ANALOG_POSTDIV_0_CLK 1 +#define INDIRECT_PATH_SRC_0_CLK 2 +#define BYTECLK_SRC_MUX_0_CLK 3 +#define BYTECLK_SRC_0_CLK 4 +#define PCLK_SRC_0_CLK 5 +#define VCO_CLK_1 6 +#define ANALOG_POSTDIV_1_CLK 7 +#define INDIRECT_PATH_SRC_1_CLK 8 +#define BYTECLK_SRC_MUX_1_CLK 9 +#define BYTECLK_SRC_1_CLK 10 +#define PCLK_SRC_1_CLK 11 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-qcs405.h b/include/dt-bindings/clock/qcom,gcc-qcs405.h index 6c2bed052f0430f878ddbedae73bd8c0b63f6431..6a122d262c049f67fac3c6594a57bbd6e367a001 100644 --- a/include/dt-bindings/clock/qcom,gcc-qcs405.h +++ b/include/dt-bindings/clock/qcom,gcc-qcs405.h @@ -14,133 +14,133 @@ #ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS405_H #define _DT_BINDINGS_CLK_QCOM_GCC_QCS405_H -#define APSS_AHB_CLK_SRC 0 -#define BLSP1_QUP0_I2C_APPS_CLK_SRC 1 -#define BLSP1_QUP0_SPI_APPS_CLK_SRC 2 -#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3 -#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4 -#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5 -#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6 -#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7 -#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8 -#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9 -#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10 -#define BLSP1_UART0_APPS_CLK_SRC 11 -#define BLSP1_UART1_APPS_CLK_SRC 12 -#define BLSP1_UART2_APPS_CLK_SRC 13 -#define BLSP1_UART3_APPS_CLK_SRC 14 -#define BLSP2_QUP0_I2C_APPS_CLK_SRC 15 -#define BLSP2_QUP0_SPI_APPS_CLK_SRC 16 -#define BLSP2_UART0_APPS_CLK_SRC 17 -#define BYTE0_CLK_SRC 18 -#define EMAC_CLK_SRC 19 -#define EMAC_PTP_CLK_SRC 20 -#define ESC0_CLK_SRC 21 -#define GCC_APSS_AHB_CLK 22 -#define GCC_APSS_AXI_CLK 23 -#define GCC_BIMC_APSS_AXI_CLK 24 -#define GCC_BIMC_GFX_CLK 25 -#define GCC_BIMC_MDSS_CLK 26 -#define GCC_BLSP1_AHB_CLK 27 -#define GCC_BLSP1_QUP0_I2C_APPS_CLK 28 -#define GCC_BLSP1_QUP0_SPI_APPS_CLK 29 -#define GCC_BLSP1_QUP1_I2C_APPS_CLK 30 -#define GCC_BLSP1_QUP1_SPI_APPS_CLK 31 -#define GCC_BLSP1_QUP2_I2C_APPS_CLK 32 -#define GCC_BLSP1_QUP2_SPI_APPS_CLK 33 -#define GCC_BLSP1_QUP3_I2C_APPS_CLK 34 -#define GCC_BLSP1_QUP3_SPI_APPS_CLK 35 -#define GCC_BLSP1_QUP4_I2C_APPS_CLK 36 -#define GCC_BLSP1_QUP4_SPI_APPS_CLK 37 -#define GCC_BLSP1_UART0_APPS_CLK 38 -#define GCC_BLSP1_UART1_APPS_CLK 39 -#define GCC_BLSP1_UART2_APPS_CLK 40 -#define GCC_BLSP1_UART3_APPS_CLK 41 -#define GCC_BLSP2_AHB_CLK 42 -#define GCC_BLSP2_QUP0_I2C_APPS_CLK 43 -#define GCC_BLSP2_QUP0_SPI_APPS_CLK 44 -#define GCC_BLSP2_UART0_APPS_CLK 45 -#define GCC_BOOT_ROM_AHB_CLK 46 -#define GCC_DCC_CLK 47 -#define GCC_GENI_IR_H_CLK 48 -#define GCC_ETH_AXI_CLK 49 -#define GCC_ETH_PTP_CLK 50 -#define GCC_ETH_RGMII_CLK 51 -#define GCC_ETH_SLAVE_AHB_CLK 52 -#define GCC_GENI_IR_S_CLK 53 -#define GCC_GP1_CLK 54 -#define GCC_GP2_CLK 55 -#define GCC_GP3_CLK 56 -#define GCC_MDSS_AHB_CLK 57 -#define GCC_MDSS_AXI_CLK 58 -#define GCC_MDSS_BYTE0_CLK 59 -#define GCC_MDSS_ESC0_CLK 60 -#define GCC_MDSS_HDMI_APP_CLK 61 -#define GCC_MDSS_HDMI_PCLK_CLK 62 -#define GCC_MDSS_MDP_CLK 63 -#define GCC_MDSS_PCLK0_CLK 64 -#define GCC_MDSS_VSYNC_CLK 65 -#define GCC_OXILI_AHB_CLK 66 -#define GCC_OXILI_GFX3D_CLK 67 -#define GCC_PCIE_0_AUX_CLK 68 -#define GCC_PCIE_0_CFG_AHB_CLK 69 -#define GCC_PCIE_0_MSTR_AXI_CLK 70 -#define GCC_PCIE_0_PIPE_CLK 71 -#define GCC_PCIE_0_SLV_AXI_CLK 72 -#define GCC_PCNOC_USB2_CLK 73 -#define GCC_PCNOC_USB3_CLK 74 -#define GCC_PDM2_CLK 75 -#define GCC_PDM_AHB_CLK 76 -#define VSYNC_CLK_SRC 77 -#define GCC_PRNG_AHB_CLK 78 -#define GCC_PWM0_XO512_CLK 79 
-#define GCC_PWM1_XO512_CLK 80 -#define GCC_PWM2_XO512_CLK 81 -#define GCC_SDCC1_AHB_CLK 82 -#define GCC_SDCC1_APPS_CLK 83 -#define GCC_SDCC1_ICE_CORE_CLK 84 -#define GCC_SDCC2_AHB_CLK 85 -#define GCC_SDCC2_APPS_CLK 86 -#define GCC_SYS_NOC_USB3_CLK 87 -#define GCC_USB20_MOCK_UTMI_CLK 88 -#define GCC_USB2A_PHY_SLEEP_CLK 89 -#define GCC_USB30_MASTER_CLK 90 -#define GCC_USB30_MOCK_UTMI_CLK 91 -#define GCC_USB30_SLEEP_CLK 92 -#define GCC_USB3_PHY_AUX_CLK 93 -#define GCC_USB3_PHY_PIPE_CLK 94 -#define GCC_USB_HS_PHY_CFG_AHB_CLK 95 -#define GCC_USB_HS_SYSTEM_CLK 96 -#define GFX3D_CLK_SRC 97 -#define GP1_CLK_SRC 98 -#define GP2_CLK_SRC 99 -#define GP3_CLK_SRC 100 -#define GPLL0_OUT_MAIN 101 -#define GPLL1_OUT_MAIN 102 -#define GPLL3_OUT_MAIN 103 -#define GPLL4_OUT_MAIN 104 -#define HDMI_APP_CLK_SRC 105 -#define HDMI_PCLK_CLK_SRC 106 -#define MDP_CLK_SRC 107 -#define PCIE_0_AUX_CLK_SRC 108 -#define PCIE_0_PIPE_CLK_SRC 109 -#define PCLK0_CLK_SRC 110 -#define PDM2_CLK_SRC 111 -#define SDCC1_APPS_CLK_SRC 112 -#define SDCC1_ICE_CORE_CLK_SRC 113 -#define SDCC2_APPS_CLK_SRC 114 -#define USB20_MOCK_UTMI_CLK_SRC 115 -#define USB30_MASTER_CLK_SRC 116 -#define USB30_MOCK_UTMI_CLK_SRC 117 -#define USB3_PHY_AUX_CLK_SRC 118 -#define USB_HS_SYSTEM_CLK_SRC 119 -#define GPLL0_AO_CLK_SRC 120 -#define WCNSS_M_CLK 121 -#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 122 -#define GPLL0_AO_OUT_MAIN 123 -#define GPLL0_SLEEP_CLK_SRC 124 -#define GPLL6 125 -#define GPLL6_OUT_AUX 126 +#define GPLL0_OUT_MAIN 0 +#define GPLL0_AO_CLK_SRC 1 +#define GPLL1_OUT_MAIN 2 +#define GPLL3_OUT_MAIN 3 +#define GPLL4_OUT_MAIN 4 +#define GPLL0_AO_OUT_MAIN 5 +#define GPLL0_SLEEP_CLK_SRC 6 +#define GPLL6 7 +#define GPLL6_OUT_AUX 8 +#define APSS_AHB_CLK_SRC 9 +#define BLSP1_QUP0_I2C_APPS_CLK_SRC 10 +#define BLSP1_QUP0_SPI_APPS_CLK_SRC 11 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 12 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 13 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 14 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 15 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 16 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 17 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 18 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 19 +#define BLSP1_UART0_APPS_CLK_SRC 20 +#define BLSP1_UART1_APPS_CLK_SRC 21 +#define BLSP1_UART2_APPS_CLK_SRC 22 +#define BLSP1_UART3_APPS_CLK_SRC 23 +#define BLSP2_QUP0_I2C_APPS_CLK_SRC 24 +#define BLSP2_QUP0_SPI_APPS_CLK_SRC 25 +#define BLSP2_UART0_APPS_CLK_SRC 26 +#define BYTE0_CLK_SRC 27 +#define EMAC_CLK_SRC 28 +#define EMAC_PTP_CLK_SRC 29 +#define ESC0_CLK_SRC 30 +#define GCC_APSS_AHB_CLK 31 +#define GCC_APSS_AXI_CLK 32 +#define GCC_BIMC_APSS_AXI_CLK 33 +#define GCC_BIMC_GFX_CLK 34 +#define GCC_BIMC_MDSS_CLK 35 +#define GCC_BLSP1_AHB_CLK 36 +#define GCC_BLSP1_QUP0_I2C_APPS_CLK 37 +#define GCC_BLSP1_QUP0_SPI_APPS_CLK 38 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 39 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 40 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 41 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 42 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 43 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 44 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 45 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 46 +#define GCC_BLSP1_UART0_APPS_CLK 47 +#define GCC_BLSP1_UART1_APPS_CLK 48 +#define GCC_BLSP1_UART2_APPS_CLK 49 +#define GCC_BLSP1_UART3_APPS_CLK 50 +#define GCC_BLSP2_AHB_CLK 51 +#define GCC_BLSP2_QUP0_I2C_APPS_CLK 52 +#define GCC_BLSP2_QUP0_SPI_APPS_CLK 53 +#define GCC_BLSP2_UART0_APPS_CLK 54 +#define GCC_BOOT_ROM_AHB_CLK 55 +#define GCC_DCC_CLK 56 +#define GCC_GENI_IR_H_CLK 57 +#define GCC_ETH_AXI_CLK 58 +#define GCC_ETH_PTP_CLK 59 +#define GCC_ETH_RGMII_CLK 60 
+#define GCC_ETH_SLAVE_AHB_CLK 61 +#define GCC_GENI_IR_S_CLK 62 +#define GCC_GP1_CLK 63 +#define GCC_GP2_CLK 64 +#define GCC_GP3_CLK 65 +#define GCC_MDSS_AHB_CLK 66 +#define GCC_MDSS_AXI_CLK 67 +#define GCC_MDSS_BYTE0_CLK 68 +#define GCC_MDSS_ESC0_CLK 69 +#define GCC_MDSS_HDMI_APP_CLK 70 +#define GCC_MDSS_HDMI_PCLK_CLK 71 +#define GCC_MDSS_MDP_CLK 72 +#define GCC_MDSS_PCLK0_CLK 73 +#define GCC_MDSS_VSYNC_CLK 74 +#define GCC_OXILI_AHB_CLK 75 +#define GCC_OXILI_GFX3D_CLK 76 +#define GCC_PCIE_0_AUX_CLK 77 +#define GCC_PCIE_0_CFG_AHB_CLK 78 +#define GCC_PCIE_0_MSTR_AXI_CLK 79 +#define GCC_PCIE_0_PIPE_CLK 80 +#define GCC_PCIE_0_SLV_AXI_CLK 81 +#define GCC_PCNOC_USB2_CLK 82 +#define GCC_PCNOC_USB3_CLK 83 +#define GCC_PDM2_CLK 84 +#define GCC_PDM_AHB_CLK 85 +#define VSYNC_CLK_SRC 86 +#define GCC_PRNG_AHB_CLK 87 +#define GCC_PWM0_XO512_CLK 88 +#define GCC_PWM1_XO512_CLK 89 +#define GCC_PWM2_XO512_CLK 90 +#define GCC_SDCC1_AHB_CLK 91 +#define GCC_SDCC1_APPS_CLK 92 +#define GCC_SDCC1_ICE_CORE_CLK 93 +#define GCC_SDCC2_AHB_CLK 94 +#define GCC_SDCC2_APPS_CLK 95 +#define GCC_SYS_NOC_USB3_CLK 96 +#define GCC_USB20_MOCK_UTMI_CLK 97 +#define GCC_USB2A_PHY_SLEEP_CLK 98 +#define GCC_USB30_MASTER_CLK 99 +#define GCC_USB30_MOCK_UTMI_CLK 100 +#define GCC_USB30_SLEEP_CLK 101 +#define GCC_USB3_PHY_AUX_CLK 102 +#define GCC_USB3_PHY_PIPE_CLK 103 +#define GCC_USB_HS_PHY_CFG_AHB_CLK 104 +#define GCC_USB_HS_SYSTEM_CLK 105 +#define GFX3D_CLK_SRC 106 +#define GP1_CLK_SRC 107 +#define GP2_CLK_SRC 108 +#define GP3_CLK_SRC 109 +#define HDMI_APP_CLK_SRC 110 +#define HDMI_PCLK_CLK_SRC 111 +#define MDP_CLK_SRC 112 +#define PCIE_0_AUX_CLK_SRC 113 +#define PCIE_0_PIPE_CLK_SRC 114 +#define PCLK0_CLK_SRC 115 +#define PDM2_CLK_SRC 116 +#define SDCC1_APPS_CLK_SRC 117 +#define SDCC1_ICE_CORE_CLK_SRC 118 +#define SDCC2_APPS_CLK_SRC 119 +#define USB20_MOCK_UTMI_CLK_SRC 120 +#define USB30_MASTER_CLK_SRC 121 +#define USB30_MOCK_UTMI_CLK_SRC 122 +#define USB3_PHY_AUX_CLK_SRC 123 +#define USB_HS_SYSTEM_CLK_SRC 124 +#define WCNSS_M_CLK 125 +#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 126 #define MDSS_MDP_VOTE_CLK 127 #define MDSS_ROTATOR_VOTE_CLK 128 #define GCC_BIMC_GPU_CLK 129 diff --git a/include/dt-bindings/clock/qcom,gcc-sdxprairie.h b/include/dt-bindings/clock/qcom,gcc-sdxprairie.h new file mode 100644 index 0000000000000000000000000000000000000000..18fe72a018d6ebf5de3ee252b8e5bc0c35436c26 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-sdxprairie.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_SDXPRAIRIE_H +#define _DT_BINDINGS_CLK_MSM_GCC_SDXPRAIRIE_H + +/* GCC clock registers */ +#define GCC_BLSP1_AHB_CLK 0 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 1 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 2 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 3 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 5 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 6 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 7 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 8 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 9 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 10 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 11 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 12 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 13 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 14 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 15 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 16 +#define GCC_BLSP1_SLEEP_CLK 17 +#define GCC_BLSP1_UART1_APPS_CLK 18 +#define GCC_BLSP1_UART1_APPS_CLK_SRC 19 +#define GCC_BLSP1_UART2_APPS_CLK 20 +#define GCC_BLSP1_UART2_APPS_CLK_SRC 21 +#define GCC_BLSP1_UART3_APPS_CLK 22 +#define GCC_BLSP1_UART3_APPS_CLK_SRC 23 +#define GCC_BLSP1_UART4_APPS_CLK 24 +#define GCC_BLSP1_UART4_APPS_CLK_SRC 25 +#define GCC_BOOT_ROM_AHB_CLK 26 +#define GCC_CE1_AHB_CLK 27 +#define GCC_CE1_AXI_CLK 28 +#define GCC_CE1_CLK 29 +#define GCC_CPUSS_AHB_CLK 30 +#define GCC_CPUSS_AHB_CLK_SRC 31 +#define GCC_CPUSS_GNOC_CLK 32 +#define GCC_CPUSS_RBCPR_CLK 33 +#define GCC_CPUSS_RBCPR_CLK_SRC 34 +#define GCC_EMAC_CLK_SRC 35 +#define GCC_EMAC_PTP_CLK_SRC 36 +#define GCC_ETH_AXI_CLK 37 +#define GCC_ETH_PTP_CLK 38 +#define GCC_ETH_RGMII_CLK 39 +#define GCC_ETH_SLAVE_AHB_CLK 40 +#define GCC_GP1_CLK 41 +#define GCC_GP1_CLK_SRC 42 +#define GCC_GP2_CLK 43 +#define GCC_GP2_CLK_SRC 44 +#define GCC_GP3_CLK 45 +#define GCC_GP3_CLK_SRC 46 +#define GCC_PCIE_0_CLKREF_CLK 47 +#define GCC_PCIE_AUX_CLK 48 +#define GCC_PCIE_AUX_PHY_CLK_SRC 49 +#define GCC_PCIE_CFG_AHB_CLK 50 +#define GCC_PCIE_MSTR_AXI_CLK 51 +#define GCC_PCIE_PHY_REFGEN_CLK 52 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 53 +#define GCC_PCIE_PIPE_CLK 54 +#define GCC_PCIE_SLEEP_CLK 55 +#define GCC_PCIE_SLV_AXI_CLK 56 +#define GCC_PCIE_SLV_Q2A_AXI_CLK 57 +#define GCC_PDM2_CLK 58 +#define GCC_PDM2_CLK_SRC 59 +#define GCC_PDM_AHB_CLK 60 +#define GCC_PDM_XO4_CLK 61 +#define GCC_PRNG_AHB_CLK 62 +#define GCC_SDCC1_AHB_CLK 63 +#define GCC_SDCC1_APPS_CLK 64 +#define GCC_SDCC1_APPS_CLK_SRC 65 +#define GCC_SPMI_FETCHER_AHB_CLK 66 +#define GCC_SPMI_FETCHER_CLK 67 +#define GCC_SPMI_FETCHER_CLK_SRC 68 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 69 +#define GCC_SYS_NOC_USB3_CLK 70 +#define GCC_USB30_MASTER_CLK 71 +#define GCC_USB30_MASTER_CLK_SRC 72 +#define GCC_USB30_MOCK_UTMI_CLK 73 +#define GCC_USB30_MOCK_UTMI_CLK_SRC 74 +#define GCC_USB30_SLEEP_CLK 75 +#define GCC_USB3_PHY_AUX_CLK 76 +#define GCC_USB3_PHY_AUX_CLK_SRC 77 +#define GCC_USB3_PHY_PIPE_CLK 78 +#define GCC_USB3_PRIM_CLKREF_CLK 79 +#define GCC_USB_PHY_CFG_AHB2PHY_CLK 80 +#define GPLL0 81 +#define GPLL0_OUT_EVEN 82 +#define GPLL4 83 +#define GPLL4_OUT_EVEN 84 + +/* CPU clocks */ +#define CLOCK_A7SS 0 + +/* GCC reset clocks */ +#define GCC_BLSP1_QUP1_BCR 0 +#define GCC_BLSP1_QUP2_BCR 1 +#define GCC_BLSP1_QUP3_BCR 2 +#define GCC_BLSP1_QUP4_BCR 3 +#define GCC_BLSP1_UART2_BCR 4 +#define GCC_BLSP1_UART3_BCR 5 +#define GCC_BLSP1_UART4_BCR 6 +#define GCC_CE1_BCR 7 +#define GCC_PCIE_BCR 8 +#define GCC_PCIE_PHY_BCR 9 +#define GCC_PDM_BCR 10 +#define GCC_PRNG_BCR 11 +#define GCC_SDCC1_BCR 12 +#define GCC_SPMI_FETCHER_BCR 13 +#define GCC_USB30_BCR 14 +#define GCC_USB3_PHY_BCR 15 +#define 
GCC_USB3PHY_PHY_BCR 16 +#define GCC_QUSB2PHY_BCR 17 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 18 +#define GCC_EMAC_BCR 19 + +/* Dummy clocks for rate measurement */ +#define MEASURE_ONLY_IPA_2X_CLK 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm6150.h b/include/dt-bindings/clock/qcom,gcc-sm6150.h index 711f19c1d55dc7d90ac2c50651c01a69d2bc4bfd..2b1678111e43408bd643bbb685059d74a484b5fe 100644 --- a/include/dt-bindings/clock/qcom,gcc-sm6150.h +++ b/include/dt-bindings/clock/qcom,gcc-sm6150.h @@ -14,214 +14,195 @@ #ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6150_H #define _DT_BINDINGS_CLK_QCOM_GCC_SM6150_H -#define GCC_AGGRE_UFS_PHY_AXI_CLK 0 -#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 1 -#define GCC_AGGRE_USB2_SEC_AXI_CLK 2 -#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3 -#define GCC_AHB2PHY_EAST_CLK 4 -#define GCC_AHB2PHY_WEST_CLK 5 -#define GCC_APC_VS_CLK 6 -#define GCC_BOOT_ROM_AHB_CLK 7 -#define GCC_CAMERA_AHB_CLK 8 -#define GCC_CAMERA_HF_AXI_CLK 9 -#define GCC_CAMERA_XO_CLK 10 -#define GCC_CE1_AHB_CLK 11 -#define GCC_CE1_AXI_CLK 12 -#define GCC_CE1_CLK 13 -#define GCC_CFG_NOC_USB2_SEC_AXI_CLK 14 -#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 15 -#define GCC_CPUSS_AHB_CLK 16 -#define GCC_CPUSS_AHB_CLK_SRC 17 -#define GCC_CPUSS_GNOC_CLK 18 -#define GCC_DDRSS_GPU_AXI_CLK 19 -#define GCC_DISP_AHB_CLK 20 -#define GCC_DISP_GPLL0_DIV_CLK_SRC 21 -#define GCC_DISP_HF_AXI_CLK 22 -#define GCC_DISP_XO_CLK 23 -#define GCC_EMAC_AXI_CLK 24 -#define GCC_EMAC_PTP_CLK 25 -#define GCC_EMAC_PTP_CLK_SRC 26 -#define GCC_EMAC_RGMII_CLK 27 -#define GCC_EMAC_RGMII_CLK_SRC 28 -#define GCC_EMAC_SLV_AHB_CLK 29 -#define GCC_GP1_CLK 30 -#define GCC_GP1_CLK_SRC 31 -#define GCC_GP2_CLK 32 -#define GCC_GP2_CLK_SRC 33 -#define GCC_GP3_CLK 34 -#define GCC_GP3_CLK_SRC 35 -#define GCC_GPU_CFG_AHB_CLK 36 -#define GCC_GPU_GPLL0_CLK_SRC 37 -#define GCC_GPU_GPLL0_DIV_CLK_SRC 38 -#define GCC_GPU_IREF_CLK 39 -#define GCC_GPU_MEMNOC_GFX_CLK 40 -#define GCC_GPU_SNOC_DVM_GFX_CLK 41 -#define GCC_MSS_AXIS2_CLK 42 -#define GCC_MSS_CFG_AHB_CLK 43 -#define GCC_MSS_GPLL0_DIV_CLK_SRC 44 -#define GCC_MSS_MFAB_AXIS_CLK 45 -#define GCC_MSS_Q6_MEMNOC_AXI_CLK 46 -#define GCC_MSS_SNOC_AXI_CLK 47 -#define GCC_MSS_VS_CLK 48 -#define GCC_PCIE0_PHY_REFGEN_CLK 49 -#define GCC_PCIE_0_AUX_CLK 50 -#define GCC_PCIE_0_AUX_CLK_SRC 51 -#define GCC_PCIE_0_CFG_AHB_CLK 52 -#define GCC_PCIE_0_CLKREF_CLK 53 -#define GCC_PCIE_0_MSTR_AXI_CLK 54 -#define GCC_PCIE_0_PIPE_CLK 55 -#define GCC_PCIE_0_SLV_AXI_CLK 56 -#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57 -#define GCC_PCIE_PHY_AUX_CLK 58 -#define GCC_PCIE_PHY_REFGEN_CLK_SRC 59 -#define GCC_PDM2_CLK 60 -#define GCC_PDM2_CLK_SRC 61 -#define GCC_PDM_AHB_CLK 62 -#define GCC_PDM_XO4_CLK 63 -#define GCC_PRNG_AHB_CLK 64 -#define GCC_QMIP_CAMERA_NRT_AHB_CLK 65 -#define GCC_QMIP_DISP_AHB_CLK 66 -#define GCC_QMIP_PCIE_AHB_CLK 67 -#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 68 -#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 69 -#define GCC_QSPI_CORE_CLK 70 -#define GCC_QSPI_CORE_CLK_SRC 71 -#define GCC_QUPV3_WRAP0_CORE_2X_CLK 72 -#define GCC_QUPV3_WRAP0_CORE_CLK 73 -#define GCC_QUPV3_WRAP0_S0_CLK 74 -#define GCC_QUPV3_WRAP0_S0_CLK_SRC 75 -#define GCC_QUPV3_WRAP0_S1_CLK 76 -#define GCC_QUPV3_WRAP0_S1_CLK_SRC 77 -#define GCC_QUPV3_WRAP0_S2_CLK 78 -#define GCC_QUPV3_WRAP0_S2_CLK_SRC 79 -#define GCC_QUPV3_WRAP0_S3_CLK 80 -#define GCC_QUPV3_WRAP0_S3_CLK_SRC 81 -#define GCC_QUPV3_WRAP0_S4_CLK 82 -#define GCC_QUPV3_WRAP0_S4_CLK_SRC 83 -#define GCC_QUPV3_WRAP0_S5_CLK 84 -#define GCC_QUPV3_WRAP0_S5_CLK_SRC 85 -#define GCC_QUPV3_WRAP1_CORE_2X_CLK 86 -#define 
GCC_QUPV3_WRAP1_CORE_CLK 87 -#define GCC_QUPV3_WRAP1_S0_CLK 88 -#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89 -#define GCC_QUPV3_WRAP1_S1_CLK 90 -#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91 -#define GCC_QUPV3_WRAP1_S2_CLK 92 -#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93 -#define GCC_QUPV3_WRAP1_S3_CLK 94 -#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95 -#define GCC_QUPV3_WRAP1_S4_CLK 96 -#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97 -#define GCC_QUPV3_WRAP1_S5_CLK 98 -#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99 -#define GCC_QUPV3_WRAP_0_M_AHB_CLK 100 -#define GCC_QUPV3_WRAP_0_S_AHB_CLK 101 -#define GCC_QUPV3_WRAP_1_M_AHB_CLK 102 -#define GCC_QUPV3_WRAP_1_S_AHB_CLK 103 -#define GCC_SDCC1_AHB_CLK 104 -#define GCC_SDCC1_APPS_CLK 105 -#define GCC_SDCC1_APPS_CLK_SRC 106 -#define GCC_SDCC1_ICE_CORE_CLK 107 -#define GCC_SDCC1_ICE_CORE_CLK_SRC 108 -#define GCC_SDCC2_AHB_CLK 109 -#define GCC_SDCC2_APPS_CLK 110 -#define GCC_SDCC2_APPS_CLK_SRC 111 -#define GCC_SYS_NOC_CPUSS_AHB_CLK 112 -#define GCC_UFS_CARD_CLKREF_CLK 113 -#define GCC_UFS_MEM_CLKREF_CLK 114 -#define GCC_UFS_PHY_AHB_CLK 115 -#define GCC_UFS_PHY_AXI_CLK 116 -#define GCC_UFS_PHY_AXI_CLK_SRC 117 -#define GCC_UFS_PHY_AXI_HW_CTL_CLK 118 -#define GCC_UFS_PHY_ICE_CORE_CLK 119 -#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 120 -#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 121 -#define GCC_UFS_PHY_PHY_AUX_CLK 122 -#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 123 -#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 124 -#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 125 -#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 126 -#define GCC_UFS_PHY_UNIPRO_CORE_CLK 127 -#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 128 -#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 129 -#define GCC_USB20_SEC_MASTER_CLK 130 -#define GCC_USB20_SEC_MASTER_CLK_SRC 131 -#define GCC_USB20_SEC_MOCK_UTMI_CLK 132 -#define GCC_USB20_SEC_MOCK_UTMI_CLK_SRC 133 -#define GCC_USB20_SEC_SLEEP_CLK 134 -#define GCC_USB2_SEC_PHY_AUX_CLK 135 -#define GCC_USB2_SEC_PHY_AUX_CLK_SRC 136 -#define GCC_USB2_SEC_PHY_COM_AUX_CLK 137 -#define GCC_USB2_SEC_PHY_PIPE_CLK 138 -#define GCC_USB30_PRIM_MASTER_CLK 139 -#define GCC_USB30_PRIM_MASTER_CLK_SRC 140 -#define GCC_USB30_PRIM_MOCK_UTMI_CLK 141 -#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 142 -#define GCC_USB30_PRIM_SLEEP_CLK 143 -#define GCC_USB3_PRIM_CLKREF_CLK 144 -#define GCC_USB3_PRIM_PHY_AUX_CLK 145 -#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 146 -#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 147 -#define GCC_USB3_PRIM_PHY_PIPE_CLK 148 -#define GCC_USB3_SEC_CLKREF_CLK 149 -#define GCC_VDDA_VS_CLK 150 -#define GCC_VDDCX_VS_CLK 151 -#define GCC_VDDMX_VS_CLK 152 -#define GCC_VIDEO_AHB_CLK 153 -#define GCC_VIDEO_AXI0_CLK 154 -#define GCC_VIDEO_XO_CLK 155 -#define GCC_VS_CTRL_AHB_CLK 156 -#define GCC_VS_CTRL_CLK 157 -#define GCC_VS_CTRL_CLK_SRC 158 -#define GCC_VSENSOR_CLK_SRC 159 -#define GCC_WCSS_VS_CLK 160 -#define GPLL0 161 -#define GPLL0_OUT_AUX2 162 -#define GPLL0_OUT_MAIN 163 -#define GPLL6 164 -#define GPLL6_OUT_MAIN 165 -#define GPLL7 166 -#define GPLL7_OUT_MAIN 167 -#define GPLL8 168 -#define GPLL8_OUT_MAIN 169 +/* Hardware and dummy clocks for rate measurement */ +#define GPLL0_OUT_AUX2 0 +#define MEASURE_ONLY_SNOC_CLK 1 +#define MEASURE_ONLY_CNOC_CLK 2 +#define MEASURE_ONLY_BIMC_CLK 3 +#define MEASURE_ONLY_IPA_2X_CLK 4 -#define GCC_EMAC_BCR 0 -#define GCC_GPU_BCR 1 -#define GCC_MMSS_BCR 2 -#define GCC_PCIE_0_BCR 3 -#define GCC_PCIE_0_LINK_DOWN_BCR 4 -#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 -#define GCC_PCIE_0_PHY_BCR 6 -#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 -#define GCC_PCIE_PHY_BCR 8 -#define GCC_PCIE_PHY_CFG_AHB_BCR 9 -#define GCC_PCIE_PHY_COM_BCR 10 
-#define GCC_PDM_BCR 11 -#define GCC_PRNG_BCR 12 -#define GCC_QSPI_BCR 13 -#define GCC_QUPV3_WRAPPER_0_BCR 14 -#define GCC_QUPV3_WRAPPER_1_BCR 15 -#define GCC_QUSB2PHY_PRIM_BCR 16 -#define GCC_QUSB2PHY_SEC_BCR 17 -#define GCC_SDCC1_BCR 18 -#define GCC_SDCC2_BCR 19 -#define GCC_UFS_PHY_BCR 20 -#define GCC_USB20_SEC_BCR 21 -#define GCC_USB2_PHY_SEC_BCR 22 -#define GCC_USB30_PRIM_BCR 23 -#define GCC_USB3_DP_PHY_PRIM_SP0_BCR 24 -#define GCC_USB3_DP_PHY_PRIM_SP1_BCR 25 -#define GCC_USB3_DP_PHY_SEC_BCR 26 -#define GCC_USB3_PHY_PRIM_SP0_BCR 27 -#define GCC_USB3_PHY_PRIM_SP1_BCR 28 -#define GCC_USB3_UNIPHY_MP0_BCR 29 -#define GCC_USB3_UNIPHY_MP1_BCR 30 -#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 31 -#define GCC_USB3PHY_PHY_PRIM_SP1_BCR 32 -#define GCC_USB3PHY_PHY_SEC_BCR 33 -#define GCC_USB3UNIPHY_PHY_MP0_BCR 34 -#define GCC_USB3UNIPHY_PHY_MP1_BCR 35 -#define GCC_USB_PHY_CFG_AHB2PHY_BCR 36 -#define GCC_VS_BCR 37 +/* GCC clock registers */ +#define GPLL0_OUT_MAIN 5 +#define GPLL6_OUT_MAIN 6 +#define GPLL7_OUT_MAIN 7 +#define GPLL8_OUT_MAIN 8 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 9 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 10 +#define GCC_AGGRE_USB2_SEC_AXI_CLK 11 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12 +#define GCC_AHB2PHY_EAST_CLK 13 +#define GCC_AHB2PHY_WEST_CLK 14 +#define GCC_APC_VS_CLK 15 +#define GCC_BOOT_ROM_AHB_CLK 16 +#define GCC_CAMERA_AHB_CLK 17 +#define GCC_CAMERA_HF_AXI_CLK 18 +#define GCC_CE1_AHB_CLK 19 +#define GCC_CE1_AXI_CLK 20 +#define GCC_CE1_CLK 21 +#define GCC_CFG_NOC_USB2_SEC_AXI_CLK 22 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23 +#define GCC_CPUSS_AHB_CLK 24 +#define GCC_CPUSS_AHB_CLK_SRC 25 +#define GCC_DDRSS_GPU_AXI_CLK 26 +#define GCC_DISP_AHB_CLK 27 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 28 +#define GCC_DISP_HF_AXI_CLK 29 +#define GCC_EMAC_AXI_CLK 30 +#define GCC_EMAC_PTP_CLK 31 +#define GCC_EMAC_PTP_CLK_SRC 32 +#define GCC_EMAC_RGMII_CLK 33 +#define GCC_EMAC_RGMII_CLK_SRC 34 +#define GCC_EMAC_SLV_AHB_CLK 35 +#define GCC_GP1_CLK 36 +#define GCC_GP1_CLK_SRC 37 +#define GCC_GP2_CLK 38 +#define GCC_GP2_CLK_SRC 39 +#define GCC_GP3_CLK 40 +#define GCC_GP3_CLK_SRC 41 +#define GCC_GPU_CFG_AHB_CLK 42 +#define GCC_GPU_GPLL0_CLK_SRC 43 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 44 +#define GCC_GPU_IREF_CLK 45 +#define GCC_GPU_MEMNOC_GFX_CLK 46 +#define GCC_GPU_SNOC_DVM_GFX_CLK 47 +#define GCC_MSS_AXIS2_CLK 48 +#define GCC_MSS_CFG_AHB_CLK 49 +#define GCC_MSS_GPLL0_DIV_CLK_SRC 50 +#define GCC_MSS_MFAB_AXIS_CLK 51 +#define GCC_MSS_Q6_MEMNOC_AXI_CLK 52 +#define GCC_MSS_SNOC_AXI_CLK 53 +#define GCC_MSS_VS_CLK 54 +#define GCC_PCIE0_PHY_REFGEN_CLK 55 +#define GCC_PCIE_0_AUX_CLK 56 +#define GCC_PCIE_0_AUX_CLK_SRC 57 +#define GCC_PCIE_0_CFG_AHB_CLK 58 +#define GCC_PCIE_0_CLKREF_CLK 59 +#define GCC_PCIE_0_MSTR_AXI_CLK 60 +#define GCC_PCIE_0_PIPE_CLK 61 +#define GCC_PCIE_0_SLV_AXI_CLK 62 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 63 +#define GCC_PCIE_PHY_AUX_CLK 64 +#define GCC_PCIE_PHY_REFGEN_CLK_SRC 65 +#define GCC_PDM2_CLK 66 +#define GCC_PDM2_CLK_SRC 67 +#define GCC_PDM_AHB_CLK 68 +#define GCC_PDM_XO4_CLK 69 +#define GCC_PRNG_AHB_CLK 70 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 71 +#define GCC_QMIP_DISP_AHB_CLK 72 +#define GCC_QMIP_PCIE_AHB_CLK 73 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 74 +#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 75 +#define GCC_QSPI_CORE_CLK 76 +#define GCC_QSPI_CORE_CLK_SRC 77 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 78 +#define GCC_QUPV3_WRAP0_CORE_CLK 79 +#define GCC_QUPV3_WRAP0_S0_CLK 80 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 81 +#define GCC_QUPV3_WRAP0_S1_CLK 82 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 83 +#define 
GCC_QUPV3_WRAP0_S2_CLK 84 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 85 +#define GCC_QUPV3_WRAP0_S3_CLK 86 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 87 +#define GCC_QUPV3_WRAP0_S4_CLK 88 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 89 +#define GCC_QUPV3_WRAP0_S5_CLK 90 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 91 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 92 +#define GCC_QUPV3_WRAP1_CORE_CLK 93 +#define GCC_QUPV3_WRAP1_S0_CLK 94 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 95 +#define GCC_QUPV3_WRAP1_S1_CLK 96 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 97 +#define GCC_QUPV3_WRAP1_S2_CLK 98 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 99 +#define GCC_QUPV3_WRAP1_S3_CLK 100 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 101 +#define GCC_QUPV3_WRAP1_S4_CLK 102 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 103 +#define GCC_QUPV3_WRAP1_S5_CLK 104 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 105 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 106 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 107 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 108 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 109 +#define GCC_SDCC1_AHB_CLK 110 +#define GCC_SDCC1_APPS_CLK 111 +#define GCC_SDCC1_APPS_CLK_SRC 112 +#define GCC_SDCC1_ICE_CORE_CLK 113 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 114 +#define GCC_SDCC2_AHB_CLK 115 +#define GCC_SDCC2_APPS_CLK 116 +#define GCC_SDCC2_APPS_CLK_SRC 117 +#define GCC_SYS_NOC_CPUSS_AHB_CLK 118 +#define GCC_UFS_CARD_CLKREF_CLK 119 +#define GCC_UFS_MEM_CLKREF_CLK 120 +#define GCC_UFS_PHY_AHB_CLK 121 +#define GCC_UFS_PHY_AXI_CLK 122 +#define GCC_UFS_PHY_AXI_CLK_SRC 123 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 124 +#define GCC_UFS_PHY_ICE_CORE_CLK 125 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 126 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 127 +#define GCC_UFS_PHY_PHY_AUX_CLK 128 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 129 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 130 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 131 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 132 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 133 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 134 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 135 +#define GCC_USB20_SEC_MASTER_CLK 136 +#define GCC_USB20_SEC_MASTER_CLK_SRC 137 +#define GCC_USB20_SEC_MOCK_UTMI_CLK 138 +#define GCC_USB20_SEC_MOCK_UTMI_CLK_SRC 139 +#define GCC_USB20_SEC_SLEEP_CLK 140 +#define GCC_USB2_SEC_PHY_AUX_CLK 141 +#define GCC_USB2_SEC_PHY_AUX_CLK_SRC 142 +#define GCC_USB2_SEC_PHY_COM_AUX_CLK 143 +#define GCC_USB2_SEC_PHY_PIPE_CLK 144 +#define GCC_USB30_PRIM_MASTER_CLK 145 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 146 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 147 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 148 +#define GCC_USB30_PRIM_SLEEP_CLK 149 +#define GCC_USB3_PRIM_CLKREF_CLK 150 +#define GCC_USB3_PRIM_PHY_AUX_CLK 151 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 152 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 153 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 154 +#define GCC_USB3_SEC_CLKREF_CLK 155 +#define GCC_VDDA_VS_CLK 156 +#define GCC_VDDCX_VS_CLK 157 +#define GCC_VDDMX_VS_CLK 158 +#define GCC_VIDEO_AHB_CLK 159 +#define GCC_VIDEO_AXI0_CLK 160 +#define GCC_VS_CTRL_AHB_CLK 161 +#define GCC_VS_CTRL_CLK 162 +#define GCC_VS_CTRL_CLK_SRC 163 +#define GCC_VSENSOR_CLK_SRC 164 +#define GCC_WCSS_VS_CLK 165 +#define GCC_CAMERA_XO_CLK 166 +#define GCC_CPUSS_GNOC_CLK 167 +#define GCC_DISP_XO_CLK 168 +#define GCC_VIDEO_XO_CLK 169 +#define GCC_RX1_USB2_CLKREF_CLK 170 +#define GCC_USB2_PRIM_CLKREF_CLK 171 +#define GCC_USB2_SEC_CLKREF_CLK 172 + +/* GCC Resets */ +#define GCC_QUSB2PHY_PRIM_BCR 0 +#define GCC_QUSB2PHY_SEC_BCR 1 +#define GCC_USB30_PRIM_BCR 2 +#define GCC_USB2_PHY_SEC_BCR 3 +#define GCC_USB3_DP_PHY_SEC_BCR 4 +#define GCC_USB3PHY_PHY_SEC_BCR 5 +#define 
GCC_PCIE_0_BCR 6 +#define GCC_PCIE_0_PHY_BCR 7 +#define GCC_PCIE_PHY_BCR 8 +#define GCC_PCIE_PHY_COM_BCR 9 +#define GCC_UFS_PHY_BCR 10 +#define GCC_USB20_SEC_BCR 11 #endif diff --git a/include/dt-bindings/clock/qcom,gpucc-sm6150.h b/include/dt-bindings/clock/qcom,gpucc-sm6150.h index 15ad06ad4e7dbb416ef78aadead2969dbfac37e2..405f6dd09e69fbf69b9ae0849a53c05cad93a660 100644 --- a/include/dt-bindings/clock/qcom,gpucc-sm6150.h +++ b/include/dt-bindings/clock/qcom,gpucc-sm6150.h @@ -14,39 +14,23 @@ #ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6150_H #define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6150_H -#define GPU_CC_AHB_CLK 0 -#define GPU_CC_CRC_AHB_CLK 1 -#define GPU_CC_CX_APB_CLK 2 -#define GPU_CC_CX_GFX3D_CLK 3 -#define GPU_CC_CX_GFX3D_SLV_CLK 4 -#define GPU_CC_CX_GMU_CLK 5 -#define GPU_CC_CX_QDSS_AT_CLK 6 -#define GPU_CC_CX_QDSS_TRIG_CLK 7 -#define GPU_CC_CX_QDSS_TSCTR_CLK 8 -#define GPU_CC_CX_SNOC_DVM_CLK 9 -#define GPU_CC_CXO_AON_CLK 10 -#define GPU_CC_CXO_CLK 11 -#define GPU_CC_GMU_CLK_SRC 12 -#define GPU_CC_GX_CXO_CLK 13 -#define GPU_CC_GX_GFX3D_CLK 14 -#define GPU_CC_GX_GFX3D_CLK_SRC 15 -#define GPU_CC_GX_GMU_CLK 16 -#define GPU_CC_GX_QDSS_TSCTR_CLK 17 -#define GPU_CC_GX_VSENSE_CLK 18 -#define GPU_CC_PLL0 19 -#define GPU_CC_PLL0_OUT_AUX 20 -#define GPU_CC_PLL1 21 -#define GPU_CC_PLL1_OUT_AUX 22 -#define GPU_CC_PLL_TEST_CLK 23 -#define GPU_CC_SLEEP_CLK 24 - -/* TODO: PLL CLOCK IDs */ - -#define GPUCC_GPU_CC_CX_BCR 0 -#define GPUCC_GPU_CC_GFX3D_AON_BCR 1 -#define GPUCC_GPU_CC_GMU_BCR 2 -#define GPUCC_GPU_CC_GX_BCR 3 -#define GPUCC_GPU_CC_SPDM_BCR 4 -#define GPUCC_GPU_CC_XO_BCR 5 +/* GPUCC clock registers */ +#define GPU_CC_PLL0_OUT_AUX2 0 +#define GPU_CC_PLL1_OUT_AUX2 1 +#define GPU_CC_CRC_AHB_CLK 2 +#define GPU_CC_CX_APB_CLK 3 +#define GPU_CC_CX_GFX3D_CLK 4 +#define GPU_CC_CX_GFX3D_SLV_CLK 5 +#define GPU_CC_CX_GMU_CLK 6 +#define GPU_CC_CX_SNOC_DVM_CLK 7 +#define GPU_CC_CXO_AON_CLK 8 +#define GPU_CC_CXO_CLK 9 +#define GPU_CC_GMU_CLK_SRC 10 +#define GPU_CC_SLEEP_CLK 11 +#define GPU_CC_GX_GMU_CLK 12 +#define GPU_CC_GX_CXO_CLK 13 +#define GPU_CC_GX_GFX3D_CLK 14 +#define GPU_CC_GX_GFX3D_CLK_SRC 15 +#define GPU_CC_AHB_CLK 16 #endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 33891f2b7153f65ffb27521302af70adf23a5eef..a0876ac455d883962cb3d0437259001deb9c9a55 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -111,32 +111,36 @@ #define RPM_SMD_LN_BB_CLK_A 71 #define RPM_SMD_LN_BB_CLK_PIN 72 #define RPM_SMD_LN_BB_CLK_A_PIN 73 -#define PNOC_MSMBUS_CLK 74 -#define PNOC_MSMBUS_A_CLK 75 -#define PNOC_KEEPALIVE_A_CLK 76 -#define SNOC_MSMBUS_CLK 77 -#define SNOC_MSMBUS_A_CLK 78 -#define BIMC_MSMBUS_CLK 79 -#define BIMC_MSMBUS_A_CLK 80 -#define PNOC_USB_CLK 81 -#define PNOC_USB_A_CLK 82 -#define SNOC_USB_CLK 83 -#define SNOC_USB_A_CLK 84 -#define BIMC_USB_CLK 85 -#define BIMC_USB_A_CLK 86 -#define SNOC_WCNSS_A_CLK 87 -#define BIMC_WCNSS_A_CLK 88 -#define MCD_CE1_CLK 89 -#define QCEDEV_CE1_CLK 90 -#define QCRYPTO_CE1_CLK 91 -#define QSEECOM_CE1_CLK 92 -#define SCM_CE1_CLK 93 -#define CXO_SMD_OTG_CLK 94 -#define CXO_SMD_LPM_CLK 95 -#define CXO_SMD_PIL_PRONTO_CLK 96 -#define CXO_SMD_PIL_MSS_CLK 97 -#define CXO_SMD_WLAN_CLK 98 -#define CXO_SMD_PIL_LPASS_CLK 99 -#define CXO_SMD_PIL_CDSP_CLK 100 +#define RPM_SMD_RF_CLK3 74 +#define RPM_SMD_RF_CLK3_A 75 +#define RPM_SMD_RF_CLK3_PIN 76 +#define RPM_SMD_RF_CLK3_A_PIN 77 +#define PNOC_MSMBUS_CLK 78 +#define PNOC_MSMBUS_A_CLK 79 +#define PNOC_KEEPALIVE_A_CLK 80 +#define 
SNOC_MSMBUS_CLK 81 +#define SNOC_MSMBUS_A_CLK 82 +#define BIMC_MSMBUS_CLK 83 +#define BIMC_MSMBUS_A_CLK 84 +#define PNOC_USB_CLK 85 +#define PNOC_USB_A_CLK 86 +#define SNOC_USB_CLK 87 +#define SNOC_USB_A_CLK 88 +#define BIMC_USB_CLK 89 +#define BIMC_USB_A_CLK 90 +#define SNOC_WCNSS_A_CLK 91 +#define BIMC_WCNSS_A_CLK 92 +#define MCD_CE1_CLK 93 +#define QCEDEV_CE1_CLK 94 +#define QCRYPTO_CE1_CLK 95 +#define QSEECOM_CE1_CLK 96 +#define SCM_CE1_CLK 97 +#define CXO_SMD_OTG_CLK 98 +#define CXO_SMD_LPM_CLK 99 +#define CXO_SMD_PIL_PRONTO_CLK 100 +#define CXO_SMD_PIL_MSS_CLK 101 +#define CXO_SMD_WLAN_CLK 102 +#define CXO_SMD_PIL_LPASS_CLK 103 +#define CXO_SMD_PIL_CDSP_CLK 104 #endif diff --git a/include/dt-bindings/clock/qcom,videocc-sm6150.h b/include/dt-bindings/clock/qcom,videocc-sm6150.h index be71dfb724274c78517bf5ddd80f6be1144901e6..c7fff1475c18122927513b201ee5dc34e141dab5 100644 --- a/include/dt-bindings/clock/qcom,videocc-sm6150.h +++ b/include/dt-bindings/clock/qcom,videocc-sm6150.h @@ -14,25 +14,21 @@ #ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM6150_H #define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM6150_H -#define VIDEO_CC_APB_CLK 0 -#define VIDEO_CC_AT_CLK 1 -#define VIDEO_CC_QDSS_TRIG_CLK 2 -#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK 3 -#define VIDEO_CC_SLEEP_CLK 4 -#define VIDEO_CC_SLEEP_CLK_SRC 5 -#define VIDEO_CC_VCODEC0_AXI_CLK 6 -#define VIDEO_CC_VCODEC0_CORE_CLK 7 -#define VIDEO_CC_VENUS_AHB_CLK 8 -#define VIDEO_CC_VENUS_CLK_SRC 9 -#define VIDEO_CC_VENUS_CTL_AXI_CLK 10 -#define VIDEO_CC_VENUS_CTL_CORE_CLK 11 -#define VIDEO_CC_XO_CLK 12 -#define VIDEO_CC_XO_CLK_SRC 13 -#define VIDEO_PLL0 14 -#define VIDEO_PLL0_OUT_MAIN 15 +/* Hardware clocks*/ +#define CHIP_SLEEP_CLK 0 -#define VIDEO_CC_INTERFACE_BCR 0 -#define VIDEO_CC_VCODEC0_BCR 1 -#define VIDEO_CC_VENUS_BCR 2 +/* VIDEOCC clock registers */ +#define VIDEO_PLL0_OUT_MAIN 1 +#define VIDEO_CC_APB_CLK 2 +#define VIDEO_CC_SLEEP_CLK 3 +#define VIDEO_CC_SLEEP_CLK_SRC 4 +#define VIDEO_CC_VCODEC0_AXI_CLK 5 +#define VIDEO_CC_VCODEC0_CORE_CLK 6 +#define VIDEO_CC_VENUS_AHB_CLK 7 +#define VIDEO_CC_VENUS_CLK_SRC 8 +#define VIDEO_CC_VENUS_CTL_AXI_CLK 9 +#define VIDEO_CC_VENUS_CTL_CORE_CLK 10 +#define VIDEO_CC_XO_CLK 11 +#define VIDEO_CC_XO_CLK_SRC 12 #endif diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h index 0d35fdadefb2c9af3e8126ef3d86c15b8018d178..f646c7b7b9b89a6e2c3da92b8c02fd5b4d805e92 100644 --- a/include/dt-bindings/msm/msm-bus-ids.h +++ b/include/dt-bindings/msm/msm-bus-ids.h @@ -276,6 +276,7 @@ #define MSM_BUS_MASTER_QSPI_0 165 #define MSM_BUS_MASTER_QSPI_1 166 #define MSM_BUS_MASTER_PCIE_3 167 +#define MSM_BUS_MASTER_LPASS_ANOC 168 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001 @@ -362,6 +363,7 @@ #define MSM_BUS_SLAVE_SNOC_GEM_NOC_GC 10072 #define MSM_BUS_SLAVE_SNOC_GEM_NOC_SF 10073 #define MSM_BUS_PNOC_SLV_10 10074 +#define MSM_BUS_PNOC_SLV_11 10075 #define MSM_BUS_INT_TEST_ID 20000 #define MSM_BUS_INT_TEST_LAST 20050 @@ -644,6 +646,9 @@ #define MSM_BUS_SLAVE_USB3_2 799 #define MSM_BUS_SLAVE_SERVICE_GEM_NOC_1 800 #define MSM_BUS_SLAVE_PCIE_3 801 +#define MSM_BUS_SLAVE_LPASS_SNOC 802 +#define MSM_BUS_SLAVE_DC_NOC_GEMNOC 803 +#define MSM_BUS_SLAVE_MEM_NOC_PCIE_SNOC 804 #define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512 #define MSM_BUS_SLAVE_LLCC_DISPLAY 20513 @@ -818,7 +823,19 @@ #define ICBID_MASTER_CNOC_A2NOC 146 #define ICBID_MASTER_WLAN 147 #define ICBID_MASTER_MSS_CE 148 -#define ICBID_MASTER_PCNOC_S_10 149 +#define ICBID_MASTER_CDSP_PROC 149 +#define 
ICBID_MASTER_GNOC_SNOC 150 +#define ICBID_MASTER_MODEM_WRAPPER 151 +#define ICBID_MASTER_SDIO 152 +#define ICBID_MASTER_BIMC_SNOC_PCIE 153 +#define ICBID_MASTER_WLAN_PROC 154 +#define ICBID_MASTER_CRVIRT_PCNOC 155 +#define ICBID_MASTER_WLAN_INT 156 +#define ICBID_MASTER_PCNOC_S_10 157 +#define ICBID_MASTER_PCNOC_S_11 158 +#define ICBID_MASTER_LPASS_LPAIF 159 +#define ICBID_MASTER_LPASS_LEC 160 +#define ICBID_MASTER_LPASS_ANOC_BIMC 161 #define ICBID_SLAVE_EBI1 0 #define ICBID_SLAVE_APPSS_L2 1 @@ -1048,5 +1065,35 @@ #define ICBID_SLAVE_TLMM_NORTH 214 #define ICBID_SLAVE_TLMM_WEST 215 #define ICBID_SLAVE_TLMM_SOUTH 216 -#define ICBID_SLAVE_PCNOC_S_10 217 +#define ICBID_SLAVE_TLMM_CENTER 217 +#define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 218 +#define ICBID_SLAVE_A2NOC_THROTTLE_CFG 219 +#define ICBID_SLAVE_CDSP 220 +#define ICBID_SLAVE_CDSP_SMMU_CFG 221 +#define ICBID_SLAVE_LPASS_MPU_CFG 222 +#define ICBID_SLAVE_CSI_PHY_CFG 223 +#define ICBID_SLAVE_DDRSS_CFG 224 +#define ICBID_SLAVE_DDRSS_MPU_CFG 225 +#define ICBID_SLAVE_SNOC_MSS_XPU_CFG 226 +#define ICBID_SLAVE_BIMC_MSS_XPU_CFG 227 +#define ICBID_SLAVE_MSS_SNOC_MPU_CFG 228 +#define ICBID_SLAVE_MSS 229 +#define ICBID_SLAVE_SDIO 230 +#define ICBID_SLAVE_QM_MPU_CFG 231 +#define ICBID_SLAVE_BIMC_SNOC_PCIE 232 +#define ICBID_SLAVE_BOOTIMEM 233 +#define ICBID_SLAVE_CDSP_CFG 234 +#define ICBID_SLAVE_WLAN_DSP_CFG 235 +#define ICBID_SLAVE_GENIR_XPU_CFG 236 +#define ICBID_SLAVE_BOOTIMEM_MPU 237 +#define ICBID_SLAVE_CRVIRT_PCNOC 238 +#define ICBID_SLAVE_WLAN_INT 239 +#define ICBID_SLAVE_WLAN_MPU_CFG 240 +#define ICBID_SLAVE_LPASS_AGNOC_CFG 241 +#define ICBID_SLAVE_LPASS_AGNOC_XPU_CFG 242 +#define ICBID_SLAVE_PLL_BIAS_CFG 243 +#define ICBID_SLAVE_EMAC 244 +#define ICBID_SLAVE_PCNOC_S_10 245 +#define ICBID_SLAVE_PCNOC_S_11 246 +#define ICBID_SLAVE_LPASS_ANOC_BIMC 247 #endif diff --git a/include/dt-bindings/sound/audio-codec-port-types.h b/include/dt-bindings/sound/audio-codec-port-types.h new file mode 100644 index 0000000000000000000000000000000000000000..b795d0db7db3cce4548fc126adb4301f0c14be6e --- /dev/null +++ b/include/dt-bindings/sound/audio-codec-port-types.h @@ -0,0 +1,32 @@ +#ifndef __AUDIO_CODEC_PORT_TYPES_H +#define __AUDIO_CODEC_PORT_TYPES_H + +#define SPKR_L 1 +#define SPKR_L_BOOST 2 +#define SPKR_L_COMP 3 +#define SPKR_L_VI 4 +#define SPKR_R 5 +#define SPKR_R_BOOST 6 +#define SPKR_R_COMP 7 +#define SPKR_R_VI 8 +#define HPH 9 +#define COMPANDER 10 +#define CLSH 11 +#define LO 12 +#define DSD 13 +#define MBHC 14 +#define ADC1 15 +#define ADC2 16 +#define ADC3 17 +#define DMIC1 18 +#define DMIC2 19 +#define DMIC3 20 +#define DMIC4 21 +#define DMIC5 22 +#define DMIC6 23 +#define DMIC7 24 +#define DMIC8 25 +#define DMIC9 26 +#define DMIC10 27 + +#endif /* __AUDIO_CODEC_PORT_TYPES_H */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 43690f501712a427c551e3248f02ae7a26ce0038..43072b1fde0c6846eb530153601719642e33aea6 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -80,6 +80,11 @@ ARM_SMCCC_SMC_32, \ 0, 0x8000) +#define ARM_SMCCC_ARCH_WORKAROUND_2 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x7fff) + #ifndef __ASSEMBLY__ #include @@ -293,5 +298,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, */ #define arm_smccc_1_1_hvc(...) 
__arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) +/* Return codes defined in ARM DEN 0070A */ +#define SMCCC_RET_SUCCESS 0 +#define SMCCC_RET_NOT_SUPPORTED -1 +#define SMCCC_RET_NOT_REQUIRED -2 + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 40090ee2eeb0e77d7250cc4bee2c5f4923c6e698..be2bee81c748e614f18697f9b1f9e4b7e277395b 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -115,6 +115,12 @@ struct bio { struct bio_set *bi_pool; + /* + * When using direct-io (O_DIRECT), we can't get the inode from a bio + * by walking bio->bi_io_vec->bv_page->mapping->host + * since the page is anon. + */ + struct inode *bi_dio_inode; /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7a138a482f941e0aa63d3ee252198cdfff61b132..64ac8e3c5a40aa9a28139e35ba9f925d608fb7de 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1091,8 +1091,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, if (!q->limits.chunk_sectors) return q->limits.max_sectors; - return q->limits.chunk_sectors - - (offset & (q->limits.chunk_sectors - 1)); + return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors - + (offset & (q->limits.chunk_sectors - 1)))); } static inline unsigned int blk_rq_get_max_sectors(struct request *rq, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 61345e73dae5d0fee48e993e9c32299822529e6c..065d021c44ac136b840d54ffebd68eb6d27d972c 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -841,6 +841,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw, int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req); int __clk_mux_determine_rate_closest(struct clk_hw *hw, struct clk_rate_request *req); +int clk_mux_determine_rate_flags(struct clk_hw *hw, + struct clk_rate_request *req, + unsigned long flags); void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 853929f989625956f529d60eeaa26ded526df031..a704d032713b9d74129e36a7a499c9d100901d8f 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -21,7 +21,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define unlikely_notrace(x) __builtin_expect(!!(x), 0) #define __branch_check__(x, expect, is_constant) ({ \ - int ______r; \ + long ______r; \ static struct ftrace_likely_data \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_annotated_branch"))) \ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 3ecdd63e9e0f04e7810fa480f65596f7fea0b928..bfd59289864ab724e64fd067a8e23248fe4cbe97 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -97,6 +97,7 @@ enum cpuhp_state { CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, CPUHP_AP_RCUTREE_DYING, + CPUHP_AP_KMAP_DYING, CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 0c0146e7e274f460ca9e3ea39c77531e6bedf065..59fbe005f2047257bb4185390ab2f3aaf54281bc 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -300,6 +300,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 
*legacy_u32, * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS * instead of the latter), any change to them will be overwritten * by kernel. Returns a negative error code or zero. + * @get_fecparam: Get the network device Forward Error Correction parameters. + * @set_fecparam: Set the network device Forward Error Correction parameters. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 6b359a518effe7c3d73fca564196c8f9470e95aa..4bdd9dd873330aac49d976b06215d861932886af 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -280,7 +280,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, extern const char *extcon_get_edev_name(struct extcon_dev *edev); extern int extcon_blocking_sync(struct extcon_dev *edev, unsigned int id, - bool val); + u8 val); #else /* CONFIG_EXTCON */ static inline int extcon_dev_register(struct extcon_dev *edev) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 63b60955042b707a81e7867936f06477e739380f..b030db4098ee3328ea139c0e6f05a10b5df70066 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3043,6 +3043,8 @@ static inline void inode_dio_end(struct inode *inode) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } +struct inode *dio_bio_get_inode(struct bio *bio); + extern void inode_set_flags(struct inode *inode, unsigned int flags, unsigned int mask); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 952ab97af325ea32b537cf32f32a2afd30648cdd..2a3957bc2221c8b5157d098a4ef1662d9dc1eebb 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -17,6 +17,8 @@ #include #define FS_CRYPTO_BLOCK_SIZE 16 +#define FS_ENCRYPTION_MODE_PRIVATE 127 +#define FS_AES_256_XTS_KEY_SIZE 64 struct fscrypt_ctx; struct fscrypt_info; @@ -42,6 +44,8 @@ struct fscrypt_name { /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
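Note: the bi_dio_inode field added to struct bio above, together with the dio_bio_get_inode() declaration in fs.h, exists because an O_DIRECT bio carries anonymous pages, so the owning inode cannot be recovered through bv_page->mapping->host. The following is a minimal illustrative sketch (not part of this patch; the helper name is hypothetical) of how a block-layer consumer might resolve the inode for either I/O path:

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical helper: find the inode a bio was issued against. */
static struct inode *example_bio_to_inode(struct bio *bio)
{
	struct page *page;

	if (!bio || !bio_has_data(bio))
		return NULL;

	page = bio_page(bio);
	if (page && !PageAnon(page) && page->mapping)
		return page->mapping->host;	/* buffered (page cache) I/O */

	return dio_bio_get_inode(bio);		/* O_DIRECT: pages are anonymous */
}
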
*/ #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 +extern int fs_using_hardware_encryption(struct inode *inode); + #if __FS_HAS_ENCRYPTION #include #else diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5ade8f2a698764986b2d8fb3757ae812a2b1b310..550fa358893ae9555d24dc0dea6791f56cd51224 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -365,7 +365,9 @@ static inline void free_part_stats(struct hd_struct *part) part_stat_add(cpu, gendiskp, field, -subnd) void part_in_flight(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); + unsigned int inflight[2]); +void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, + unsigned int inflight[2]); void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw); void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h index c8117801ab135dbc05bd037a5f57cdf247b43776..406a6ed7c818dbec51687b702f29ff23fe2aeb46 100644 --- a/include/linux/hdcp_qseecom.h +++ b/include/linux/hdcp_qseecom.h @@ -68,6 +68,7 @@ void hdcp2_deinit(void *ctx); bool hdcp2_feature_supported(void *ctx); int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, struct hdcp2_app_data *app_data); +int hdcp2_force_encryption(void *ctx, uint32_t enable); #else static inline void *hdcp1_init(void) { @@ -113,6 +114,11 @@ static inline int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, { return 0; } + +static inline int hdcp2_force_encryption(void *ctx, uint32_t enable) +{ + return 0; +} #endif #endif /* __HDCP_QSEECOM_H */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index e2f26daff2c1a78afe0f4348096f791153489c4b..4db2f34c4fe5696238886b4abcffa14125495b43 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -42,6 +42,7 @@ void kmap_flush_unused(void); #ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH void kmap_atomic_flush_unused(void); +int kmap_remove_unused_cpu(unsigned int cpu); #else static inline void kmap_atomic_flush_unused(void) { } #endif @@ -92,6 +93,10 @@ static inline void __kunmap_atomic(void *addr) #endif /* CONFIG_HIGHMEM */ +#if !defined(CONFIG_HIGHMEM) || !defined(CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH) +static inline int kmap_remove_unused_cpu(unsigned int cpu) { return 0; } +#endif + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DECLARE_PER_CPU(int, __kmap_atomic_idx); diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 0660a03d37d986c8167dbdb5086acaa96dc17a99..9e25283d6fc9e2f64c148265e8cb102412016d51 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -8,7 +8,7 @@ static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { - return !!(vma->vm_flags & VM_HUGETLB); + return !!(READ_ONCE(vma->vm_flags) & VM_HUGETLB); } #else diff --git a/include/linux/init.h b/include/linux/init.h index f138e5b918c216473eacd07b4bec8c5dab79980d..d66317911c1ae0116e702fdfc85c0be1861e9d74 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -289,6 +289,8 @@ void __init parse_early_options(char *cmdline); /* Data marked not to be saved by software suspend */ #define __nosavedata __section(.data..nosave) +#define __rticdata __attribute__((section(".bss.rtic"))) + #ifdef MODULE #define __exit_p(x) x #else diff --git a/include/linux/ipa.h b/include/linux/ipa.h index 40a848c24edabff1895dfa546b44cd02dd6274c6..ecdc4e7c0bf69c5701a75abf34b176ae8e4e959b 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -1206,11 
+1206,13 @@ int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); */ int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs); +int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only); + int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls); int ipa_commit_hdr(void); -int ipa_reset_hdr(void); +int ipa_reset_hdr(bool user_only); int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup); @@ -1221,7 +1223,8 @@ int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy); /* * Header Processing Context */ -int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs); +int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); @@ -1230,11 +1233,13 @@ int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); */ int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only); + int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); int ipa_commit_rt(enum ipa_ip_type ip); -int ipa_reset_rt(enum ipa_ip_type ip); +int ipa_reset_rt(enum ipa_ip_type ip, bool user_only); int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); @@ -1249,13 +1254,15 @@ int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); */ int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); +int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only); + int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); int ipa_commit_flt(enum ipa_ip_type ip); -int ipa_reset_flt(enum ipa_ip_type ip); +int ipa_reset_flt(enum ipa_ip_type ip, bool user_only); /* * NAT\IPv6CT @@ -1656,6 +1663,12 @@ static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs) return -EPERM; } +static inline int ipa_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls) { return -EPERM; @@ -1666,7 +1679,7 @@ static inline int ipa_commit_hdr(void) return -EPERM; } -static inline int ipa_reset_hdr(void) +static inline int ipa_reset_hdr(bool user_only) { return -EPERM; } @@ -1690,7 +1703,8 @@ static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy) * Header Processing Context */ static inline int ipa_add_hdr_proc_ctx( - struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs) + struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) { return -EPERM; } @@ -1707,6 +1721,12 @@ static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) return -EPERM; } +static inline int ipa_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) { return -EPERM; @@ -1717,7 +1737,7 @@ static inline int ipa_commit_rt(enum ipa_ip_type ip) return -EPERM; } -static inline int ipa_reset_rt(enum ipa_ip_type ip) +static inline int ipa_reset_rt(enum ipa_ip_type ip, bool user_only) { return -EPERM; } @@ -1750,6 +1770,12 @@ static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) return -EPERM; } +static inline int ipa_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only) +{ + return -EPERM; +} + static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) { return -EPERM; @@ -1765,7 +1791,7 @@ static inline int ipa_commit_flt(enum ipa_ip_type ip) return -EPERM; } -static inline int ipa_reset_flt(enum ipa_ip_type ip) +static inline int ipa_reset_flt(enum ipa_ip_type ip, bool user_only) { return -EPERM; } diff --git 
a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 39f0489eb137b65620c3ea2ff6de70a44f84e6d7..b81d458ad4fb0d6f8bdd03a3ea58a876e7aec75f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1044,13 +1044,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING -#ifdef CONFIG_S390 -#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that... -#elif defined(CONFIG_ARM64) -#define KVM_MAX_IRQ_ROUTES 4096 -#else -#define KVM_MAX_IRQ_ROUTES 1024 -#endif +#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */ bool kvm_arch_can_set_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 7161d8e7ee79246ffca220805826f883f26d7ddd..d1431c1bfed3308053f87fbb86fee8400497c7ea 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1475,6 +1475,8 @@ union security_list_options { size_t *len); int (*inode_create)(struct inode *dir, struct dentry *dentry, umode_t mode); + int (*inode_post_create)(struct inode *dir, struct dentry *dentry, + umode_t mode); int (*inode_link)(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int (*inode_unlink)(struct inode *dir, struct dentry *dentry); @@ -1780,6 +1782,7 @@ struct security_hook_heads { struct list_head inode_free_security; struct list_head inode_init_security; struct list_head inode_create; + struct list_head inode_post_create; struct list_head inode_link; struct list_head inode_unlink; struct list_head inode_symlink; diff --git a/include/linux/mdss_io_util.h b/include/linux/mdss_io_util.h index dd0b17cc555d9f50f0ae798f16ef063259b26ba1..ad706baa9d08f40c34ae9b7cb2633a35cd57dfe5 100644 --- a/include/linux/mdss_io_util.h +++ b/include/linux/mdss_io_util.h @@ -28,26 +28,26 @@ #define DEV_WARN(fmt, args...) pr_warn(fmt, ##args) #define DEV_ERR(fmt, args...) 
pr_err(fmt, ##args) -struct mdss_io_data { +struct dss_io_data { u32 len; void __iomem *base; }; -void mdss_reg_w(struct mdss_io_data *io, u32 offset, u32 value, u32 debug); -u32 mdss_reg_r(struct mdss_io_data *io, u32 offset, u32 debug); -void mdss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug); +void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug); +u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug); +void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug); -#define DSS_REG_W_ND(io, offset, val) mdss_reg_w(io, offset, val, false) -#define DSS_REG_W(io, offset, val) mdss_reg_w(io, offset, val, true) -#define DSS_REG_R_ND(io, offset) mdss_reg_r(io, offset, false) -#define DSS_REG_R(io, offset) mdss_reg_r(io, offset, true) +#define DSS_REG_W_ND(io, offset, val) dss_reg_w(io, offset, val, false) +#define DSS_REG_W(io, offset, val) dss_reg_w(io, offset, val, true) +#define DSS_REG_R_ND(io, offset) dss_reg_r(io, offset, false) +#define DSS_REG_R(io, offset) dss_reg_r(io, offset, true) -enum mdss_vreg_type { +enum dss_vreg_type { DSS_REG_LDO, DSS_REG_VS, }; -enum mdss_vreg_mode { +enum dss_vreg_mode { DSS_REG_MODE_ENABLE, DSS_REG_MODE_DISABLE, DSS_REG_MODE_LP, @@ -55,68 +55,70 @@ enum mdss_vreg_mode { DSS_REG_MODE_MAX, }; -struct mdss_vreg { +struct dss_vreg { struct regulator *vreg; /* vreg handle */ char vreg_name[32]; int min_voltage; int max_voltage; u32 load[DSS_REG_MODE_MAX]; + int enable_load; + int disable_load; int pre_on_sleep; int post_on_sleep; int pre_off_sleep; int post_off_sleep; }; -struct mdss_gpio { +struct dss_gpio { unsigned int gpio; unsigned int value; char gpio_name[32]; }; -enum mdss_clk_type { +enum dss_clk_type { DSS_CLK_AHB, /* no set rate. rate controlled through rpm */ DSS_CLK_PCLK, DSS_CLK_OTHER, }; -struct mdss_clk { +struct dss_clk { struct clk *clk; /* clk handle */ char clk_name[32]; - enum mdss_clk_type type; + enum dss_clk_type type; unsigned long rate; }; -struct mdss_module_power { +struct dss_module_power { unsigned int num_vreg; - struct mdss_vreg *vreg_config; + struct dss_vreg *vreg_config; unsigned int num_gpio; - struct mdss_gpio *gpio_config; + struct dss_gpio *gpio_config; unsigned int num_clk; - struct mdss_clk *clk_config; + struct dss_clk *clk_config; }; -int msm_mdss_ioremap_byname(struct platform_device *pdev, - struct mdss_io_data *io_data, const char *name); -void msm_mdss_iounmap(struct mdss_io_data *io_data); +int msm_dss_ioremap_byname(struct platform_device *pdev, + struct dss_io_data *io_data, const char *name); +void msm_dss_iounmap(struct dss_io_data *io_data); -int msm_mdss_enable_gpio(struct mdss_gpio *in_gpio, int num_gpio, int enable); -int msm_mdss_gpio_enable(struct mdss_gpio *in_gpio, int num_gpio, int enable); +int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable); +int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable); -int msm_mdss_config_vreg(struct device *dev, struct mdss_vreg *in_vreg, +int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg, int num_vreg, int config); -int msm_mdss_enable_vreg(struct mdss_vreg *in_vreg, int num_vreg, int enable); -int msm_mdss_config_vreg_opt_mode(struct mdss_vreg *in_vreg, int num_vreg, - enum mdss_vreg_mode mode); +int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable); +int msm_dss_config_vreg_opt_mode(struct dss_vreg *in_vreg, int num_vreg, + enum dss_vreg_mode mode); -int msm_mdss_get_clk(struct device *dev, struct mdss_clk 
*clk_arry, +int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk); -void msm_mdss_put_clk(struct mdss_clk *clk_arry, int num_clk); -int msm_mdss_clk_set_rate(struct mdss_clk *clk_arry, int num_clk); -int msm_mdss_enable_clk(struct mdss_clk *clk_arry, int num_clk, int enable); +void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk); +int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk); +int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable); -int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr, +int dss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr, uint8_t reg_offset, uint8_t *read_buf); -int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr, +int dss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr, uint8_t reg_offset, uint8_t *value); #endif /* __MDSS_IO_UTIL_H__ */ diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 006c253ca45b719798c0b810f495260119beae28..07bd3a99e47f5c4c360fa82870acdb1d26d2fcb1 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -64,10 +64,12 @@ enum MHI_FLAGS { * enum mhi_device_type - Device types * @MHI_XFER_TYPE: Handles data transfer * @MHI_TIMESYNC_TYPE: Use for timesync feature + * @MHI_CONTROLLER_TYPE: Control device */ enum mhi_device_type { MHI_XFER_TYPE, MHI_TIMESYNC_TYPE, + MHI_CONTROLLER_TYPE, }; /** @@ -87,6 +89,7 @@ struct image_info { * @of_node: DT that has MHI configuration information * @regs: Points to base of MHI MMIO register space * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space * @wake_db: MHI WAKE doorbell register address * @dev_id: PCIe device id of the external device * @domain: PCIe domain the device connected to @@ -129,6 +132,7 @@ struct image_info { */ struct mhi_controller { struct list_head node; + struct mhi_device *mhi_dev; /* device node for iommu ops */ struct device *dev; @@ -137,6 +141,7 @@ struct mhi_controller { /* mmio base */ void __iomem *regs; void __iomem *bhi; + void __iomem *bhie; void __iomem *wake_db; /* device topology */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2246cf670badb96e6c11c4d13b233db0c93388f..18c7980d9513f692801a5102cef7f7e6522d6407 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -127,14 +127,14 @@ static inline void __ClearPageMovable(struct page *page) #ifdef CONFIG_NUMA_BALANCING extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, - struct vm_area_struct *vma, int node); + struct vm_fault *vmf, int node); #else static inline bool pmd_trans_migrating(pmd_t pmd) { return false; } static inline int migrate_misplaced_page(struct page *page, - struct vm_area_struct *vma, int node) + struct vm_fault *vmf, int node) { return -EAGAIN; /* can't migrate now */ } diff --git a/include/linux/mm.h b/include/linux/mm.h index d6f98baf9e9633f59b250bbb3ff7a5c51f3899d4..f1b0668551e9da190358c6e35b2ad0aa036c0501 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -292,6 +292,9 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */ #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ +/* Speculative fault, not holding mmap_sem */ +#define FAULT_FLAG_SPECULATIVE 0x200 +#define FAULT_FLAG_PREFAULT_OLD 0x400 /* Make faultaround ptes old */ #define FAULT_FLAG_TRACE \ { FAULT_FLAG_WRITE, 
"WRITE" }, \ @@ -320,6 +323,10 @@ struct vm_fault { gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + unsigned int sequence; + pmd_t orig_pmd; /* value of PMD at the time of fault */ +#endif pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ pud_t *pud; /* Pointer to pud entry matching @@ -350,6 +357,12 @@ struct vm_fault { * page table to avoid allocation from * atomic context. */ + /* + * These entries are required when handling speculative page fault. + * This way the page handling is done using consistent field values. + */ + unsigned long vma_flags; + pgprot_t vma_page_prot; }; /* page entry size for vm->huge_fault() */ @@ -668,9 +681,9 @@ void free_compound_page(struct page *page); * pte_mkwrite. But get_user_pages can cause write faults for mappings * that do not have writing enabled, when used by access_process_vm. */ -static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) +static inline pte_t maybe_mkwrite(pte_t pte, unsigned long vma_flags) { - if (likely(vma->vm_flags & VM_WRITE)) + if (likely(vma_flags & VM_WRITE)) pte = pte_mkwrite(pte); return pte; } @@ -1185,6 +1198,7 @@ static inline void clear_page_pfmemalloc(struct page *page) #define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ +#define VM_FAULT_PTNOTSAME 0x4000 /* Page table entries have changed */ #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ @@ -1239,9 +1253,30 @@ struct zap_details { pgoff_t last_index; /* Highest page->index to unmap */ }; -struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte, bool with_public_device); -#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false) +static inline void INIT_VMA(struct vm_area_struct *vma) +{ + INIT_LIST_HEAD(&vma->anon_vma_chain); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + seqcount_init(&vma->vm_sequence); + atomic_set(&vma->vm_ref_count, 1); +#endif +} + +struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, bool with_public_device, + unsigned long vma_flags); +static inline struct page *_vm_normal_page(struct vm_area_struct *vma, + unsigned long addr, pte_t pte, + bool with_public_device) +{ + return __vm_normal_page(vma, addr, pte, with_public_device, + vma->vm_flags); +} +static inline struct page *vm_normal_page(struct vm_area_struct *vma, + unsigned long addr, pte_t pte) +{ + return _vm_normal_page(vma, addr, pte, false); +} struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); @@ -1321,6 +1356,47 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, unmap_mapping_range(mapping, holebegin, holelen, 0); } +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +static inline void vm_write_begin(struct vm_area_struct *vma) +{ + write_seqcount_begin(&vma->vm_sequence); +} +static inline void vm_write_begin_nested(struct vm_area_struct *vma, + int subclass) +{ + write_seqcount_begin_nested(&vma->vm_sequence, subclass); +} +static inline void vm_write_end(struct vm_area_struct *vma) +{ + write_seqcount_end(&vma->vm_sequence); +} +static inline void vm_raw_write_begin(struct vm_area_struct *vma) +{ + raw_write_seqcount_begin(&vma->vm_sequence); +} +static inline void 
vm_raw_write_end(struct vm_area_struct *vma) +{ + raw_write_seqcount_end(&vma->vm_sequence); +} +#else +static inline void vm_write_begin(struct vm_area_struct *vma) +{ +} +static inline void vm_write_begin_nested(struct vm_area_struct *vma, + int subclass) +{ +} +static inline void vm_write_end(struct vm_area_struct *vma) +{ +} +static inline void vm_raw_write_begin(struct vm_area_struct *vma) +{ +} +static inline void vm_raw_write_end(struct vm_area_struct *vma) +{ +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); @@ -1332,6 +1408,43 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags); + +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +extern int __handle_speculative_fault(struct mm_struct *mm, + unsigned long address, + unsigned int flags, + struct vm_area_struct **vma); +static inline int handle_speculative_fault(struct mm_struct *mm, + unsigned long address, + unsigned int flags, + struct vm_area_struct **vma) +{ + /* + * Try speculative page fault for multithreaded user space task only. + */ + if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) { + *vma = NULL; + return VM_FAULT_RETRY; + } + return __handle_speculative_fault(mm, address, flags, vma); +} +extern bool can_reuse_spf_vma(struct vm_area_struct *vma, + unsigned long address); +#else +static inline int handle_speculative_fault(struct mm_struct *mm, + unsigned long address, + unsigned int flags, + struct vm_area_struct **vma) +{ + return VM_FAULT_RETRY; +} +static inline bool can_reuse_spf_vma(struct vm_area_struct *vma, + unsigned long address) +{ + return false; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); @@ -2088,16 +2201,29 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node); extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, - struct vm_area_struct *expand); + struct vm_area_struct *expand, bool keep_locked); static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) { - return __vma_adjust(vma, start, end, pgoff, insert, NULL); + return __vma_adjust(vma, start, end, pgoff, insert, NULL, false); } -extern struct vm_area_struct *vma_merge(struct mm_struct *, + +extern struct vm_area_struct *__vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, - unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, - struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *); + unsigned long vm_flags, struct anon_vma *anon, struct file *file, + pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff, + const char __user *user, bool keep_locked); + +static inline struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, unsigned long addr, unsigned long end, + unsigned long vm_flags, struct anon_vma *anon, struct file *file, + pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff, + const 
char __user *user) +{ + return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off, + pol, uff, user, false); +} + extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int __split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); @@ -2621,5 +2747,21 @@ void __init setup_nr_node_ids(void); static inline void setup_nr_node_ids(void) {} #endif +extern int want_old_faultaround_pte; + +#ifdef CONFIG_PROCESS_RECLAIM +struct reclaim_param { + struct vm_area_struct *vma; + /* Number of pages scanned */ + int nr_scanned; + /* max pages to reclaim */ + int nr_to_reclaim; + /* pages reclaimed */ + int nr_reclaimed; +}; +extern struct reclaim_param reclaim_task_anon(struct task_struct *task, + int nr_to_reclaim); +#endif + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 561c23ebf4d7c4e54e2e84888ca89af44aebe51f..121a5796bbe4c9b4672e43dc46c48a3c31988cc6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -344,6 +344,10 @@ struct vm_area_struct { struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + seqcount_t vm_sequence; + atomic_t vm_ref_count; /* see vma_get(), vma_put() */ +#endif } __randomize_layout; struct core_thread { @@ -361,6 +365,9 @@ struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + rwlock_t mm_rb_lock; +#endif u32 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f679f526846756b19ebef509a2d58676d8c8f97b..da6bb5bb3658fbeb3fc6d8a91006a8724b6028bb 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -66,6 +66,13 @@ enum migratetype { /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ extern char * const migratetype_names[MIGRATE_TYPES]; +/* + * Returns a list which contains the migrate types on to which + * an allocation falls back when the free list for the migrate + * type mtype is depleted. + * The end of the list is delimited by the type MIGRATE_TYPES. 
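Note on the speculative page fault interface added to mm.h above: the helpers are intended to be called from an architecture's fault handler, which tries the lockless path first and only falls back to the mmap_sem-protected path on VM_FAULT_RETRY. The sketch below shows that calling pattern; the function and its error handling are illustrative only and not part of this patch:

#include <linux/mm.h>
#include <linux/sched.h>

static int example_do_page_fault(struct mm_struct *mm, unsigned long addr,
				 unsigned int flags)
{
	struct vm_area_struct *vma = NULL;
	int fault;

	/* Lockless attempt; only taken for multithreaded user-space faults. */
	fault = handle_speculative_fault(mm, addr, flags, &vma);
	if (fault != VM_FAULT_RETRY)
		return fault;

	/* Classic slow path under mmap_sem. */
	down_read(&mm->mmap_sem);
	/* Reuse the VMA found speculatively if it still covers the address. */
	if (!vma || !can_reuse_spf_vma(vma, addr))
		vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;
	}
	fault = handle_mm_fault(vma, addr, flags);
	up_read(&mm->mmap_sem);
	return fault;
}
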
+ */ +extern int *get_migratetype_fallbacks(int mtype); #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 76b4958d078f3273b0d7789bb915998d455cac41..a08bfd52b36079b54c66018b1487f0c683fdbb6c 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -705,6 +705,8 @@ struct fsl_mc_device_id { const char obj_type[16]; }; +#define MHI_NAME_SIZE 32 + /** * struct mhi_device_id - MHI device identification * @chan: MHI channel name @@ -712,7 +714,7 @@ struct fsl_mc_device_id { */ struct mhi_device_id { - const char *chan; + const char chan[MHI_NAME_SIZE]; kernel_ulong_t driver_data; }; diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h index c41fe65c5d9b7daf126b84a833d787ab37a880e4..32848f9abad58c91e30c4edbe11a22c4a6bbea5d 100644 --- a/include/linux/msm_gsi.h +++ b/include/linux/msm_gsi.h @@ -12,6 +12,7 @@ #ifndef MSM_GSI_H #define MSM_GSI_H #include +#include enum gsi_ver { GSI_VER_ERR = 0, @@ -84,6 +85,9 @@ enum gsi_intr_type { * @irq: IRQ number * @phys_addr: physical address of GSI block * @size: register size of GSI block + * @emulator_intcntrlr_addr: the location of emulator's interrupt control block + * @emulator_intcntrlr_size: the sise of emulator_intcntrlr_addr + * @emulator_intcntrlr_client_isr: client's isr. Called by the emulator's isr * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits * @mhi_er_id_limits: MHI event ring start and end ids * @notify_cb: general notification callback @@ -109,6 +113,9 @@ struct gsi_per_props { unsigned int irq; phys_addr_t phys_addr; unsigned long size; + phys_addr_t emulator_intcntrlr_addr; + unsigned long emulator_intcntrlr_size; + irq_handler_t emulator_intcntrlr_client_isr; bool mhi_er_id_limits_valid; uint32_t mhi_er_id_limits[2]; void (*notify_cb)(struct gsi_per_notify *notify); @@ -221,9 +228,19 @@ enum gsi_max_prefetch { GSI_TWO_PREFETCH_SEG = 0x1 }; +/** + * @GSI_USE_PREFETCH_BUFS: Channel will use normal prefetch buffers if possible + * @GSI_ESCAPE_BUF_ONLY: Channel will always use escape buffers only + * @GSI_SMART_PRE_FETCH: Channel will work in smart prefetch mode. + * relevant starting GSI 2.5 + * @GSI_FREE_PRE_FETCH: Channel will work in free prefetch mode. + * relevant starting GSI 2.5 + */ enum gsi_prefetch_mode { GSI_USE_PREFETCH_BUFS = 0x0, - GSI_ESCAPE_BUF_ONLY = 0x1 + GSI_ESCAPE_BUF_ONLY = 0x1, + GSI_SMART_PRE_FETCH = 0x2, + GSI_FREE_PRE_FETCH = 0x3, }; enum gsi_chan_evt { @@ -315,6 +332,12 @@ enum gsi_chan_use_db_eng { * @max_prefetch: limit number of pre-fetch segments for channel * @low_weight: low channel weight (priority of channel for RE engine * round robin algorithm); must be >= 1 + * @empty_lvl_threshold: + * The thershold number of free entries available in the + * receiving fifos of GSI-peripheral. 
If Smart PF mode + * is used, REE will fetch/send new TRE to peripheral only + * if peripheral's empty_level_count is higher than + * EMPTY_LVL_THRSHOLD defined for this channel * @xfer_cb: transfer notification callback, this callback happens * on event boundaries * @@ -365,6 +388,7 @@ struct gsi_chan_props { enum gsi_max_prefetch max_prefetch; uint8_t low_weight; enum gsi_prefetch_mode prefetch_mode; + uint8_t empty_lvl_threshold; void (*xfer_cb)(struct gsi_chan_xfer_notify *notify); void (*err_cb)(struct gsi_chan_err_notify *notify); void *chan_user_data; @@ -440,6 +464,8 @@ struct gsi_xfer_elem { * gsi_gpi_channel_scratch - GPI protocol SW config area of * channel scratch * + * @dl_nlo_channel: Whether this is DL NLO Channel or not? Relevant for + * GSI 2.5 and above where DL NLO introduced. * @max_outstanding_tre: Used for the prefetch management sequence by the * sequencer. Defines the maximum number of allowed * outstanding TREs in IPA/GSI (in Bytes). RE engine @@ -449,18 +475,23 @@ struct gsi_xfer_elem { * the feature in doorbell mode (DB Mode=1). Maximum * outstanding TREs should be set to 64KB * (or any value larger or equal to ring length . RLEN) + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. * @outstanding_threshold: Used for the prefetch management sequence by the * sequencer. Defines the threshold (in Bytes) as to when * to update the channel doorbell. Should be smaller than * Maximum outstanding TREs. value. It is suggested to * configure this value to 2 * element size. + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. */ struct __packed gsi_gpi_channel_scratch { - uint64_t resvd1; + uint64_t dl_nlo_channel:1; /* Relevant starting GSI 2.5 */ + uint64_t resvd1:63; uint32_t resvd2:16; - uint32_t max_outstanding_tre:16; + uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */ uint32_t resvd3:16; - uint32_t outstanding_threshold:16; + uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */ }; /** @@ -500,12 +531,16 @@ struct __packed gsi_gpi_channel_scratch { * To disable the feature in doorbell mode (DB Mode=1). * Maximum outstanding TREs should be set to 64KB * (or any value larger or equal to ring length . RLEN) + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. * @outstanding_threshold: Used for the prefetch management sequence by the * sequencer. Defines the threshold (in Bytes) as to when * to update the channel doorbell. Should be smaller than * Maximum outstanding TREs. value. It is suggested to * configure this value to min(TLV_FIFO_SIZE/2,8) * * element size. + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. */ struct __packed gsi_mhi_channel_scratch { uint64_t mhi_host_wp_addr; @@ -516,9 +551,9 @@ struct __packed gsi_mhi_channel_scratch { uint32_t polling_mode:1; uint32_t oob_mod_threshold:5; uint32_t resvd2:2; - uint32_t max_outstanding_tre:16; + uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */ uint32_t resvd3:16; - uint32_t outstanding_threshold:16; + uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */ }; /** @@ -543,6 +578,8 @@ struct __packed gsi_mhi_channel_scratch { * To disable the feature in doorbell mode (DB Mode=1) * Maximum outstanding TREs should be set to 64KB * (or any value larger or equal to ring length . RLEN) + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. 
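To make the GSI 2.5 additions above concrete, here is an illustrative sketch (not part of this patch) of how a peripheral driver might request smart prefetch with an empty-level threshold in its channel properties and then program the GPI scratch area. The function names, the .gpi union member, and the threshold value are assumptions for illustration only:

#include <linux/msm_gsi.h>
#include <linux/string.h>

static void example_fill_props(struct gsi_chan_props *props)
{
	props->prefetch_mode = GSI_SMART_PRE_FETCH;	/* GSI 2.5+ only */
	/*
	 * REE pushes new TREs only while the peripheral's receive FIFO
	 * reports more free entries than this threshold.
	 */
	props->empty_lvl_threshold = 8;			/* example value */
	props->low_weight = 1;				/* must be >= 1 */
}

static int example_write_gpi_scratch(unsigned long chan_hdl, bool dl_nlo)
{
	union __packed gsi_channel_scratch scr;

	memset(&scr, 0, sizeof(scr));
	/*
	 * On GSI 2.5+ the outstanding-TRE bookkeeping is handled by the
	 * hardware's smart prefetch, so only the DL NLO flag is set here.
	 */
	scr.gpi.dl_nlo_channel = dl_nlo ? 1 : 0;

	return gsi_write_channel_scratch(chan_hdl, scr);
}
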
* @depcmd_hi_addr: Used to generate "Update Transfer" command * @outstanding_threshold: Used for the prefetch management sequence by the * sequencer. Defines the threshold (in Bytes) as to when @@ -550,6 +587,8 @@ struct __packed gsi_mhi_channel_scratch { * Maximum outstanding TREs. value. It is suggested to * configure this value to 2 * element size. for MBIM the * suggested configuration is the element size. + * The field is irrelevant starting GSI 2.5 where smart + * prefetch implemented by the H/W. */ struct __packed gsi_xdci_channel_scratch { uint32_t last_trb_addr:16; @@ -559,9 +598,9 @@ struct __packed gsi_xdci_channel_scratch { uint32_t depcmd_low_addr; uint32_t depcmd_hi_addr:8; uint32_t resvd2:8; - uint32_t max_outstanding_tre:16; + uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */ uint32_t resvd3:16; - uint32_t outstanding_threshold:16; + uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */ }; /** @@ -848,6 +887,34 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, int gsi_write_channel_scratch(unsigned long chan_hdl, union __packed gsi_channel_scratch val); +/** + * gsi_read_channel_scratch - Peripheral should call this function to + * read to the scratch area of the channel context + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @val: Read value + * + * @Return gsi_status + */ +int gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch *val); + +/** + * gsi_update_mhi_channel_scratch - MHI Peripheral should call this + * function to update the scratch area of the channel context. Updating + * will be by read-modify-write method, so non SWI fields will not be + * affected + * + * @chan_hdl: Client handle previously obtained from + * gsi_alloc_channel + * @mscr: MHI Channel Scratch value + * + * @Return gsi_status + */ +int gsi_update_mhi_channel_scratch(unsigned long chan_hdl, + struct __packed gsi_mhi_channel_scratch mscr); + /** * gsi_start_channel - Peripheral should call this function to * start a channel i.e put into running state @@ -1069,11 +1136,12 @@ int gsi_start_xfer(unsigned long chan_hdl); * @gsi_base_addr: Base address of GSI register space * @gsi_size: Mapping size of the GSI register space * @per_base_addr: Base address of the peripheral using GSI + * @ver: GSI core version * * @Return gsi_status */ int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size, - phys_addr_t per_base_addr); + phys_addr_t per_base_addr, enum gsi_ver ver); /** * gsi_enable_fw - Peripheral should call this function @@ -1094,11 +1162,12 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver); * * @base_offset:[OUT] - IRAM base offset address * @size: [OUT] - IRAM size + * @ver: GSI core version * @Return none */ void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, - unsigned long *size); + unsigned long *size, enum gsi_ver ver); /** * gsi_halt_channel_ee - Peripheral should call this function @@ -1125,6 +1194,7 @@ int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code); * gsi_alloc_channel (for as many channels as needed; channels can have * no event ring, an exclusive event ring or a shared event ring) * gsi_write_channel_scratch + * gsi_read_channel_scratch * gsi_start_channel * gsi_queue_xfer/gsi_start_xfer * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on @@ -1209,6 +1279,18 @@ static inline int gsi_write_channel_scratch(unsigned long chan_hdl, return 
-GSI_STATUS_UNSUPPORTED_OP; } +static inline int gsi_read_channel_scratch(unsigned long chan_hdl, + union __packed gsi_channel_scratch *val) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + +static inline int gsi_update_mhi_channel_scratch(unsigned long chan_hdl, + struct __packed gsi_mhi_channel_scratch mscr) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + static inline int gsi_start_channel(unsigned long chan_hdl) { return -GSI_STATUS_UNSUPPORTED_OP; @@ -1308,18 +1390,19 @@ static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, } static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size, - phys_addr_t per_base_addr) + phys_addr_t per_base_addr, enum gsi_ver ver) { return -GSI_STATUS_UNSUPPORTED_OP; } -static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size) +static inline int gsi_enable_fw( + phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver) { return -GSI_STATUS_UNSUPPORTED_OP; } static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset, - unsigned long *size) + unsigned long *size, enum gsi_ver ver) { } diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index b5b43f94f311626ee364157515c4342aba976e6f..01b990e4b228a90ef26bc302d8b8476293a81869 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd); ({ \ int i, ret = 1; \ for (i = 0; i < map_words(map); i++) { \ - if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ + if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \ ret = 0; \ break; \ } \ diff --git a/include/linux/of.h b/include/linux/of.h index 13eb8bc92c1096accd59c2b28e37f465617d9ec2..5081debc56dfdcfc5c7827b2fab5734f4069dc71 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -39,7 +39,9 @@ struct property { struct property *next; unsigned long _flags; unsigned int unique_id; +#if defined(CONFIG_OF_KOBJ) struct bin_attribute attr; +#endif }; #if defined(CONFIG_SPARC) @@ -58,7 +60,9 @@ struct device_node { struct device_node *parent; struct device_node *child; struct device_node *sibling; +#if defined(CONFIG_OF_KOBJ) struct kobject kobj; +#endif unsigned long _flags; void *data; #if defined(CONFIG_SPARC) @@ -103,21 +107,17 @@ extern struct kobj_type of_node_ktype; extern const struct fwnode_operations of_fwnode_ops; static inline void of_node_init(struct device_node *node) { +#if defined(CONFIG_OF_KOBJ) kobject_init(&node->kobj, &of_node_ktype); +#endif node->fwnode.ops = &of_fwnode_ops; } -/* true when node is initialized */ -static inline int of_node_is_initialized(struct device_node *node) -{ - return node && node->kobj.state_initialized; -} - -/* true when node is attached (i.e. 
present on sysfs) */ -static inline int of_node_is_attached(struct device_node *node) -{ - return node && node->kobj.state_in_sysfs; -} +#if defined(CONFIG_OF_KOBJ) +#define of_node_kobj(n) (&(n)->kobj) +#else +#define of_node_kobj(n) NULL +#endif #ifdef CONFIG_OF_DYNAMIC extern struct device_node *of_node_get(struct device_node *node); diff --git a/include/linux/oom.h b/include/linux/oom.h index efce1a078d88ae8758e33e4cfebfdd5082c35247..395ac25d0cfcab8e5ab7e2925176a8bcc8575046 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -116,6 +116,8 @@ extern struct task_struct *find_lock_task_mm(struct task_struct *p); extern void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask); +extern void wake_oom_reaper(struct task_struct *tsk); + /* sysctls */ extern int sysctl_oom_dump_tasks; extern int sysctl_oom_kill_allocating_task; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 9369e9cea2955f282086b41425595bc4dcee3a69..51a9a0af32812dd645ec3e4569f66baf04eed7ce 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -452,8 +452,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, pgoff_t pgoff; if (unlikely(is_vm_hugetlb_page(vma))) return linear_hugepage_index(vma, address); - pgoff = (address - vma->vm_start) >> PAGE_SHIFT; - pgoff += vma->vm_pgoff; + pgoff = (address - READ_ONCE(vma->vm_start)) >> PAGE_SHIFT; + pgoff += READ_ONCE(vma->vm_pgoff); return pgoff; } diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index b1f37a89e368683233499bc5942b43e353b637a3..79b99d653e030d113e4401fc26c7b47e81dcff8c 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -133,7 +133,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, lock_release(&sem->rw_sem.dep_map, 1, ip); #ifdef CONFIG_RWSEM_SPIN_ON_OWNER if (!read) - sem->rw_sem.owner = NULL; + sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN; #endif } @@ -141,6 +141,10 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) { lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + if (!read) + sem->rw_sem.owner = current; +#endif } #endif diff --git a/include/linux/pfk.h b/include/linux/pfk.h new file mode 100644 index 0000000000000000000000000000000000000000..3c7a389fd4d4e2e06822fd7fbee330d868b7548e --- /dev/null +++ b/include/linux/pfk.h @@ -0,0 +1,57 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef PFK_H_ +#define PFK_H_ + +#include + +struct ice_crypto_setting; + +#ifdef CONFIG_PFK + +int pfk_load_key_start(const struct bio *bio, + struct ice_crypto_setting *ice_setting, bool *is_pfe, bool); +int pfk_load_key_end(const struct bio *bio, bool *is_pfe); +int pfk_remove_key(const unsigned char *key, size_t key_size); +bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2); +void pfk_clear_on_reset(void); + +#else +static inline int pfk_load_key_start(const struct bio *bio, + struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async) +{ + return -ENODEV; +} + +static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe) +{ + return -ENODEV; +} + +static inline int pfk_remove_key(const unsigned char *key, size_t key_size) +{ + return -ENODEV; +} + +static inline bool pfk_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2) +{ + return true; +} + +static inline void pfk_clear_on_reset(void) +{} + +#endif /* CONFIG_PFK */ + +#endif /* PFK_H */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index a4fe711aea33d521f6b5ba72d43fb58b58a01900..4ed565058a7f10e2ca32e3127cd76f361e1d46ba 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -295,6 +295,9 @@ enum power_supply_property { POWER_SUPPLY_PROP_RECHARGE_SOC, POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED, POWER_SUPPLY_PROP_SMB_EN_MODE, + POWER_SUPPLY_PROP_ESR_ACTUAL, + POWER_SUPPLY_PROP_ESR_NOMINAL, + POWER_SUPPLY_PROP_SOH, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, /* Properties of type `const char *' */ diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 8ff402f20cdd438085a81f72ea4b076f138a9379..b8dd63a2614c1fc00712cab26eb261e6690f349a 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -154,6 +154,7 @@ struct se_geni_rsc { /* FW_REVISION_RO fields */ #define FW_REV_PROTOCOL_MSK (GENMASK(15, 8)) #define FW_REV_PROTOCOL_SHFT (8) +#define FW_REV_VERSION_MSK (GENMASK(7, 0)) /* GENI_CLK_SEL fields */ #define CLK_SEL_MSK (GENMASK(2, 0)) @@ -404,6 +405,22 @@ void geni_write_reg(unsigned int value, void __iomem *base, int offset); */ int get_se_proto(void __iomem *base); +/** + * get_se_m_fw() - Read the firmware version for the Main sequencer engine + * @base: Base address of the serial engine's register block. + * + * Return: Firmware version for the Main sequencer engine + */ +int get_se_m_fw(void __iomem *base); + +/** + * get_se_s_fw() - Read the firmware version for the Secondary sequencer engine + * @base: Base address of the serial engine's register block. + * + * Return: Firmware version for the Secondary sequencer engine + */ +int get_se_s_fw(void __iomem *base); + /** * geni_se_init() - Initialize the GENI Serial Engine * @base: Base address of the serial engine's register block. 
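Alongside get_se_proto(), the two new accessors make it straightforward to report which firmware a serial engine is running. A small sketch, assuming only the qcom-geni-se.h declarations above; the function name and pr_info() formatting are illustrative.

  #include <linux/qcom-geni-se.h>

  /* Sketch: dump the protocol and firmware versions of one serial engine. */
  static void example_log_se_fw(void __iomem *se_base)
  {
          int proto = get_se_proto(se_base);
          int m_fw = get_se_m_fw(se_base);
          int s_fw = get_se_s_fw(se_base);

          pr_info("SE protocol %d, main FW version %d, secondary FW version %d\n",
                  proto, m_fw, s_fw);
  }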
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h index f998a8c8121436ceb31b006a1585726d84f0d8f5..8ab6e3336a5bd3891e026e6531c01bb674c9bf44 100644 --- a/include/linux/qpnp/qpnp-revid.h +++ b/include/linux/qpnp/qpnp-revid.h @@ -186,6 +186,9 @@ #define PM8150L_SUBTYPE 0x1F #define PM8150B_SUBTYPE 0x20 +#define PM6150_SUBTYPE 0x28 +#define PM6150L_SUBTYPE 0x1F + /* PMI632 */ #define PMI632_SUBTYPE 0x25 @@ -255,6 +258,21 @@ #define PM8150L_V3P0_REV3 0x00 #define PM8150L_V3P0_REV4 0x03 +#define PM6150_V1P0_REV1 0x00 +#define PM6150_V1P0_REV2 0x00 +#define PM6150_V1P0_REV3 0x00 +#define PM6150_V1P0_REV4 0x01 + +#define PM6150_V1P1_REV1 0x00 +#define PM6150_V1P1_REV2 0x00 +#define PM6150_V1P1_REV3 0x01 +#define PM6150_V1P1_REV4 0x01 + +#define PM6150_V2P0_REV1 0x00 +#define PM6150_V2P0_REV2 0x00 +#define PM6150_V2P0_REV3 0x00 +#define PM6150_V2P0_REV4 0x02 + /* PMI8998 FAB_ID */ #define PMI8998_FAB_ID_SMIC 0x11 #define PMI8998_FAB_ID_GF 0x30 diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 988d176472df75342b307cb40b809049f0ed64cb..7268a54b895669f69828f72685ce099edc39f9b2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -12,6 +12,11 @@ #include #include +extern int isolate_lru_page(struct page *page); +extern void putback_lru_page(struct page *page); +extern unsigned long reclaim_pages_from_list(struct list_head *page_list, + struct vm_area_struct *vma); + /* * The anon_vma heads a list of private "related" vmas, to scan if * an anonymous page pointing to this anon_vma needs to be unmapped: @@ -174,8 +179,16 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool); void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, bool); +void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, + unsigned long address, bool compound); +static inline void page_add_new_anon_rmap(struct page *page, + struct vm_area_struct *vma, + unsigned long address, bool compound) +{ + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); + __page_add_new_anon_rmap(page, vma, address, compound); +} + void page_add_file_rmap(struct page *, bool); void page_remove_rmap(struct page *, bool); @@ -195,7 +208,8 @@ static inline void page_dup_rmap(struct page *page, bool compound) int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -bool try_to_unmap(struct page *, enum ttu_flags flags); +bool try_to_unmap(struct page *page, enum ttu_flags flags, + struct vm_area_struct *vma); /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) @@ -261,6 +275,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); */ struct rmap_walk_control { void *arg; + struct vm_area_struct *target_vma; /* * Return false if page table scanning in rmap_walk should be stopped. * Otherwise, return true. 
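Taken together, the per-VMA helpers added to rmap.h above let a caller push one mapped page of a chosen VMA through reclaim. A hedged sketch, assuming the caller already holds the references the reclaim path expects and that reclaim_pages_from_list() returns anything it cannot free to the LRU; the function name is illustrative.

  #include <linux/mm.h>
  #include <linux/rmap.h>

  /* Sketch: reclaim a single mapped page from a specific VMA. */
  static unsigned long example_reclaim_one(struct page *page,
                                           struct vm_area_struct *vma)
  {
          LIST_HEAD(page_list);

          if (isolate_lru_page(page))
                  return 0;       /* not on the LRU (or already isolated) */

          list_add(&page->lru, &page_list);
          return reclaim_pages_from_list(&page_list, vma);
  }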
@@ -289,7 +304,7 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -#define try_to_unmap(page, refs) false +#define try_to_unmap(page, refs, vma) false static inline int page_mkclean(struct page *page) { diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index dfa34d8034399c80eef05c707e36f839510dec92..c427ffaa49048c851689a6f09cfc9dfe5061bee5 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -44,6 +44,12 @@ struct rw_semaphore { #endif }; +/* + * Setting bit 0 of the owner field with other non-zero bits will indicate + * that the rwsem is writer-owned with an unknown owner. + */ +#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L) + extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); diff --git a/include/linux/sched.h b/include/linux/sched.h index 2790c40436c02ec509f4bb3dba3d3f5e2a0e065b..8814051c5a8e24f4b52ec18442fa19f1b5da0ec5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -113,17 +113,36 @@ struct task_group; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP +/* + * Special states are those that do not use the normal wait-loop pattern. See + * the comment with set_special_state(). + */ +#define is_special_task_state(state) \ + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) + #define __set_current_state(state_value) \ do { \ + WARN_ON_ONCE(is_special_task_state(state_value));\ current->task_state_change = _THIS_IP_; \ current->state = (state_value); \ } while (0) + #define set_current_state(state_value) \ do { \ + WARN_ON_ONCE(is_special_task_state(state_value));\ current->task_state_change = _THIS_IP_; \ smp_store_mb(current->state, (state_value)); \ } while (0) +#define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ + WARN_ON_ONCE(!is_special_task_state(state_value)); \ + raw_spin_lock_irqsave(¤t->pi_lock, flags); \ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ + } while (0) #else /* * set_current_state() includes a barrier so that the write of current->state @@ -145,8 +164,8 @@ struct task_group; * * The above is typically ordered against the wakeup, which does: * - * need_sleep = false; - * wake_up_state(p, TASK_UNINTERRUPTIBLE); + * need_sleep = false; + * wake_up_state(p, TASK_UNINTERRUPTIBLE); * * Where wake_up_state() (and all other wakeup primitives) imply enough * barriers to order the store of the variable against wakeup. @@ -155,12 +174,33 @@ struct task_group; * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). * - * This is obviously fine, since they both store the exact same value. + * However, with slightly different timing the wakeup TASK_RUNNING store can + * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not + * a problem either because that will result in one extra go around the loop + * and our @cond test will save the day. * * Also see the comments of try_to_wake_up(). 
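The wait-loop shape that the comment above assumes looks like the sketch below. Any state used in such a loop must be a normal sleep state; stopped/traced transitions instead go through set_special_state(), which serializes against wakeups so a concurrent TASK_RUNNING store cannot be lost. Names here are illustrative.

  #include <linux/sched.h>

  /* The canonical condition-based wait loop the comment describes. */
  static void example_wait_for(bool *cond)
  {
          for (;;) {
                  set_current_state(TASK_UNINTERRUPTIBLE);
                  if (READ_ONCE(*cond))
                          break;
                  schedule();
          }
          __set_current_state(TASK_RUNNING);
  }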
*/ -#define __set_current_state(state_value) do { current->state = (state_value); } while (0) -#define set_current_state(state_value) smp_store_mb(current->state, (state_value)) +#define __set_current_state(state_value) \ + current->state = (state_value) + +#define set_current_state(state_value) \ + smp_store_mb(current->state, (state_value)) + +/* + * set_special_state() should be used for those states when the blocking task + * can not use the regular condition based wait-loop. In that case we must + * serialize against wakeups such that any possible in-flight TASK_RUNNING stores + * will not collide with our state change. + */ +#define set_special_state(state_value) \ + do { \ + unsigned long flags; /* may shadow */ \ + raw_spin_lock_irqsave(¤t->pi_lock, flags); \ + current->state = (state_value); \ + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ + } while (0) + #endif /* Task command name length: */ @@ -1530,6 +1570,8 @@ static inline bool is_percpu_thread(void) #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ +#define PFA_LMK_WAITING 5 /* Lowmemorykiller is waiting */ + #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ @@ -1561,6 +1603,9 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +TASK_PFA_TEST(LMK_WAITING, lmk_waiting) +TASK_PFA_SET(LMK_WAITING, lmk_waiting) + static inline void current_restore_flags(unsigned long orig_flags, unsigned long flags) { diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 3d49b91b674d75640d9eebdb0bff240434e2a6dd..c0ae949102773b3e358db5ce90f607847db76b02 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -84,7 +84,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm) } /* mmput gets rid of the mappings and all user-space */ -extern void mmput(struct mm_struct *); +extern int mmput(struct mm_struct *mm); #ifdef CONFIG_MMU /* same as above but performs the slow path from the async context. 
Can * be called from the atomic context as well diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0aa4548fb49293b858ded19f056112cdefded7be..fbf86ecd149d3826f519964dd61e6ef15efb2450 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void) { spin_lock_irq(¤t->sighand->siglock); if (current->jobctl & JOBCTL_STOP_DEQUEUED) - __set_current_state(TASK_STOPPED); + set_special_state(TASK_STOPPED); spin_unlock_irq(¤t->sighand->siglock); schedule(); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 2df98acfeb5aed188e14024a4eaa436b58e83bbd..2dd27ce2840d98337ff000c00bc74b4c8c77dd5f 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -40,6 +40,7 @@ extern unsigned int sysctl_sched_boost; extern unsigned int sysctl_sched_group_upmigrate_pct; extern unsigned int sysctl_sched_group_downmigrate_pct; extern unsigned int sysctl_sched_walt_rotate_big_tasks; +extern unsigned int sysctl_sched_min_task_util_for_boost_colocation; extern int walt_proc_update_handler(struct ctl_table *table, int write, diff --git a/include/linux/security.h b/include/linux/security.h index 73f1ef625d40c900430778fab29f8bad6cd2e029..30fb23a4ca81ff5717da79e2f8a8ca5e513b4dcb 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -31,6 +31,7 @@ #include #include #include +#include struct linux_binprm; struct cred; @@ -270,6 +271,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const char **name, void **value, size_t *len); int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode); +int security_inode_post_create(struct inode *dir, struct dentry *dentry, + umode_t mode); int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int security_inode_unlink(struct inode *dir, struct dentry *dentry); @@ -664,6 +667,13 @@ static inline int security_inode_create(struct inode *dir, return 0; } +static inline int security_inode_post_create(struct inode *dir, + struct dentry *dentry, + umode_t mode) +{ + return 0; +} + static inline int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) diff --git a/include/linux/slimbus/slimbus.h b/include/linux/slimbus/slimbus.h index 60f0c33fb225f6ff6dd032dabb4fc4353d8524cc..2983e79487814caf52bc40887d6496655ca6bda1 100644 --- a/include/linux/slimbus/slimbus.h +++ b/include/linux/slimbus/slimbus.h @@ -684,6 +684,7 @@ struct slim_pending_ch { * first time it has reported present. * @dev_list: List of devices on a controller * @wd: Work structure associated with workqueue for presence notification + * @device_reset: Work structure for device reset notification * @sldev_reconf: Mutex to protect the pending data-channel lists. * @pending_msgsl: Message bandwidth reservation request by this client in * slots that's pending reconfiguration. 
@@ -706,6 +707,7 @@ struct slim_device { bool notified; struct list_head dev_list; struct work_struct wd; + struct work_struct device_reset; struct mutex sldev_reconf; u32 pending_msgsl; u32 cur_msgsl; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 39fa09bcde23f26a6e12162eb79b389b4c856709..2038ab5316161a1bcf8cb40355086b08c6392454 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -151,8 +151,12 @@ struct kmem_cache { #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS +void sysfs_slab_unlink(struct kmem_cache *); void sysfs_slab_release(struct kmem_cache *); #else +static inline void sysfs_slab_unlink(struct kmem_cache *s) +{ +} static inline void sysfs_slab_release(struct kmem_cache *s) { } diff --git a/include/linux/soc/qcom/cdsprm.h b/include/linux/soc/qcom/cdsprm.h new file mode 100644 index 0000000000000000000000000000000000000000..828446961bcfea1b1cec15c6836b403338ce41df --- /dev/null +++ b/include/linux/soc/qcom/cdsprm.h @@ -0,0 +1,52 @@ +/* + * cdsprm.h + * + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * This header is for cdspl3 devfreq governor in drivers/devfreq. + */ + +#ifndef __QCOM_CDSPRM_H__ +#define __QCOM_CDSPRM_H__ + +/** + * struct cdsprm_l3 - register with set L3 clock frequency method + * @set_l3_freq: Sets desired L3 clock frequency in kilo-hertz. + * cdsprm module would call this method to set L3 + * clock frequency as requested by CDSP subsystem. + */ +struct cdsprm_l3 { + int (*set_l3_freq)(unsigned int freq_khz); +}; + +/** + * cdsprm_register_cdspl3gov() - Register a method to set L3 clock + * frequency + * @arg: cdsprm_l3 structure with set L3 clock frequency method + * + * Note: To be called from cdspl3 governor only. Called when the governor is + * started. + */ +void cdsprm_register_cdspl3gov(struct cdsprm_l3 *arg); + +/** + * cdsprm_unregister_cdspl3gov() - Unregister the method to set L3 clock + * frequency + * + * Note: To be called from cdspl3 governor only. Called when the governor is + * stopped + */ +void cdsprm_unregister_cdspl3gov(void); + +#endif diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h index e8f0f852968f1326edb7b261708d5a5cee49f030..c0c5c5b73dc0b0949043881e247a90aff5cf8a03 100644 --- a/include/linux/stringhash.h +++ b/include/linux/stringhash.h @@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash) * losing bits). This also has the property (wanted by the dcache) * that the msbits make a good hash table index. 
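A sketch of the governor side of the cdsprm interface above: register a set_l3_freq() hook when the cdspl3 governor starts and drop it when it stops. The frequency-programming body and the example_* names are placeholders; the registration calls come from the header.

  #include <linux/soc/qcom/cdsprm.h>

  static int example_apply_l3_khz(unsigned int freq_khz)
  {
          /* Program the L3 clock / bus vote here; return 0 on success. */
          return 0;
  }

  static struct cdsprm_l3 example_l3_ops = {
          .set_l3_freq = example_apply_l3_khz,
  };

  static void example_governor_start(void)
  {
          cdsprm_register_cdspl3gov(&example_l3_ops);
  }

  static void example_governor_stop(void)
  {
          cdsprm_unregister_cdspl3gov();
  }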
*/ -static inline unsigned long end_name_hash(unsigned long hash) +static inline unsigned int end_name_hash(unsigned long hash) { - return __hash_32((unsigned int)hash); + return hash_long(hash, 32); } /* diff --git a/include/linux/swap.h b/include/linux/swap.h index 4d128336ed6835dd95dea06e653271dca5323b87..827aa3d26100b1a483dab394b5592650569990b3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -332,8 +332,14 @@ extern void swap_setup(void); extern void add_page_to_unevictable_list(struct page *page); -extern void lru_cache_add_active_or_unevictable(struct page *page, - struct vm_area_struct *vma); +extern void __lru_cache_add_active_or_unevictable(struct page *page, + unsigned long vma_flags); + +static inline void lru_cache_add_active_or_unevictable(struct page *page, + struct vm_area_struct *vma) +{ + return __lru_cache_add_active_or_unevictable(page, vma->vm_flags); +} /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 8c71874e84852b5cefb9b9bf0a06a877457eaf86..c21a1f170768714627f4509951d93719ac76e8d3 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -43,6 +43,7 @@ struct sysrq_key_op { * are available -- else NULL's). */ +bool sysrq_on(void); void handle_sysrq(int key); void __handle_sysrq(int key, bool check_mask); int register_sysrq_key(int key, struct sysrq_key_op *op); diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h index 974c3796a23fc70b23e5873e1c5151cca6a7acd4..5d9b02a9042164c0681b1b46c4fe211ab1c15b39 100644 --- a/include/linux/usb/msm_hsusb_hw.h +++ b/include/linux/usb/msm_hsusb_hw.h @@ -64,6 +64,7 @@ #define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ #define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ #define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */ +#define PHY_CLAMP_DPDMSE_EN (1 << 21) /* PHY mpm DP DM clamp enable */ /* OTG definitions */ #define OTGSC_INTSTS_MASK (0x7f << 16) @@ -74,4 +75,29 @@ #define OTGSC_IDIE (1 << 24) #define OTGSC_BSVIE (1 << 27) +/* USB PHY CSR registers and bit definitions */ + +#define USB_PHY_CSR_PHY_CTRL_COMMON0 (MSM_USB_PHY_CSR_BASE + 0x078) +#define SIDDQ BIT(2) + +#define USB_PHY_CSR_PHY_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x08C) +#define ID_HV_CLAMP_EN_N BIT(1) + +#define USB_PHY_CSR_PHY_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x094) +#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(2) + +#define USB2_PHY_USB_PHY_IRQ_CMD (MSM_USB_PHY_CSR_BASE + 0x0D0) +#define USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS (MSM_USB_PHY_CSR_BASE + 0x05C) + +#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR0 (MSM_USB_PHY_CSR_BASE + 0x0DC) +#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR1 (MSM_USB_PHY_CSR_BASE + 0x0E0) + +#define USB2_PHY_USB_PHY_INTERRUPT_MASK1 (MSM_USB_PHY_CSR_BASE + 0x0D8) + +#define USB_PHY_IDDIG_1_0 BIT(7) + +#define USB_PHY_IDDIG_RISE_MASK BIT(0) +#define USB_PHY_IDDIG_FALL_MASK BIT(1) +#define USB_PHY_ID_MASK (USB_PHY_IDDIG_RISE_MASK | USB_PHY_IDDIG_FALL_MASK) + #endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index f144216febc642fd70512df9dddefe1a7f119478..9397628a196714dc2177552465fe91fd18b9627d 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, struct virtio_net_hdr *hdr, bool little_endian, - bool has_data_valid) + bool has_data_valid, + int 
vlan_hlen) { memset(hdr, 0, sizeof(*hdr)); /* no info leak */ @@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - if (skb_vlan_tag_present(skb)) - hdr->csum_start = __cpu_to_virtio16(little_endian, - skb_checksum_start_offset(skb) + VLAN_HLEN); - else - hdr->csum_start = __cpu_to_virtio16(little_endian, - skb_checksum_start_offset(skb)); + hdr->csum_start = __cpu_to_virtio16(little_endian, + skb_checksum_start_offset(skb) + vlan_hlen); hdr->csum_offset = __cpu_to_virtio16(little_endian, skb->csum_offset); } else if (has_data_valid && diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index d3a8dbee8038c49a110c380d705c0dc82588917c..ffd92cc5a03964ffc22c721f08d807e734537648 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -110,6 +110,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, +#endif +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + SPECULATIVE_PGFAULT, #endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 61e6fddfb26fd4c3d6870fd4ee8d451c5e10144c..de86c6b946c7c7af83dda5c22023039499b7ec68 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -16,6 +16,7 @@ struct vmpressure { unsigned long tree_scanned; unsigned long tree_reclaimed; + unsigned long stall; /* The lock is used to keep the scanned/reclaimed above in sync. */ struct spinlock sr_lock; @@ -29,11 +30,13 @@ struct vmpressure { struct mem_cgroup; -#ifdef CONFIG_MEMCG +extern int vmpressure_notifier_register(struct notifier_block *nb); +extern int vmpressure_notifier_unregister(struct notifier_block *nb); extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); +#ifdef CONFIG_MEMCG extern void vmpressure_init(struct vmpressure *vmpr); extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); @@ -44,9 +47,9 @@ extern int vmpressure_register_event(struct mem_cgroup *memcg, extern void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); #else -static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, - unsigned long scanned, unsigned long reclaimed) {} -static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, - int prio) {} +static inline struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) +{ + return NULL; +} #endif /* CONFIG_MEMCG */ #endif /* __LINUX_VMPRESSURE_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index dd97d8b7e852d0528a3f6e135794d8c5bafdfd5a..8433c14bd8edc95f84edb72c99aa3ab6ae055cf8 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -26,6 +26,11 @@ /* Indicate backport support for processing user cell base hint */ #define CFG80211_USER_HINT_CELL_BASE_SELF_MANAGED 1 +/* Backport support for DFS offload */ +#define CFG80211_DFS_OFFLOAD_BACKPORT 1 + +/* Indicate backport support for external authentication*/ +#define CFG80211_EXTERNAL_AUTH_SUPPORT 1 /** * DOC: Introduction @@ -1778,6 +1783,8 @@ enum cfg80211_signal_type { * by %parent_bssid. * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to * the BSS that requested the scan in which the beacon/probe was received. 
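Referring back to the virtio_net_hdr_from_skb() change earlier in this hunk: with the new vlan_hlen parameter the VLAN decision moves to the call site. A sketch of a typical caller, mirroring the logic that was removed from the helper; the wrapper name is illustrative.

  #include <linux/if_vlan.h>
  #include <linux/virtio_net.h>

  static int example_build_vnet_hdr(struct sk_buff *skb,
                                    struct virtio_net_hdr *hdr,
                                    bool little_endian)
  {
          /* The tag will be re-inserted ahead of the payload, so shift csum_start */
          int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;

          return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
                                         vlan_hlen);
  }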
+ * @chains: bitmask for filled values in @chain_signal. + * @chain_signal: per-chain signal strength of last received BSS in dBm. */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; @@ -1786,6 +1793,8 @@ struct cfg80211_inform_bss { u64 boottime_ns; u64 parent_tsf; u8 parent_bssid[ETH_ALEN] __aligned(2); + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; }; /** @@ -1829,6 +1838,8 @@ struct cfg80211_bss_ies { * that holds the beacon data. @beacon_ies is still valid, of course, and * points to the same data as hidden_beacon_bss->beacon_ies in that case. * @signal: signal strength value (type depends on the wiphy's signal_type) + * @chains: bitmask for filled values in @chain_signal. + * @chain_signal: per-chain signal strength of last received BSS in dBm. * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { @@ -1847,6 +1858,8 @@ struct cfg80211_bss { u16 capability; u8 bssid[ETH_ALEN]; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 priv[0] __aligned(sizeof(void *)); }; @@ -1900,11 +1913,16 @@ struct cfg80211_auth_request { * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) * @ASSOC_REQ_DISABLE_VHT: Disable VHT * @ASSOC_REQ_USE_RRM: Declare RRM capability in this association + * @CONNECT_REQ_EXTERNAL_AUTH_SUPPORT: User space indicates external + * authentication capability. Drivers can offload authentication to + * userspace if this flag is set. Only applicable for cfg80211_connect() + * request (connect callback). */ enum cfg80211_assoc_req_flags { - ASSOC_REQ_DISABLE_HT = BIT(0), - ASSOC_REQ_DISABLE_VHT = BIT(1), - ASSOC_REQ_USE_RRM = BIT(2), + ASSOC_REQ_DISABLE_HT = BIT(0), + ASSOC_REQ_DISABLE_VHT = BIT(1), + ASSOC_REQ_USE_RRM = BIT(2), + CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3), }; /** @@ -2590,6 +2608,33 @@ struct cfg80211_pmk_conf { const u8 *pmk_r0_name; }; +/** + * struct cfg80211_external_auth_params - Trigger External authentication. + * + * Commonly used across the external auth request and event interfaces. + * + * @action: action type / trigger for external authentication. Only significant + * for the authentication request event interface (driver to user space). + * @bssid: BSSID of the peer with which the authentication has + * to happen. Used by both the authentication request event and + * authentication response command interface. + * @ssid: SSID of the AP. Used by both the authentication request event and + * authentication response command interface. + * @key_mgmt_suite: AKM suite of the respective authentication. Used by the + * authentication request event interface. + * @status: status code, %WLAN_STATUS_SUCCESS for successful authentication, + * use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you + * the real status code for failures. Used only for the authentication + * response command interface (user space to driver). + */ +struct cfg80211_external_auth_params { + enum nl80211_external_auth_action action; + u8 bssid[ETH_ALEN] __aligned(2); + struct cfg80211_ssid ssid; + unsigned int key_mgmt_suite; + u16 status; +}; + /** * struct cfg80211_ops - backend description for wireless configuration * @@ -2913,6 +2958,9 @@ struct cfg80211_pmk_conf { * (invoked with the wireless_dev mutex held) * @del_pmk: delete the previously configured PMK for the given authenticator. 
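A sketch of the driver side of the external-authentication flow described above: when authentication is to be offloaded, the driver fills cfg80211_external_auth_params and hands it to cfg80211_external_auth_request(), declared later in this patch. The NL80211_EXTERNAL_AUTH_START enumerator name follows the mainline nl80211 definition and the example_* names are illustrative.

  #include <net/cfg80211.h>

  static int example_trigger_external_auth(struct net_device *ndev,
                                           const u8 *bssid,
                                           const u8 *ssid, size_t ssid_len,
                                           unsigned int akm_suite)
  {
          struct cfg80211_external_auth_params params = {
                  .action = NL80211_EXTERNAL_AUTH_START,
                  .key_mgmt_suite = akm_suite,
          };

          memcpy(params.bssid, bssid, ETH_ALEN);
          params.ssid.ssid_len = min(ssid_len, sizeof(params.ssid.ssid));
          memcpy(params.ssid.ssid, ssid, params.ssid.ssid_len);

          return cfg80211_external_auth_request(ndev, &params, GFP_KERNEL);
  }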
* (invoked with the wireless_dev mutex held) + * + * @external_auth: indicates result of offloaded authentication processing from + * user space */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -3206,6 +3254,8 @@ struct cfg80211_ops { const struct cfg80211_pmk_conf *conf); int (*del_pmk)(struct wiphy *wiphy, struct net_device *dev, const u8 *aa); + int (*external_auth)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_external_auth_params *params); }; /* @@ -3254,7 +3304,6 @@ struct cfg80211_ops { * beaconing mode (AP, IBSS, Mesh, ...). * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation * before connection. - * @WIPHY_FLAG_DFS_OFFLOAD: The driver handles all the DFS related operations. */ enum wiphy_flags { /* use hole at 0 */ @@ -3281,7 +3330,6 @@ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22), WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23), WIPHY_FLAG_HAS_STATIC_WEP = BIT(24), - WIPHY_FLAG_DFS_OFFLOAD = BIT(25) }; /** @@ -6236,6 +6284,17 @@ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info); */ bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb); +/** + * cfg80211_external_auth_request - userspace request for authentication + * @netdev: network device + * @params: External authentication parameters + * @gfp: allocation flags + * Returns: 0 on success, < 0 on error + */ +int cfg80211_external_auth_request(struct net_device *netdev, + struct cfg80211_external_auth_params *params, + gfp_t gfp); + /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* wiphy_printk helpers, similar to dev_printk */ diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h new file mode 100644 index 0000000000000000000000000000000000000000..3bafdd59eee0f15e306fc287d17971de78b25660 --- /dev/null +++ b/include/net/cnss_nl.h @@ -0,0 +1,104 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _NET_CNSS_GENETLINK_H_ +#define _NET_CNSS_GENETLINK_H_ + +#define CLD80211_MAX_COMMANDS 40 +#define CLD80211_MAX_NL_DATA 4096 + +/** + * enum cld80211_attr - Driver/Application embeds the data in nlmsg with the + * help of below attributes + * + * @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested + * attribute. + * @CLD80211_ATTR_DATA: Embed complete data in this attribute + * @CLD80211_ATTR_META_DATA: Embed meta data for above data. This will help + * wlan driver to peek into request message packet without opening up definition + * of complete request message. + * + * Any new message in future can be added as another attribute + */ +enum cld80211_attr { + CLD80211_ATTR_VENDOR_DATA = 1, + CLD80211_ATTR_DATA, + CLD80211_ATTR_META_DATA, + /* add new attributes above here */ + + __CLD80211_ATTR_AFTER_LAST, + CLD80211_ATTR_MAX = __CLD80211_ATTR_AFTER_LAST - 1 +}; + +/** + * enum cld80211_multicast_groups - List of multicast groups supported + * + * @CLD80211_MCGRP_SVC_MSGS: WLAN service message will be sent to this group. 
+ * Ex: Status ind messages + * @CLD80211_MCGRP_HOST_LOGS: All logging related messages from driver will be + * sent to this multicast group + * @CLD80211_MCGRP_FW_LOGS: Firmware logging messages will be sent to this group + * @CLD80211_MCGRP_PER_PKT_STATS: Messages related packet stats debugging infra + * will be sent to this group + * @CLD80211_MCGRP_DIAG_EVENTS: Driver/Firmware status logging diag events will + * be sent to this group + * @CLD80211_MCGRP_FATAL_EVENTS: Any fatal message generated in driver/firmware + * will be sent to this group + * @CLD80211_MCGRP_OEM_MSGS: All OEM message will be sent to this group + * Ex: LOWI messages + */ +enum cld80211_multicast_groups { + CLD80211_MCGRP_SVC_MSGS, + CLD80211_MCGRP_HOST_LOGS, + CLD80211_MCGRP_FW_LOGS, + CLD80211_MCGRP_PER_PKT_STATS, + CLD80211_MCGRP_DIAG_EVENTS, + CLD80211_MCGRP_FATAL_EVENTS, + CLD80211_MCGRP_OEM_MSGS, +}; + +/** + * typedef cld80211_cb - Callback to be called when an nlmsg is received with + * the registered cmd_id command from userspace + * @data: Payload of the message to be sent to driver + * @data_len: Length of the payload + * @cb_ctx: callback context to be returned to driver when the callback + * is called + * @pid: process id of the sender + */ +typedef void (*cld80211_cb)(const void *data, int data_len, + void *cb_ctx, int pid); + +/** + * register_cld_cmd_cb() - Allows cld driver to register for commands with + * callback + * @cmd_id: Command to be registered. Valid range [1, CLD80211_MAX_COMMANDS] + * @cb: Callback to be called when an nlmsg is received with cmd_id command + * from userspace + * @cb_ctx: context provided by driver; Send this as cb_ctx of func() + * to driver + */ +int register_cld_cmd_cb(u8 cmd_id, cld80211_cb cb, void *cb_ctx); + +/** + * deregister_cld_cmd_cb() - Allows cld driver to de-register the command it + * has already registered + * @cmd_id: Command to be deregistered. 
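A sketch of a client of this interface: register one command handler at probe time and drop it on remove. The command id (EXAMPLE_CLD_CMD) and the context structure are illustrative only; the registration calls and the callback signature come from this header.

  #include <net/cnss_nl.h>

  #define EXAMPLE_CLD_CMD 5       /* must lie within [1, CLD80211_MAX_COMMANDS] */

  struct example_ctx {
          struct net_device *ndev;
  };

  static void example_cld_handler(const void *data, int data_len,
                                  void *cb_ctx, int pid)
  {
          struct example_ctx *ctx = cb_ctx;

          pr_debug("cld80211 cmd from pid %d, %d bytes for %s\n",
                   pid, data_len, ctx->ndev->name);
  }

  static int example_cld_register(struct example_ctx *ctx)
  {
          return register_cld_cmd_cb(EXAMPLE_CLD_CMD, example_cld_handler, ctx);
  }

  static void example_cld_unregister(void)
  {
          deregister_cld_cmd_cb(EXAMPLE_CLD_CMD);
  }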
+ */ +int deregister_cld_cmd_cb(u8 cmd_id); + +/** + * cld80211_get_genl_family() - Returns current netlink family context + */ +struct genl_family *cld80211_get_genl_family(void); + +#endif /* _NET_CNSS_GENETLINK_H_ */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index bd406ad0159671d4e96613052e26d4e791b8c022..d9ed0372b205b14e45616abaf5d6d754e62a307f 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -862,6 +862,11 @@ static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel) return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel; } +static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6) +{ + return fl6->flowlabel & IPV6_FLOWLABEL_MASK; +} + /* * Prototypes exported by ipv6 */ diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index c4f5caaf37786d54596ab63aac497b7e06066af8..f6a3543e52477d0b3ec6c883fff554c4e924e0d6 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipcm6_cookie *ipc6, struct sockcm_cookie *sockc); -void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, - __u16 srcp, __u16 destp, int bucket); +void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, + __u16 srcp, __u16 destp, int rqueue, int bucket); +static inline void +ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, + __u16 destp, int bucket) +{ + __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp), + bucket); +} #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) diff --git a/include/net/udp.h b/include/net/udp.h index c5e986ab5c733aec6c2b62c5cc28a0684a0cd11d..dd05e091b21d452d54adb35722bffb655417559a 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -248,6 +248,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb, return htons((((u64) hash * (max - min)) >> 32) + min); } +static inline int udp_rqueue_get(struct sock *sk) +{ + return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit); +} + /* net/ipv4/udp.c */ void udp_destruct_sock(struct sock *sk); void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 08f3d8699a27ae668b67e765b644fe1779fd93ee..5a24b4c700e5974464d3280f80c9c44fead89737 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -3558,6 +3558,20 @@ static inline int ib_check_mr_access(int flags) return 0; } +static inline bool ib_access_writable(int access_flags) +{ + /* + * We have writable memory backing the MR if any of the following + * access flags are set. "Local write" and "remote write" obviously + * require write access. "Remote atomic" can do things like fetch and + * add, which will modify memory, and "MW bind" can change permissions + * by binding a window. + */ + return access_flags & + (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); +} + /** * ib_check_mr_status: lightweight check of MR status. 
* This routine may provide status checks on a selected diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 1ba84a78f1c5435f4e049313acdf581820667ecc..c653af91da160ee188455e92c3670984f82b7de3 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -409,7 +409,7 @@ struct rvt_dev_info { spinlock_t pending_lock; /* protect pending mmap list */ /* CQ */ - struct kthread_worker *worker; /* per device cq worker */ + struct kthread_worker __rcu *worker; /* per device cq worker */ u32 n_cqs_allocated; /* number of CQs allocated for device */ spinlock_t n_cqs_lock; /* protect count of in use cqs */ diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index cb979ad90401e299344dd5fae38d09c489d8bd58..b86c4c367004ce576f64ad7bda1978b0232210fa 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -125,13 +125,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node); static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *data, size_t len) { - return 0; + return -ENOSYS; } static inline int rpi_firmware_property_list(struct rpi_firmware *fw, void *data, size_t tag_size) { - return 0; + return -ENOSYS; } static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h index 152f434210e776db78130cf9514ac9fbd07a0361..e171d8cf53d1fcfd475dd2056e78b1141dfa8d30 100644 --- a/include/soc/qcom/qmi_rmnet.h +++ b/include/soc/qcom/qmi_rmnet.h @@ -18,13 +18,14 @@ #include #ifdef CONFIG_QCOM_QMI_DFC -void *qmi_rmnet_qos_init(struct net_device *real_dev, uint8_t mux_id); +void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id); void qmi_rmnet_qos_exit(struct net_device *dev); +void qmi_rmnet_qmi_exit(void *qmi_pt, void *port); void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt); void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb); #else static inline void *qmi_rmnet_qos_init(struct net_device *real_dev, - uint8_t mux_id) + u8 mux_id) { return NULL; } @@ -33,6 +34,10 @@ static inline void qmi_rmnet_qos_exit(struct net_device *dev) { } +static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) +{ +} + static inline void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) { diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h index 532c69d008b08f652d0dd70fbf78d724a221fe89..7ab5a62410087695c99f67f5f6daa81f74cf44a0 100644 --- a/include/soc/qcom/rmnet_qmi.h +++ b/include/soc/qcom/rmnet_qmi.h @@ -19,7 +19,7 @@ void *rmnet_get_qmi_pt(void *port); void *rmnet_get_qos_pt(struct net_device *dev); void *rmnet_get_rmnet_port(struct net_device *dev); -struct net_device *rmnet_get_rmnet_dev(void *port, uint8_t mux_id); +struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id); void rmnet_reset_qmi_pt(void *port); void rmnet_init_qmi_pt(void *port, void *qmi); #else @@ -39,7 +39,7 @@ static inline void *rmnet_get_rmnet_port(struct net_device *dev) } static inline struct net_device *rmnet_get_rmnet_dev(void *port, - uint8_t mux_id) + u8 mux_id) { return NULL; } diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h index 853f823415567c72f5a34b0981fe62287fa3b125..889ec4ce4656d97790c7ed306906f0a641d64bbf 100644 --- a/include/soc/qcom/rpm-smd.h +++ b/include/soc/qcom/rpm-smd.h @@ -16,111 +16,6 @@ #ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H #define 
__ARCH_ARM_MACH_MSM_RPM_SMD_H -#define SMD_EVENT_DATA 1 -#define SMD_EVENT_OPEN 2 -#define SMD_EVENT_CLOSE 3 -#define SMD_EVENT_STATUS 4 -#define SMD_EVENT_REOPEN_READY 5 - -enum { - GLINK_CONNECTED, - GLINK_LOCAL_DISCONNECTED, - GLINK_REMOTE_DISCONNECTED, -}; - -enum tx_flags { - GLINK_TX_REQ_INTENT = 0x1, - GLINK_TX_SINGLE_THREADED = 0x2, - GLINK_TX_TRACER_PKT = 0x4, - GLINK_TX_ATOMIC = 0x8, -}; - -enum glink_link_state { - GLINK_LINK_STATE_UP, - GLINK_LINK_STATE_DOWN, -}; - -struct glink_link_state_cb_info { - const char *transport; - const char *edge; - enum glink_link_state link_state; -}; - -struct glink_link_info { - const char *transport; - const char *edge; - void (*glink_link_state_notif_cb)( - struct glink_link_state_cb_info *cb_info, - void *priv); -}; - -struct smd_channel { - void __iomem *send; /* some variant of smd_half_channel */ - void __iomem *recv; /* some variant of smd_half_channel */ - unsigned char *send_data; - unsigned char *recv_data; - unsigned int fifo_size; - struct list_head ch_list; - unsigned int current_packet; - unsigned int n; - void *priv; - void (*notify)(void *priv, unsigned int flags); - int (*read)(struct smd_channel *ch, void *data, int len); - int (*write)(struct smd_channel *ch, const void *data, int len, - bool int_ntfy); - int (*read_avail)(struct smd_channel *ch); - int (*write_avail)(struct smd_channel *ch); - int (*read_from_cb)(struct smd_channel *ch, void *data, int len); - void (*update_state)(struct smd_channel *ch); - unsigned int last_state; - void (*notify_other_cpu)(struct smd_channel *ch); - void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes); - void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes); - char name[20]; - struct platform_device pdev; - unsigned int type; - int pending_pkt_sz; - char is_pkt_ch; - /* - * private internal functions to access *send and *recv. - * never to be exported outside of smd - */ - struct smd_half_channel_access *half_ch; -}; - -struct glink_open_config { - void *priv; - uint32_t options; - const char *transport; - const char *edge; - const char *name; - unsigned int rx_intent_req_timeout_ms; - void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv, - const void *ptr, size_t size); - void (*notify_tx_done)(void *handle, const void *priv, - const void *pkt_priv, const void *ptr); - void (*notify_state)(void *handle, const void *priv, - unsigned int event); - bool (*notify_rx_intent_req)(void *handle, const void *priv, - size_t req_size); - void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv, - void *iovec, size_t size, - void * (*vbuf_provider)(void *iovec, size_t offset, - size_t *size), - void * (*pbuf_provider)(void *iovec, size_t offset, - size_t *size)); - void (*notify_rx_sigs)(void *handle, const void *priv, - uint32_t old_sigs, uint32_t new_sigs); - void (*notify_rx_abort)(void *handle, const void *priv, - const void *pkt_priv); - void (*notify_tx_abort)(void *handle, const void *priv, - const void *pkt_priv); - void (*notify_rx_tracer_pkt)(void *handle, const void *priv, - const void *pkt_priv, const void *ptr, size_t size); - void (*notify_remote_rx_intent)(void *handle, const void *priv, - size_t size); -}; - /** * enum msm_rpm_set - RPM enumerations for sleep/active set * %MSM_RPM_CTX_SET_0: Set resource parameters for active mode. 
@@ -413,82 +308,4 @@ static inline int __init msm_rpm_driver_init(void) } #endif -static inline int glink_rpm_rx_poll(void *handle) -{ - return -ENODEV; -} - -static inline int smd_is_pkt_avail(struct smd_channel *ch) -{ - return -ENODEV; -} - -static inline int smd_cur_packet_size(struct smd_channel *ch) -{ - return -ENODEV; -} - -static inline int smd_read_avail(struct smd_channel *ch) -{ - return -ENODEV; -} - -static inline int smd_read(struct smd_channel *ch, void *data, int len) -{ - return -ENODEV; -} - -static inline int smd_write_avail(struct smd_channel *ch) -{ - return -ENODEV; -} -static inline int smd_write(struct smd_channel *ch, const void *data, int len) -{ - return -ENODEV; -} - -static inline int glink_tx(void *handle, void *pkt_priv, void *data, - size_t size, uint32_t tx_flags) -{ - return -ENODEV; -} - -static inline int smd_mask_receive_interrupt(struct smd_channel *ch, bool mask, - const struct cpumask *cpumask) -{ - return -ENODEV; -} - -static inline int glink_rpm_mask_rx_interrupt(void *handle, bool mask, - void *pstruct) -{ - return -ENODEV; -} - -static inline int glink_rx_done(void *handle, const void *ptr, bool reuse) -{ - return -ENODEV; -} -static inline void *glink_open(const struct glink_open_config *cfg_ptr) -{ - return NULL; -} - -static inline void *glink_register_link_state_cb( - struct glink_link_info *link_info, void *priv) -{ - return NULL; -} - -static inline int smd_named_open_on_edge(const char *name, uint32_t edge, - struct smd_channel **_ch, void *priv, - void (*notify)(void *, unsigned int)) -{ - return -ENODEV; -} - -static inline void smd_disable_read_intr(struct smd_channel *ch) -{ -} - #endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/ diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h index b9b333e266c8a38947449ba5f3fb315f6025b308..75f017ce0dffc14c5388390e1451903b88ce75a8 100644 --- a/include/soc/qcom/secure_buffer.h +++ b/include/soc/qcom/secure_buffer.h @@ -41,6 +41,7 @@ enum vmid { VMID_CP_CAMERA_PREVIEW = 0x1D, VMID_CP_SPSS_SP_SHARED = 0x22, VMID_CP_SPSS_HLOS_SHARED = 0x24, + VMID_CP_CDSP = 0x2A, VMID_LAST, VMID_INVAL = -1 }; diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h index 267cfedb508ccf8e265120142e89afcda20489d7..d20676facd84315b60c2b1133b8867dc7555ce9e 100644 --- a/include/soc/qcom/socinfo.h +++ b/include/soc/qcom/socinfo.h @@ -59,14 +59,20 @@ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8996-cdp") #define early_machine_is_sm8150() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sm8150") -#define early_machine_is_sa8150() \ - of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sa8150") +#define early_machine_is_sa8155() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sa8155") +#define early_machine_is_sa8155p() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sa8155p") #define early_machine_is_sdmshrike() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmshrike") #define early_machine_is_sm6150() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sm6150") #define early_machine_is_qcs405() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs405") +#define early_machine_is_qcs403() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs403") +#define early_machine_is_qcs401() \ + of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs401") #define early_machine_is_sdxprairie() \ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdxprairie") #define early_machine_is_sdmmagpie() \ @@ -90,10 +96,13 @@ #define 
early_machine_is_apq8084() 0 #define early_machine_is_msm8996() 0 #define early_machine_is_sm8150() 0 -#define early_machine_is_sa8150() 0 +#define early_machine_is_sa8155() 0 +#define early_machine_is_sa8155p() 0 #define early_machine_is_sdmshrike() 0 #define early_machine_is_sm6150() 0 #define early_machine_is_qcs405() 0 +#define early_machine_is_qcs403() 0 +#define early_machine_is_qcs401() 0 #define early_machine_is_sdxprairie() 0 #define early_machine_is_sdmmagpie() 0 #endif @@ -117,10 +126,13 @@ enum msm_cpu { MSM_CPU_8084, MSM_CPU_8996, MSM_CPU_SM8150, - MSM_CPU_SA8150, + MSM_CPU_SA8155, + MSM_CPU_SA8155P, MSM_CPU_SDMSHRIKE, MSM_CPU_SM6150, MSM_CPU_QCS405, + MSM_CPU_QCS403, + MSM_CPU_QCS401, SDX_CPU_SDXPRAIRIE, MSM_CPU_SDMMAGPIE, }; diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 6665cb29e1a238af24b7dcb4df543c147cbad742..9955e0b85de5f1bae0ec7b349ac8107cff129d43 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -78,6 +78,7 @@ struct snd_rawmidi_runtime { size_t xruns; /* over/underruns counter */ /* misc */ spinlock_t lock; + struct mutex realloc_mutex; wait_queue_head_t sleep; /* event handler (new bytes, input only) */ void (*event)(struct snd_rawmidi_substream *substream); diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h index 68d9f8d928d0c4dd4304632548428b3b4c8ec38a..52b52c4fc4c664322fe9d236488b623199a3c267 100644 --- a/include/sound/wcd-dsp-mgr.h +++ b/include/sound/wcd-dsp-mgr.h @@ -80,6 +80,7 @@ enum wdsp_signal { /* Software generated signal indicating debug dumps to be collected */ WDSP_DEBUG_DUMP, + WDSP_DEBUG_DUMP_INTERNAL, }; /* diff --git a/include/trace/events/almk.h b/include/trace/events/almk.h new file mode 100644 index 0000000000000000000000000000000000000000..85d712d48f5007a021b88eae84f19d6dda430dd0 --- /dev/null +++ b/include/trace/events/almk.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM almk + +#if !defined(_TRACE_EVENT_ALMK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENT_ALMK_H + +#include +#include + +TRACE_EVENT(almk_vmpressure, + + TP_PROTO(unsigned long pressure, + int other_free, + int other_file), + + TP_ARGS(pressure, other_free, other_file), + + TP_STRUCT__entry( + __field(unsigned long, pressure) + __field(int, other_free) + __field(int, other_file) + ), + + TP_fast_assign( + __entry->pressure = pressure; + __entry->other_free = other_free; + __entry->other_file = other_file; + ), + + TP_printk("%lu, %d, %d", + __entry->pressure, __entry->other_free, + __entry->other_file) +); + +TRACE_EVENT(almk_shrink, + + TP_PROTO(int tsize, + int vmp, + int other_free, + int other_file, + short adj), + + TP_ARGS(tsize, vmp, other_free, other_file, adj), + + TP_STRUCT__entry( + __field(int, tsize) + __field(int, vmp) + __field(int, other_free) + __field(int, other_file) + __field(short, adj) + ), + + TP_fast_assign( + __entry->tsize = tsize; + __entry->vmp = vmp; + __entry->other_free = other_free; + __entry->other_file = other_file; + __entry->adj = adj; + ), + + TP_printk("%d, %d, %d, %d, %d", + __entry->tsize, + __entry->vmp, + __entry->other_free, + __entry->other_file, + __entry->adj) +); + +#endif + +#include + diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h new file mode 100644 index 0000000000000000000000000000000000000000..dc8dd5ae23b95d15962b1d896dc4d116cf5bc6c5 --- /dev/null +++ b/include/trace/events/dfc.h @@ -0,0 +1,185 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dfc + +#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_DFC_H + +#include + +TRACE_EVENT(dfc_qmi_tc, + + TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen, + u32 tcm_handle, int enable), + + TP_ARGS(bearer_id, flow_id, grant, qlen, tcm_handle, enable), + + TP_STRUCT__entry( + __field(u8, bid) + __field(u32, fid) + __field(u32, grant) + __field(int, qlen) + __field(u32, tcm_handle) + __field(int, enable) + ), + + TP_fast_assign( + __entry->bid = bearer_id; + __entry->fid = flow_id; + __entry->grant = grant; + __entry->qlen = qlen; + __entry->tcm_handle = tcm_handle; + __entry->enable = enable; + ), + + TP_printk("bearer_id=%u grant=%u qdisc_len=%d flow_id=%u " + "tcm_handle=0x%x %s", + __entry->bid, __entry->grant, __entry->qlen, __entry->fid, + __entry->tcm_handle, + __entry->enable ? 
"enable" : "disable") +); + +TRACE_EVENT(dfc_flow_ind, + + TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant, + u16 seq_num, u8 ack_req), + + TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req), + + TP_STRUCT__entry( + __field(int, src) + __field(int, idx) + __field(u8, mid) + __field(u8, bid) + __field(u32, grant) + __field(u16, seq) + __field(u8, ack_req) + ), + + TP_fast_assign( + __entry->src = src; + __entry->idx = idx; + __entry->mid = mux_id; + __entry->bid = bearer_id; + __entry->grant = grant; + __entry->seq = seq_num; + __entry->ack_req = ack_req; + ), + + TP_printk("src=%d idx[%d]: mux_id=%u bearer_id=%u grant=%u " + "seq_num=%u ack_req=%u", + __entry->src, __entry->idx, __entry->mid, __entry->bid, + __entry->grant, __entry->seq, __entry->ack_req) +); + +TRACE_EVENT(dfc_flow_check, + + TP_PROTO(u8 bearer_id, unsigned int len, u32 grant), + + TP_ARGS(bearer_id, len, grant), + + TP_STRUCT__entry( + __field(u8, bearer_id) + __field(unsigned int, len) + __field(u32, grant) + ), + + TP_fast_assign( + __entry->bearer_id = bearer_id; + __entry->len = len; + __entry->grant = grant; + ), + + TP_printk("bearer_id=%u skb_len=%u current_grant=%u", + __entry->bearer_id, __entry->len, __entry->grant) +); + +TRACE_EVENT(dfc_flow_info, + + TP_PROTO(u8 bearer_id, u32 flow_id, int ip_type, u32 handle, int add), + + TP_ARGS(bearer_id, flow_id, ip_type, handle, add), + + TP_STRUCT__entry( + __field(u8, bid) + __field(u32, fid) + __field(int, ip) + __field(u32, handle) + __field(int, action) + ), + + TP_fast_assign( + __entry->bid = bearer_id; + __entry->fid = flow_id; + __entry->ip = ip_type; + __entry->handle = handle; + __entry->action = add; + ), + + TP_printk("%s: bearer_id=%u flow_id=%u ip_type=%d tcm_handle=0x%x", + __entry->action ? "add flow" : "delete flow", + __entry->bid, __entry->fid, __entry->ip, __entry->handle) +); + +TRACE_EVENT(dfc_client_state_up, + + TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface), + + TP_ARGS(idx, instance, ep_type, iface), + + TP_STRUCT__entry( + __field(int, idx) + __field(u32, instance) + __field(u32, ep_type) + __field(u32, iface) + ), + + TP_fast_assign( + __entry->idx = idx; + __entry->instance = instance; + __entry->ep_type = ep_type; + __entry->iface = iface; + ), + + TP_printk("Client[%d]: Connection established with DFC Service " + "instance=%u ep_type=%u iface_id=%u", + __entry->idx, __entry->instance, + __entry->ep_type, __entry->iface) +); + +TRACE_EVENT(dfc_client_state_down, + + TP_PROTO(int idx, int from_cb), + + TP_ARGS(idx, from_cb), + + TP_STRUCT__entry( + __field(int, idx) + __field(int, from_cb) + ), + + TP_fast_assign( + __entry->idx = idx; + __entry->from_cb = from_cb; + ), + + TP_printk("Client[%d]: Connection with DFC service lost. 
" + "Exit by callback %d", + __entry->idx, __entry->from_cb) +); + +#endif /* _TRACE_DFC_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/pagefault.h b/include/trace/events/pagefault.h new file mode 100644 index 0000000000000000000000000000000000000000..a9643b3759f2bc0f78cdc8b472794bc742408277 --- /dev/null +++ b/include/trace/events/pagefault.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pagefault + +#if !defined(_TRACE_PAGEFAULT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGEFAULT_H + +#include +#include + +DECLARE_EVENT_CLASS(spf, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(unsigned long, address) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->vm_start = vma->vm_start; + __entry->vm_end = vma->vm_end; + __entry->address = address; + ), + + TP_printk("ip:%lx vma:%lx-%lx address:%lx", + __entry->caller, __entry->vm_start, __entry->vm_end, + __entry->address) +); + +DEFINE_EVENT(spf, spf_pte_lock, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_changed, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_noanon, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_notsup, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_access, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_pmd_changed, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +#endif /* _TRACE_PAGEFAULT_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/process_reclaim.h b/include/trace/events/process_reclaim.h new file mode 100644 index 0000000000000000000000000000000000000000..d79327ee4969e3c17bd37e2b7d65dfddc00b88c7 --- /dev/null +++ b/include/trace/events/process_reclaim.h @@ -0,0 +1,85 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM process_reclaim + +#if !defined(_TRACE_EVENT_PROCESSRECLAIM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENT_PROCESSRECLAIM_H + +#include +#include +#include + +TRACE_EVENT(process_reclaim, + + TP_PROTO(int tasksize, + short oom_score_adj, + int nr_scanned, int nr_reclaimed, + int per_swap_size, int total_sz, + int nr_to_reclaim), + + TP_ARGS(tasksize, oom_score_adj, nr_scanned, + nr_reclaimed, per_swap_size, + total_sz, nr_to_reclaim), + + TP_STRUCT__entry( + __field(int, tasksize) + __field(short, oom_score_adj) + __field(int, nr_scanned) + __field(int, nr_reclaimed) + __field(int, per_swap_size) + __field(int, total_sz) + __field(int, nr_to_reclaim) + ), + + TP_fast_assign( + __entry->tasksize = tasksize; + __entry->oom_score_adj = oom_score_adj; + __entry->nr_scanned = nr_scanned; + __entry->nr_reclaimed = nr_reclaimed; + __entry->per_swap_size = per_swap_size; + __entry->total_sz = total_sz; + __entry->nr_to_reclaim = nr_to_reclaim; + ), + + TP_printk("%d, %hd, %d, %d, %d, %d, %d", + __entry->tasksize, __entry->oom_score_adj, + __entry->nr_scanned, __entry->nr_reclaimed, + __entry->per_swap_size, __entry->total_sz, + __entry->nr_to_reclaim) +); + +TRACE_EVENT(process_reclaim_eff, + + TP_PROTO(int efficiency, int reclaim_avg_efficiency), + + TP_ARGS(efficiency, reclaim_avg_efficiency), + + TP_STRUCT__entry( + __field(int, efficiency) + __field(int, reclaim_avg_efficiency) + ), + + TP_fast_assign( + __entry->efficiency = efficiency; + __entry->reclaim_avg_efficiency = reclaim_avg_efficiency; + ), + + TP_printk("%d, %d", __entry->efficiency, + __entry->reclaim_avg_efficiency) +); + +#endif + +#include + diff --git a/include/trace/events/rmnet.h b/include/trace/events/rmnet.h new file mode 100644 index 0000000000000000000000000000000000000000..5904792c9fffa0cafde0a1017f324a061f0ec465 --- /dev/null +++ b/include/trace/events/rmnet.h @@ -0,0 +1,43 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rmnet + +#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RMNET_H + +#include +#include + +TRACE_EVENT(rmnet_xmit_skb, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __string(dev_name, skb->dev->name) + __field(unsigned int, len) + ), + + TP_fast_assign( + __assign_str(dev_name, skb->dev->name); + __entry->len = skb->len; + ), + + TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len) +); + +#endif /* _TRACE_RMNET_H */ + +#include diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index e63888bcf20891e250f407e6e783e43e32630660..3b636833e16f6de3272d4257684b1c925e41af8d 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -82,7 +82,7 @@ TRACE_EVENT(sched_enq_deq_task, __entry->cpu = task_cpu(p); __entry->enqueue = enqueue; __entry->nr_running = task_rq(p)->nr_running; - __entry->cpu_load = cpu_util(task_cpu(p)); + __entry->cpu_load = task_rq(p)->cpu_load[0]; __entry->rt_nr_running = task_rq(p)->rt.rt_nr_running; __entry->cpus_allowed = cpus_allowed; __entry->demand = task_load(p); @@ -1157,10 +1157,12 @@ TRACE_EVENT(sched_find_best_target, TP_PROTO(struct task_struct *tsk, bool prefer_idle, unsigned long min_util, int start_cpu, - int best_idle, int best_active, int target), + int best_idle, int best_active, int most_spare_cap, int target, + int backup_cpu), TP_ARGS(tsk, prefer_idle, min_util, start_cpu, - best_idle, best_active, target), + best_idle, best_active, most_spare_cap, target, + backup_cpu), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) @@ -1170,7 +1172,9 @@ TRACE_EVENT(sched_find_best_target, __field( int, start_cpu ) __field( int, best_idle ) __field( int, best_active ) + __field( int, most_spare_cap ) __field( int, target ) + __field( int, backup_cpu) ), TP_fast_assign( @@ -1181,15 +1185,19 @@ TRACE_EVENT(sched_find_best_target, __entry->start_cpu = start_cpu; __entry->best_idle = best_idle; __entry->best_active = best_active; + __entry->most_spare_cap = most_spare_cap; __entry->target = target; + __entry->backup_cpu = backup_cpu; ), TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d " - "best_idle=%d best_active=%d target=%d", + "best_idle=%d best_active=%d most_spare_cap=%d target=%d backup=%d", __entry->pid, __entry->comm, __entry->prefer_idle, __entry->start_cpu, __entry->best_idle, __entry->best_active, - __entry->target) + __entry->most_spare_cap, + __entry->target, + __entry->backup_cpu) ); TRACE_EVENT(sched_cpu_util, @@ -1208,6 +1216,10 @@ TRACE_EVENT(sched_cpu_util, __field(unsigned int, capacity_orig ) __field(int, idle_state ) __field(u64, irqload ) + __field(int, online ) + __field(int, isolated ) + __field(int, reserved ) + __field(int, high_irq_load ) ), TP_fast_assign( @@ -1220,10 +1232,15 @@ TRACE_EVENT(sched_cpu_util, __entry->capacity_orig = capacity_orig_of(cpu); __entry->idle_state = idle_get_state_idx(cpu_rq(cpu)); __entry->irqload = sched_irqload(cpu); + __entry->online = cpu_online(cpu); + __entry->isolated = cpu_isolated(cpu); + __entry->reserved = is_reserved(cpu); + __entry->high_irq_load = sched_cpu_high_irqload(cpu); ), - TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu", - __entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload) + TP_printk("cpu=%d 
nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u", + __entry->cpu, __entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum, __entry->capacity_curr, __entry->capacity, __entry->capacity_orig, __entry->idle_state, __entry->irqload, + __entry->online, __entry->isolated, __entry->reserved, __entry->high_irq_load) ); TRACE_EVENT(sched_energy_diff, diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 8f659bb7badc79b3c2f1a94fa31edb884e3fc3a9..7115838fbf2a307ba266cfdc09a67b9aba2d1cbf 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -456,6 +456,7 @@ struct btrfs_free_space_header { #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) +#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34) /* diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index ab7e36fac3d89922fe94cd1ec7a37dffa297cfc4..893caec1ca58b34ac7801b8d384f03caea3b0d6f 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -95,7 +95,11 @@ #define IPA_IOCTL_ALLOC_IPV6CT_TABLE 53 #define IPA_IOCTL_DEL_NAT_TABLE 54 #define IPA_IOCTL_DEL_IPV6CT_TABLE 55 -#define IPA_IOCTL_GET_VLAN_MODE 56 +#define IPA_IOCTL_CLEANUP 56 +#define IPA_IOCTL_QUERY_WLAN_CLIENT 57 +#define IPA_IOCTL_GET_VLAN_MODE 58 +#define IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING 59 +#define IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING 60 /** * max size of the header to be inserted @@ -510,7 +514,13 @@ enum ipa_per_client_stats_event { IPA_PER_CLIENT_STATS_EVENT_MAX }; -#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX) +enum ipa_vlan_bridge_event { + ADD_BRIDGE_VLAN_MAPPING = IPA_PER_CLIENT_STATS_EVENT_MAX, + DEL_BRIDGE_VLAN_MAPPING, + BRIDGE_VLAN_MAPPING_MAX +}; + +#define IPA_EVENT_MAX_NUM (BRIDGE_VLAN_MAPPING_MAX) #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) /** @@ -1878,6 +1888,20 @@ struct ipa_ioc_get_vlan_mode { uint32_t is_vlan_mode; }; +/** + * struct ipa_ioc_bridge_vlan_mapping_info - vlan to bridge mapping info + * @bridge_name: bridge interface name + * @vlan_id: vlan ID bridge is mapped to + * @bridge_ipv4: bridge interface ipv4 address + * @subnet_mask: bridge interface subnet mask + */ +struct ipa_ioc_bridge_vlan_mapping_info { + char bridge_name[IPA_RESOURCE_NAME_MAX]; + uint16_t vlan_id; + uint32_t bridge_ipv4; + uint32_t subnet_mask; +}; + /** * actual IOCTLs supported by IPA driver */ @@ -2059,6 +2083,17 @@ struct ipa_ioc_get_vlan_mode { #define IPA_IOC_GET_VLAN_MODE _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_GET_VLAN_MODE, \ struct ipa_ioc_get_vlan_mode *) +#define IPA_IOC_ADD_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING, \ + struct ipa_ioc_bridge_vlan_mapping_info) + +#define IPA_IOC_DEL_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING, \ + struct ipa_ioc_bridge_vlan_mapping_info) +#define IPA_IOC_CLEANUP _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_CLEANUP) +#define IPA_IOC_QUERY_WLAN_CLIENT _IO(IPA_IOC_MAGIC,\ + IPA_IOCTL_QUERY_WLAN_CLIENT) /* * unique magic number of the Tethering bridge ioctls */ diff --git a/include/uapi/linux/nfc/Kbuild b/include/uapi/linux/nfc/Kbuild index 90710153e46268229ec7a492338b12f56641d765..62a89b999e882618f1553521761673517971f60d 100644 --- a/include/uapi/linux/nfc/Kbuild +++ b/include/uapi/linux/nfc/Kbuild @@ -1,2 +1,3 @@ #UAPI export list header-y += nfcinfo.h +header-y += ntaginfo.h diff --git 
a/include/uapi/linux/nfc/ntaginfo.h b/include/uapi/linux/nfc/ntaginfo.h new file mode 100644 index 0000000000000000000000000000000000000000..1deee3e4d455162a27fb7a9c181344c691301d46 --- /dev/null +++ b/include/uapi/linux/nfc/ntaginfo.h @@ -0,0 +1,21 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _UAPI_NTAGINFO_H_ +#define _UAPI_NTAGINFO_H_ + +#include + +#define NTAG_FD_STATE _IOW(0xE9, 0x01, unsigned int) +#define NTAG_SET_OFFSET _IOW(0xE9, 0x02, unsigned int) + +#endif diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 7e8de4bcb231c64b22ee6075250a5954d68383f4..f80bf09394446c89c5b7e0501bb890a343724d5b 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -992,6 +992,27 @@ * @NL80211_CMD_RELOAD_REGDB: Request that the regdb firmware file is reloaded. * * + * @NL80211_CMD_EXTERNAL_AUTH: This interface is exclusively defined for host + * drivers that do not define separate commands for authentication and + * association, but rely on user space for the authentication to happen. + * This interface acts both as the event request (driver to user space) + * to trigger the authentication and command response (userspace to + * driver) to indicate the authentication status. + * + * User space uses the %NL80211_CMD_CONNECT command to the host driver to + * trigger a connection. The host driver selects a BSS and further uses + * this interface to offload only the authentication part to the user + * space. Authentication frames are passed between the driver and user + * space through the %NL80211_CMD_FRAME interface. Host driver proceeds + * further with the association after getting successful authentication + * status. User space indicates the authentication status through + * %NL80211_ATTR_STATUS_CODE attribute in %NL80211_CMD_EXTERNAL_AUTH + * command interface. + * + * Host driver reports this status on an authentication failure to the + * user space through the connect result as the user space would have + * initiated the connection through the connect request. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1198,6 +1219,8 @@ enum nl80211_commands { NL80211_CMD_RELOAD_REGDB, + NL80211_CMD_EXTERNAL_AUTH, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2156,6 +2179,16 @@ enum nl80211_commands { * the driver or is not needed (because roaming used the Fast Transition * protocol). * + * @NL80211_ATTR_EXTERNAL_AUTH_ACTION: Identify the requested external + * authentication operation (u32 attribute with an + * &enum nl80211_external_auth_action value). This is used with the + * &NL80211_CMD_EXTERNAL_AUTH request event. + * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user + * space supports external authentication. This attribute shall be used + * only with %NL80211_CMD_CONNECT request. The driver may offload + * authentication processing to user space if this capability is indicated + * in NL80211_CMD_CONNECT requests from the user space. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2582,6 +2615,9 @@ enum nl80211_attrs { NL80211_ATTR_PMKR0_NAME, NL80211_ATTR_PORT_AUTHORIZED, + NL80211_ATTR_EXTERNAL_AUTH_ACTION, + NL80211_ATTR_EXTERNAL_AUTH_SUPPORT, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3867,6 +3903,9 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_PARENT_BSSID. (u64). * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF * is set. + * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update. + * Contains a nested array of signal strength attributes (u8, dBm), + * using the nesting index as the antenna number. * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3890,6 +3929,7 @@ enum nl80211_bss { NL80211_BSS_PAD, NL80211_BSS_PARENT_TSF, NL80211_BSS_PARENT_BSSID, + NL80211_BSS_CHAIN_SIGNAL, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -5161,6 +5201,8 @@ enum nl80211_smps_mode { * non-operating channel is expired and no longer valid. New CAC must * be done on this channel before starting the operation. This is not * applicable for ETSI dfs domain where pre-CAC is valid for ever. + * @NL80211_RADAR_CAC_STARTED: Channel Availability Check has been started, + * should be generated by HW if NL80211_EXT_FEATURE_DFS_OFFLOAD is enabled. */ enum nl80211_radar_event { NL80211_RADAR_DETECTED, @@ -5168,6 +5210,7 @@ enum nl80211_radar_event { NL80211_RADAR_CAC_ABORTED, NL80211_RADAR_NOP_FINISHED, NL80211_RADAR_PRE_CAC_EXPIRED, + NL80211_RADAR_CAC_STARTED, }; /** @@ -5503,4 +5546,15 @@ enum nl80211_nan_match_attributes { NL80211_NAN_MATCH_ATTR_MAX = NUM_NL80211_NAN_MATCH_ATTR - 1 }; +/** + * nl80211_external_auth_action - Action to perform with external + * authentication request. Used by NL80211_ATTR_EXTERNAL_AUTH_ACTION. + * @NL80211_EXTERNAL_AUTH_START: Start the authentication. + * @NL80211_EXTERNAL_AUTH_ABORT: Abort the ongoing authentication. 
+ */ +enum nl80211_external_auth_action { + NL80211_EXTERNAL_AUTH_START, + NL80211_EXTERNAL_AUTH_ABORT, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/qg-profile.h b/include/uapi/linux/qg-profile.h index bffddbb038e0ca3bd17cd7b7409bf9a24edcbddc..0230b3227f786d1ce144710ccd98e5a580d431c7 100644 --- a/include/uapi/linux/qg-profile.h +++ b/include/uapi/linux/qg-profile.h @@ -55,6 +55,8 @@ struct battery_params { #define QG_MAX_FCC_MAH 16000 #define QG_MIN_SLOPE 1 #define QG_MAX_SLOPE 50000 +#define QG_ESR_SF_MIN 5000 +#define QG_ESR_SF_MAX 20000 /* IOCTLs to query battery profile data */ #define BPIOCXSOC _IOWR('B', 0x01, struct battery_params) /* SOC */ diff --git a/include/uapi/linux/qg.h b/include/uapi/linux/qg.h index 2c7b49af873dcb4edc8d6bd0d67088c26dfc091f..40882a7ae8c49a10e196fcae20bb7781b697a65d 100644 --- a/include/uapi/linux/qg.h +++ b/include/uapi/linux/qg.h @@ -14,12 +14,12 @@ enum qg { QG_FIFO_TIME_DELTA, QG_BATT_SOC, QG_CC_SOC, - QG_RESERVED_3, - QG_RESERVED_4, - QG_RESERVED_5, - QG_RESERVED_6, - QG_RESERVED_7, - QG_RESERVED_8, + QG_ESR_CHARGE_DELTA, + QG_ESR_DISCHARGE_DELTA, + QG_ESR_CHARGE_SF, + QG_ESR_DISCHARGE_SF, + QG_FULL_SOC, + QG_CLEAR_LEARNT_DATA, QG_RESERVED_9, QG_RESERVED_10, QG_MAX, @@ -27,6 +27,12 @@ enum qg { #define QG_BATT_SOC QG_BATT_SOC #define QG_CC_SOC QG_CC_SOC +#define QG_ESR_CHARGE_DELTA QG_ESR_CHARGE_DELTA +#define QG_ESR_DISCHARGE_DELTA QG_ESR_DISCHARGE_DELTA +#define QG_ESR_CHARGE_SF QG_ESR_CHARGE_SF +#define QG_ESR_DISCHARGE_SF QG_ESR_DISCHARGE_SF +#define QG_FULL_SOC QG_FULL_SOC +#define QG_CLEAR_LEARNT_DATA QG_CLEAR_LEARNT_DATA struct fifo_data { unsigned int v; diff --git a/include/uapi/linux/smcinvoke.h b/include/uapi/linux/smcinvoke.h index 1dc9a63c15e5c30acb21ac88f99bb3c0160b5b9a..adb8968c532364dcf1ed9084db527ee898ba3508 100644 --- a/include/uapi/linux/smcinvoke.h +++ b/include/uapi/linux/smcinvoke.h @@ -7,18 +7,19 @@ #define SMCINVOKE_USERSPACE_OBJ_NULL -1 struct smcinvoke_buf { - uint64_t addr; - uint64_t size; + uint64_t addr; + uint64_t size; }; struct smcinvoke_obj { - int64_t fd; - int64_t reserved; + int64_t fd; + int32_t cb_server_fd; + int32_t reserved; }; union smcinvoke_arg { - struct smcinvoke_buf b; - struct smcinvoke_obj o; + struct smcinvoke_buf b; + struct smcinvoke_obj o; }; /* @@ -30,11 +31,47 @@ union smcinvoke_arg { * @args - args is pointer to buffer having all arguments */ struct smcinvoke_cmd_req { - uint32_t op; - uint32_t counts; - int32_t result; - uint32_t argsize; - uint64_t __user args; + uint32_t op; + uint32_t counts; + int32_t result; + uint32_t argsize; + uint64_t args; +}; + +/* + * struct smcinvoke_accept: structure to process CB req from TEE + * @has_resp: IN: Whether IOCTL is carrying response data + * @txn_id: OUT: An id that should be passed as it is for response + * @result: IN: Outcome of operation op + * @cbobj_id: OUT: Callback object which is target of operation op + * @op: OUT: Operation to be performed on target object + * @counts: OUT: Number of arguments, embedded in buffer pointed by + * buf_addr, to complete operation + * @reserved: IN/OUT: Usage is not defined but should be set to 0. 
+ * @argsize: IN: Size of any argument, all of equal size, embedded + * in buffer pointed by buf_addr + * @buf_len: IN: Len of buffer pointed by buf_addr + * @buf_addr: IN: Buffer containing all arguments which are needed + * to complete operation op + */ +struct smcinvoke_accept { + uint32_t has_resp; + uint32_t txn_id; + int32_t result; + int32_t cbobj_id; + uint32_t op; + uint32_t counts; + int32_t reserved; + uint32_t argsize; + uint64_t buf_len; + uint64_t buf_addr; +}; + +/* + * @cb_buf_size: IN: Max buffer size for any callback obj implemented by client + */ +struct smcinvoke_server { + uint32_t cb_buf_size; }; #define SMCINVOKE_IOC_MAGIC 0x98 @@ -42,4 +79,13 @@ struct smcinvoke_cmd_req { #define SMCINVOKE_IOCTL_INVOKE_REQ \ _IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req) +#define SMCINVOKE_IOCTL_ACCEPT_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept) + +#define SMCINVOKE_IOCTL_SERVER_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server) + +#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 4, int32_t) + #endif /* _UAPI_SMCINVOKE_H_ */ diff --git a/init/main.c b/init/main.c index 2d355a61dfc51a5582f834f4fcca382469c83fb0..0d88f37febcb29d56a1f18297a9b9a63dddb5243 100644 --- a/init/main.c +++ b/init/main.c @@ -974,6 +974,13 @@ __setup("rodata=", set_debug_rodata); static void mark_readonly(void) { if (rodata_enabled) { + /* + * load_module() results in W+X mappings, which are cleaned up + * with call_rcu_sched(). Let's make sure that queued work is + * flushed so that we don't hit false positives looking for + * insecure pages which are W+X. + */ + rcu_barrier_sched(); mark_rodata_ro(); rodata_test(); } else diff --git a/kernel/cpu.c b/kernel/cpu.c index abc9676a13898a5ca2da5839f1c26a2ff0a11ef7..25c003409b074c6f6a38619120284035937106e0 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -867,11 +868,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, if (!cpu_present(cpu)) return -EINVAL; - cpus_write_lock(); - if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1) return -EBUSY; + cpus_write_lock(); + if (trace_cpuhp_latency_enabled()) start_time = sched_clock(); @@ -1392,6 +1393,11 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup.single = NULL, .teardown.single = rcutree_dying_cpu, }, + [CPUHP_AP_KMAP_DYING] = { + .name = "KMAP:dying", + .startup.single = NULL, + .teardown.single = kmap_remove_unused_cpu, + }, [CPUHP_AP_SMPCFD_DYING] = { .name = "smpcfd:dying", .startup.single = NULL, diff --git a/kernel/events/core.c b/kernel/events/core.c index ebd3baf759c939c2674f551a0ce4e154a28f89e9..21a05dc60d8c68a9ea92084676b8d25dc51f7ad2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4487,7 +4487,7 @@ int perf_event_release_kernel(struct perf_event *event) if (event->state == PERF_EVENT_STATE_ZOMBIE) return 0; - if (!cpu_online(event->cpu) && + if (event->cpu != -1 && !cpu_online(event->cpu) && event->state == PERF_EVENT_STATE_ACTIVE) { event->state = PERF_EVENT_STATE_ZOMBIE; diff --git a/kernel/exit.c b/kernel/exit.c index 129005ce76aec9de31cc4bd97b2d87efdbac2d17..840a7851a4ae8c3cd9907a5409e5c0a9ead1d035 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -499,6 +499,7 @@ static void exit_mm(void) { struct mm_struct *mm = current->mm; struct core_state *core_state; + int mm_released; mm_release(current, mm); if (!mm) @@ -545,9 +546,12 @@ static void exit_mm(void) enter_lazy_tlb(mm, current); 
task_unlock(current); mm_update_next_owner(mm); - mmput(mm); + + mm_released = mmput(mm); if (test_thread_flag(TIF_MEMDIE)) exit_oom_victim(); + if (mm_released) + set_tsk_thread_flag(current, TIF_MM_RELEASED); } static struct task_struct *find_alive_thread(struct task_struct *p) diff --git a/kernel/fork.c b/kernel/fork.c index 2b8a70d31108fbc74429d5dbfad4712e53a14880..93537008aa358c562da3ac48cd209ea6d67b5eeb 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -654,7 +654,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, if (!tmp) goto fail_nomem; *tmp = *mpnt; - INIT_LIST_HEAD(&tmp->anon_vma_chain); + INIT_VMA(tmp); retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; @@ -811,6 +811,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm->mmap = NULL; mm->mm_rb = RB_ROOT; mm->vmacache_seqnum = 0; +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + rwlock_init(&mm->mm_rb_lock); +#endif atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); @@ -940,12 +943,17 @@ static inline void __mmput(struct mm_struct *mm) /* * Decrement the use count and release all resources for an mm. */ -void mmput(struct mm_struct *mm) +int mmput(struct mm_struct *mm) { + int mm_freed = 0; might_sleep(); - if (atomic_dec_and_test(&mm->mm_users)) + if (atomic_dec_and_test(&mm->mm_users)) { __mmput(mm); + mm_freed = 1; + } + + return mm_freed; } EXPORT_SYMBOL_GPL(mmput); diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 1c87ea49e5dfab62b774da0eded684c23dfb8fb6..9409b5540d45001d5cfe07700fc26adb30dea138 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -41,7 +41,7 @@ static inline bool irq_needs_fixup(struct irq_data *d) * If this happens then there was a missed IRQ fixup at some * point. Warn about it and enforce fixup. */ - pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", + pr_info("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", cpumask_pr_args(m), d->irq, cpu); return true; } @@ -124,19 +124,41 @@ static bool migrate_one_irq(struct irq_desc *desc) irq_shutdown(desc); return false; } + /* + * The order of preference for selecting a fallback CPU is + * + * (1) online and un-isolated CPU from default affinity + * (2) online and un-isolated CPU + * (3) online CPU + */ cpumask_andnot(&available_cpus, cpu_online_mask, cpu_isolated_mask); - if (cpumask_empty(affinity)) + if (cpumask_intersects(&available_cpus, irq_default_affinity)) + cpumask_and(&available_cpus, &available_cpus, + irq_default_affinity); + else if (cpumask_empty(&available_cpus)) affinity = cpu_online_mask; + + /* + * We are overriding the affinity with all online and + * un-isolated cpus. irq_set_affinity_locked() call + * below notify this mask to PM QOS affinity listener. + * That results in applying the CPU_DMA_LATENCY QOS + * to all the CPUs specified in the mask. But the low + * level irqchip driver sets the affinity of an irq + * to only one CPU. So pick only one CPU from the + * prepared mask while overriding the user affinity. + */ + affinity = cpumask_of(cpumask_any(affinity)); brokeaff = true; } /* - * Do not set the force argument of irq_do_set_affinity() as this + * Do not set the force argument of irq_set_affinity_locked() as this * disables the masking of offline CPUs from the supplied affinity * mask and therefore might keep/reassign the irq to the outgoing * CPU. 
 */
-	err = irq_do_set_affinity(d, affinity, false);
+	err = irq_set_affinity_locked(d, affinity, false);
 	if (err) {
 		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
 				    d->irq, err);
@@ -176,7 +198,7 @@ void irq_migrate_all_off_this_cpu(void)
 		raw_spin_unlock(&desc->lock);
 
 		if (affinity_broken) {
-			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
+			pr_info_ratelimited("IRQ %u: no longer affine to CPU%u\n",
 					    irq, smp_processor_id());
 		}
 	}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 1c19edf824272db47a48730478af5f3b582bbf67..df461383a2a79333f89b781718c4a2fef2fc50a9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -53,7 +53,6 @@ enum KTHREAD_BITS {
 	KTHREAD_IS_PER_CPU = 0,
 	KTHREAD_SHOULD_STOP,
 	KTHREAD_SHOULD_PARK,
-	KTHREAD_IS_PARKED,
 };
 
 static inline void set_kthread_struct(void *kthread)
@@ -169,14 +168,23 @@ void *kthread_probe_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_PARKED);
-	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
-		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
-			complete(&self->parked);
+	for (;;) {
+		/*
+		 * TASK_PARKED is a special state; we must serialize against
+		 * possible pending wakeups to avoid store-store collisions on
+		 * task->state.
+		 *
+		 * Such a collision might possibly result in the task state
+		 * changing from TASK_PARKED and us failing the
+		 * wait_task_inactive() in kthread_park().
+		 */
+		set_special_state(TASK_PARKED);
+		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
+			break;
+
+		complete_all(&self->parked);
 		schedule();
-		__set_current_state(TASK_PARKED);
 	}
-	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
 }
 
@@ -443,22 +451,19 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = to_kthread(k);
 
+	/*
+	 * Newly created kthread was parked when the CPU was offline.
+	 * The binding was lost and we need to set it again.
+	 */
+	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+		__kthread_bind(k, kthread->cpu, TASK_PARKED);
+
+	reinit_completion(&kthread->parked);
 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 	/*
-	 * We clear the IS_PARKED bit here as we don't wait
-	 * until the task has left the park code. So if we'd
-	 * park before that happens we'd see the IS_PARKED bit
-	 * which might be about to be cleared.
+	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
 	 */
-	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-		/*
-		 * Newly created kthread was parked when the CPU was offline.
-		 * The binding was lost and we need to set it again.
-		 */
-		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-			__kthread_bind(k, kthread->cpu, TASK_PARKED);
-		wake_up_state(k, TASK_PARKED);
-	}
+	wake_up_state(k, TASK_PARKED);
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
 
@@ -481,12 +486,19 @@ int kthread_park(struct task_struct *k)
 	if (WARN_ON(k->flags & PF_EXITING))
 		return -ENOSYS;
 
-	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-		if (k != current) {
-			wake_up_process(k);
-			wait_for_completion(&kthread->parked);
-		}
+	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+	if (k != current) {
+		wake_up_process(k);
+		/*
+		 * Wait for __kthread_parkme() to complete(), this means we
+		 * _will_ have TASK_PARKED and are about to call schedule().
+		 */
+		wait_for_completion(&kthread->parked);
+		/*
+		 * Now wait for that schedule() to complete and the task to
+		 * get scheduled out.
+ */ + WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED)); } return 0; diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index e795908f36070dd33ed94630bb63b188065f21ca..a903367793758f3e1cc52ab34c18f1bfa78f38e3 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -352,16 +352,15 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) struct task_struct *owner; bool ret = true; + BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN)); + if (need_resched()) return false; rcu_read_lock(); owner = READ_ONCE(sem->owner); - if (!rwsem_owner_is_writer(owner)) { - /* - * Don't spin if the rwsem is readers owned. - */ - ret = !rwsem_owner_is_reader(owner); + if (!owner || !is_rwsem_owner_spinnable(owner)) { + ret = !owner; /* !owner is spinnable */ goto done; } @@ -382,11 +381,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) { struct task_struct *owner = READ_ONCE(sem->owner); - if (!rwsem_owner_is_writer(owner)) - goto out; + if (!is_rwsem_owner_spinnable(owner)) + return false; rcu_read_lock(); - while (sem->owner == owner) { + while (owner && (READ_ONCE(sem->owner) == owner)) { /* * Ensure we emit the owner->on_cpu, dereference _after_ * checking sem->owner still matches owner, if that fails, @@ -408,12 +407,12 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) cpu_relax(); } rcu_read_unlock(); -out: + /* * If there is a new owner or the owner is not set, we continue * spinning. */ - return !rwsem_owner_is_reader(READ_ONCE(sem->owner)); + return is_rwsem_owner_spinnable(READ_ONCE(sem->owner)); } static bool rwsem_optimistic_spin(struct rw_semaphore *sem) diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index a6c76a4832b40d60dacafaa33490c3a30eb77c70..22bd01a7dcaa4f4e7402cdc7b7dbe7753ffaf98a 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -201,5 +201,3 @@ void up_read_non_owner(struct rw_semaphore *sem) EXPORT_SYMBOL(up_read_non_owner); #endif - - diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h index a883b8f1fdc6efbadcab3ca130120cd28130e518..410ee7b9ac2c05d7edeb75c1b7e5c9056932e02c 100644 --- a/kernel/locking/rwsem.h +++ b/kernel/locking/rwsem.h @@ -1,20 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * The owner field of the rw_semaphore structure will be set to - * RWSEM_READ_OWNED when a reader grabs the lock. A writer will clear + * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear * the owner field when it unlocks. A reader, on the other hand, will * not touch the owner field when it unlocks. * - * In essence, the owner field now has the following 3 states: + * In essence, the owner field now has the following 4 states: * 1) 0 * - lock is free or the owner hasn't set the field yet * 2) RWSEM_READER_OWNED * - lock is currently or previously owned by readers (lock is free * or not set by owner yet) - * 3) Other non-zero value - * - a writer owns the lock + * 3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well + * - lock is owned by an anonymous writer, so spinning on the lock + * owner should be disabled. + * 4) Other non-zero value + * - a writer owns the lock and other writers can spin on the lock owner. 
 */
-#define RWSEM_READER_OWNED	((struct task_struct *)1UL)
+#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 0)
+#define RWSEM_READER_OWNED	((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
 
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
@@ -45,14 +49,22 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
 	WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
 }
 
-static inline bool rwsem_owner_is_writer(struct task_struct *owner)
+/*
+ * Return true if a rwsem waiter can spin on the rwsem's owner
+ * and steal the lock, i.e. the lock is not anonymously owned.
+ * N.B. !owner is considered spinnable.
+ */
+static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
 {
-	return owner && owner != RWSEM_READER_OWNED;
+	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
 }
 
-static inline bool rwsem_owner_is_reader(struct task_struct *owner)
+/*
+ * Return true if rwsem is owned by an anonymous writer or readers.
+ */
+static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
 {
-	return owner == RWSEM_READER_OWNED;
+	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
 }
 #else
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
diff --git a/kernel/module.c b/kernel/module.c
index 365a85deff6aff526ffb1776c479936dbc1a43dc..d89d348f185ce8452760b2b105940a885ca73ffc 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3517,6 +3517,11 @@ static noinline int do_init_module(struct module *mod)
 	 * walking this with preempt disabled. In all the failure paths, we
 	 * call synchronize_sched(), but we don't want to slow down the success
 	 * path, so use actual RCU here.
+	 * Note that module_alloc() on most architectures creates W+X page
+	 * mappings which won't be cleaned up until do_free_init() runs. Any
+	 * code such as mark_rodata_ro() which depends on those mappings to
+	 * be cleaned up needs to sync with the queued work - i.e.
+	 * rcu_barrier_sched()
 	 */
 	call_rcu_sched(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index f4330a2842a39661a7fa509a11b16ad858fa1530..bf60b37f1df1f91890c1f8f35b4f61cc24dce5f2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -77,6 +77,33 @@ config HIBERNATION
 	  For more information take a look at .
 
+config HIBERNATION_IMAGE_REUSE
+	bool "Reuse hibernation image"
+	depends on HIBERNATION
+	---help---
+	  By default the hibernation image is erased after either a
+	  successful or unsuccessful hibernation restore sequence. Since
+	  filesystem contents on disk are not part of the hibernation
+	  image, failure to create a new hibernation image every boot can
+	  lead to filesystem corruption.
+
+	  Conversely, if the use case can guarantee that the filesystem is
+	  not ever modified, the same hibernation image can be reused. This
+	  prevents creating additional hibernation images unnecessarily.
+
+	  For more details, refer to the description of CONFIG_HIBERNATION
+	  for booting without resuming.
+
+config HIBERNATION_SKIP_CRC
+	bool "Skip LZO image CRC check"
+	default n
+	depends on HIBERNATION
+	---help---
+	  Some filesystem devices may have hardware-based integrity checks.
+	  In this scenario, repeating the integrity check in software is
+	  unnecessary and wasteful. This config option has no effect if
+	  uncompressed hibernation images are used.
+ config ARCH_SAVE_PAGE_KEYS bool diff --git a/kernel/power/swap.c b/kernel/power/swap.c index d7cdc426ee3809bbaafdfd9d5698f83db654e23a..52623f04f18f5e61c7b2e6566a235371f4200290 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -605,9 +605,10 @@ static int crc32_threadfn(void *data) } atomic_set(&d->ready, 0); - for (i = 0; i < d->run_threads; i++) - *d->crc32 = crc32_le(*d->crc32, - d->unc[i], *d->unc_len[i]); + if (!IS_ENABLED(CONFIG_HIBERNATION_SKIP_CRC)) + for (i = 0; i < d->run_threads; i++) + *d->crc32 = crc32_le(*d->crc32, + d->unc[i], *d->unc_len[i]); atomic_set(&d->stop, 1); wake_up(&d->done); } @@ -1453,7 +1454,8 @@ static int load_image_lzo(struct swap_map_handle *handle, if (!snapshot_image_loaded(snapshot)) ret = -ENODATA; if (!ret) { - if (swsusp_header->flags & SF_CRC32_MODE) { + if ((swsusp_header->flags & SF_CRC32_MODE) && + (!IS_ENABLED(CONFIG_HIBERNATION_SKIP_CRC))) { if(handle->crc32 != swsusp_header->crc32) { printk(KERN_ERR "PM: Invalid image CRC32!\n"); @@ -1540,10 +1542,12 @@ int swsusp_check(void) if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); +#ifndef CONFIG_HIBERNATION_IMAGE_REUSE /* Reset swap signature now */ error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, swsusp_resume_block, swsusp_header, NULL); +#endif } else { error = -EINVAL; } diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index 3cdaeaef9ce1a63bfde3bc631946e40ed731e035..d989cc2381988e4574b76194bd7bf9e401ef4403 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -85,6 +85,7 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s, { int add; size_t len; + va_list ap; again: len = atomic_read(&s->len); @@ -103,7 +104,9 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s, if (!len) smp_rmb(); - add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args); + va_copy(ap, args); + add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap); + va_end(ap); if (!add) return 0; diff --git a/kernel/resource.c b/kernel/resource.c index 7ee3dd1ad2af17989aca69c6fdb9502d7ee92472..fbea5afe10a862d64e7004667f6fa4ceac50a43b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -172,7 +172,7 @@ static const struct file_operations proc_iomem_operations = { static int __init ioresources_init(void) { proc_create("ioports", 0, NULL, &proc_ioports_operations); - proc_create("iomem", 0, NULL, &proc_iomem_operations); + proc_create("iomem", 0400, NULL, &proc_iomem_operations); return 0; } __initcall(ioresources_init); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 13ec4d8bebe62221b1b6f2a499cdf0f0ec598393..8d6358125852113516e23695a2404e005565c032 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -29,6 +29,8 @@ #include #include +#include + #include #include #ifdef CONFIG_PARAVIRT @@ -2786,20 +2788,21 @@ static struct rq *finish_task_switch(struct task_struct *prev) fire_sched_in_preempt_notifiers(current); if (mm) mmdrop(mm); - if (unlikely(prev_state == TASK_DEAD)) { - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); + if (unlikely(prev_state == TASK_DEAD)) { + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. - */ - kprobe_flush_task(prev); + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. 
+ */ + kprobe_flush_task(prev); + + /* Task is done with its stack. */ + put_task_stack(prev); - /* Task is done with its stack. */ - put_task_stack(prev); + put_task_struct(prev); - put_task_struct(prev); } tick_nohz_task_switch(); @@ -3571,23 +3574,8 @@ static void __sched notrace __schedule(bool preempt) void __noreturn do_task_dead(void) { - /* - * The setting of TASK_RUNNING by try_to_wake_up() may be delayed - * when the following two conditions become true. - * - There is race condition of mmap_sem (It is acquired by - * exit_mm()), and - * - SMI occurs before setting TASK_RUNINNG. - * (or hypervisor of virtual machine switches to other guest) - * As a result, we may become TASK_RUNNING after becoming TASK_DEAD - * - * To avoid it, we have to wait for releasing tsk->pi_lock which - * is held by try_to_wake_up() - */ - raw_spin_lock_irq(¤t->pi_lock); - raw_spin_unlock_irq(¤t->pi_lock); - /* Causes final put_task_struct in finish_task_switch(): */ - __set_current_state(TASK_DEAD); + set_special_state(TASK_DEAD); /* Tell freezer to ignore us: */ current->flags |= PF_NOFREEZE; @@ -6852,12 +6840,11 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write, { int ret, i; unsigned int *data = (unsigned int *)table->data; - unsigned int old_val; + unsigned int *old_val; static DEFINE_MUTEX(mutex); static int cap_margin_levels = -1; mutex_lock(&mutex); - old_val = *data; if (cap_margin_levels == -1 || table->maxlen != (sizeof(unsigned int) * cap_margin_levels)) { @@ -6866,25 +6853,36 @@ int sched_updown_migrate_handler(struct ctl_table *table, int write, } if (cap_margin_levels <= 0) { - mutex_unlock(&mutex); - return -EINVAL; + ret = -EINVAL; + goto unlock_mutex; } + old_val = kzalloc(table->maxlen, GFP_KERNEL); + if (!old_val) { + ret = -ENOMEM; + goto unlock_mutex; + } + + memcpy(old_val, data, table->maxlen); + ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos); if (!ret && write) { for (i = 0; i < cap_margin_levels; i++) { if (sysctl_sched_capacity_margin_up[i] > sysctl_sched_capacity_margin_down[i]) { - *data = old_val; - mutex_unlock(&mutex); - return -EINVAL; + memcpy(data, old_val, table->maxlen); + ret = -EINVAL; + goto free_old_val; } } ret = sched_update_updown_migrate_values(data, cap_margin_levels, ret); } +free_old_val: + kfree(old_val); +unlock_mutex: mutex_unlock(&mutex); return ret; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index c53f5a3f2159ea08d801a111897959c56b186a7e..98739c5f55c478458a242c95408c78525721811b 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -153,7 +153,9 @@ static void sugov_track_cycles(struct sugov_policy *sg_policy, /* Track cycles in current window */ delta_ns = upto - sg_policy->last_cyc_update_time; - cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ); + delta_ns *= prev_freq; + do_div(delta_ns, (NSEC_PER_SEC / KHZ)); + cycles = delta_ns; sg_policy->curr_cycles += cycles; sg_policy->last_cyc_update_time = upto; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 0f22c5390e035844fb52917c834508fff04d930f..4e2846f4b87f4701096b9319a7efa96b84867c97 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1087,7 +1087,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds. * So, overflow is not an issue here. 
*/ -u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) +static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) { u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ u64 u_act; @@ -2668,8 +2668,6 @@ bool dl_cpu_busy(unsigned int cpu) #endif #ifdef CONFIG_SCHED_DEBUG -extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); - void print_dl_stats(struct seq_file *m, int cpu) { print_dl_rq(m, cpu, &cpu_rq(cpu)->dl); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3ace54a93c45d2c8f5183d844ef562775cfacf15..96e44fb1e720d6bb058ccc35bb5e176321a5fb87 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -196,6 +196,11 @@ unsigned int sched_capacity_margin_up[NR_CPUS] = { unsigned int sched_capacity_margin_down[NR_CPUS] = { [0 ... NR_CPUS-1] = 1205}; /* ~15% margin */ +#ifdef CONFIG_SCHED_WALT +/* 1ms default for 20ms window size scaled to 1024 */ +unsigned int sysctl_sched_min_task_util_for_boost_colocation = 51; +#endif + static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; @@ -5003,9 +5008,10 @@ static inline void update_overutilized_status(struct rq *rq) rcu_read_lock(); sd = rcu_dereference(rq->sd); - if (sd && !sd_overutilized(sd) && - cpu_overutilized(rq->cpu)) + if (sd && (sd->flags & SD_LOAD_BALANCE)) set_sd_overutilized(sd); + else if (sd && sd->parent) + set_sd_overutilized(sd->parent); rcu_read_unlock(); } #else @@ -6999,9 +7005,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu) if (capacity == max_capacity) return true; - if (sched_boost_policy() == SCHED_BOOST_ON_BIG && - task_sched_boost(p) && - is_min_capacity_cpu(cpu)) + if (task_boost_on_big_eligible(p) && is_min_capacity_cpu(cpu)) return false; return task_fits_capacity(p, capacity, cpu); @@ -7045,36 +7049,35 @@ static bool is_packing_eligible(struct task_struct *p, int target_cpu, return (estimated_capacity <= capacity_curr_of(target_cpu)); } -static inline bool skip_sg(struct task_struct *p, struct sched_group *sg, - struct cpumask *rtg_target) +static int start_cpu(struct task_struct *p, bool boosted, + struct cpumask *rtg_target) { - int fcpu = group_first_cpu(sg); - - /* Are all CPUs isolated in this group? */ - if (!sg->group_weight) - return true; - - /* - * Don't skip a group if a task affinity allows it - * to run only on that group. - */ - if (cpumask_subset(&p->cpus_allowed, sched_group_span(sg))) - return false; + struct root_domain *rd = cpu_rq(smp_processor_id())->rd; + int start_cpu = -1; - if (!task_fits_max(p, fcpu)) - return true; + if (boosted) + return rd->max_cap_orig_cpu; - if (rtg_target && !cpumask_test_cpu(fcpu, rtg_target)) - return true; + /* A task always fits on its rtg_target */ + if (rtg_target) { + int rtg_target_cpu = cpumask_first_and(rtg_target, + cpu_online_mask); - return false; -} + if (rtg_target_cpu < nr_cpu_ids) + return rtg_target_cpu; + } -static int start_cpu(bool boosted) -{ - struct root_domain *rd = cpu_rq(smp_processor_id())->rd; + /* Where the task should land based on its demand */ + if (rd->min_cap_orig_cpu != -1 + && task_fits_max(p, rd->min_cap_orig_cpu)) + start_cpu = rd->min_cap_orig_cpu; + else if (rd->mid_cap_orig_cpu != -1 + && task_fits_max(p, rd->mid_cap_orig_cpu)) + start_cpu = rd->mid_cap_orig_cpu; + else + start_cpu = rd->max_cap_orig_cpu; - return boosted ? 
rd->max_cap_orig_cpu : rd->min_cap_orig_cpu; + return start_cpu; } static inline int find_best_target(struct task_struct *p, int *backup_cpu, @@ -7088,6 +7091,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, unsigned long target_util = ULONG_MAX; unsigned long best_active_util = ULONG_MAX; unsigned long best_active_cuml_util = ULONG_MAX; + unsigned long best_idle_cuml_util = ULONG_MAX; int best_idle_cstate = INT_MAX; struct sched_domain *sd; struct sched_group *sg; @@ -7095,13 +7099,15 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, int best_idle_cpu = -1; int target_cpu = -1; int cpu, i; - unsigned long spare_cap; + long spare_cap, most_spare_cap = 0; + int most_spare_cap_cpu = -1; unsigned int active_cpus_count = 0; + int prev_cpu = task_cpu(p); *backup_cpu = -1; /* Find start CPU based on boost value */ - cpu = start_cpu(boosted); + cpu = start_cpu(p, boosted, fbt_env->rtg_target); if (cpu < 0) return -1; @@ -7113,14 +7119,13 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, /* Scan CPUs in all SDs */ sg = sd->groups; do { - if (skip_sg(p, sg, fbt_env->rtg_target)) - continue; - for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) { unsigned long capacity_curr = capacity_curr_of(i); unsigned long capacity_orig = capacity_orig_of(i); unsigned long wake_util, new_util, new_util_cuml; + trace_sched_cpu_util(i); + if (!cpu_online(i) || cpu_isolated(i)) continue; @@ -7135,8 +7140,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (sched_cpu_high_irqload(i)) continue; - trace_sched_cpu_util(i); - /* * p's blocked utilization is still accounted for on prev_cpu * so prev_cpu will receive a negative bias due to the double @@ -7146,6 +7149,11 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, new_util = wake_util + task_util(p); spare_cap = capacity_orig_of(i) - wake_util; + if (spare_cap > most_spare_cap) { + most_spare_cap = spare_cap; + most_spare_cap_cpu = i; + } + /* * Cumulative demand may already be accounting for the * task. If so, add just the boost-utilization to @@ -7204,7 +7212,8 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, trace_sched_find_best_target(p, prefer_idle, min_util, cpu, best_idle_cpu, - best_active_cpu, i); + best_active_cpu, + -1, i, -1); return i; } @@ -7290,12 +7299,19 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, * shallow idle big CPU. */ if (sysctl_sched_cstate_aware && - best_idle_cstate <= idle_idx) + best_idle_cstate < idle_idx) + continue; + + if (best_idle_cstate == idle_idx && + (best_idle_cpu == prev_cpu || + (i != prev_cpu && + new_util_cuml > best_idle_cuml_util))) continue; /* Keep track of best idle CPU */ target_capacity = capacity_orig; best_idle_cstate = idle_idx; + best_idle_cuml_util = new_util_cuml; best_idle_cpu = i; continue; } @@ -7339,13 +7355,22 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, } /* - * When placement boost is active, we traverse CPUs - * other than min capacity CPUs. Reset the target_capacity - * to keep traversing the other clusters. + * We start with group where the task should be placed. When + * placement boost is active reset the target_capacity to keep + * traversing the other higher clusters. Don't reset it if we + * are already at the highest cluster. 
*/ - if (fbt_env->placement_boost) + if (fbt_env->placement_boost && + !is_max_capacity_cpu(group_first_cpu(sg))) target_capacity = ULONG_MAX; + /* + * if we have found a target cpu within a group, don't bother + * checking other groups + */ + if (target_capacity != ULONG_MAX) + break; + } while (sg = sg->next, sg != sd->groups); if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env, @@ -7383,9 +7408,16 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, ? best_active_cpu : best_idle_cpu; + if (target_cpu == -1 && most_spare_cap_cpu != -1 && + /* ensure we use active cpu for active migration */ + !(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu))) + target_cpu = most_spare_cap_cpu; + trace_sched_find_best_target(p, prefer_idle, min_util, cpu, best_idle_cpu, best_active_cpu, - target_cpu); + most_spare_cap_cpu, + target_cpu, + *backup_cpu); /* it is possible for target and backup * to select same CPU - if so, drop backup @@ -7393,6 +7425,19 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, if (*backup_cpu == target_cpu) *backup_cpu = -1; + /* + * The next step of energy evaluation includes + * prev_cpu. Drop target or backup if it is + * same as prev_cpu + */ + if (*backup_cpu == prev_cpu) + *backup_cpu = -1; + + if (target_cpu == prev_cpu) { + target_cpu = *backup_cpu; + *backup_cpu = -1; + } + return target_cpu; } @@ -7602,7 +7647,9 @@ static inline struct cpumask *find_rtg_target(struct task_struct *p) rcu_read_lock(); grp = task_related_thread_group(p); - if (grp && grp->preferred_cluster) { + if (grp && grp->preferred_cluster && + (task_util(p) > + sysctl_sched_min_task_util_for_boost_colocation)) { rtg_target = &grp->preferred_cluster->cpus; if (!task_fits_max(p, cpumask_first(rtg_target))) rtg_target = NULL; @@ -7644,8 +7691,10 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, struct cpumask *rtg_target = find_rtg_target(p); struct find_best_target_env fbt_env; bool need_idle = wake_to_idle(p); + bool placement_boost = task_placement_boost_enabled(p); u64 start_t = 0; int fastpath = 0; + int next_cpu = -1, backup_cpu = -1; if (trace_sched_task_util_enabled()) start_t = sched_clock(); @@ -7714,13 +7763,15 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, eenv->max_cpu_count = EAS_CPU_BKP + 1; fbt_env.rtg_target = rtg_target; - fbt_env.placement_boost = task_placement_boost_enabled(p); + fbt_env.placement_boost = placement_boost; fbt_env.need_idle = need_idle; /* Find a cpu with sufficient capacity */ eenv->cpu[EAS_CPU_NXT].cpu_id = find_best_target(p, &eenv->cpu[EAS_CPU_BKP].cpu_id, boosted, prefer_idle, &fbt_env); + next_cpu = eenv->cpu[EAS_CPU_NXT].cpu_id; + backup_cpu = eenv->cpu[EAS_CPU_BKP].cpu_id; /* take note if no backup was found */ if (eenv->cpu[EAS_CPU_BKP].cpu_id < 0) @@ -7758,11 +7809,9 @@ static int find_energy_efficient_cpu(struct sched_domain *sd, eenv->cpu[eenv->next_idx].cpu_id; out: - trace_sched_task_util(p, eenv->cpu[EAS_CPU_NXT].cpu_id, - eenv->cpu[EAS_CPU_BKP].cpu_id, energy_cpu, sync, - fbt_env.need_idle, fastpath, fbt_env.placement_boost, - rtg_target ? cpumask_first(rtg_target) : -1, - start_t); + trace_sched_task_util(p, next_cpu, backup_cpu, energy_cpu, sync, + need_idle, fastpath, placement_boost, + rtg_target ? 
cpumask_first(rtg_target) : -1, start_t); return energy_cpu; } @@ -9911,7 +9960,22 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) capa_move /= SCHED_CAPACITY_SCALE; /* Move if we gain throughput */ - if (capa_move > capa_now) + if (capa_move > capa_now) { + env->imbalance = busiest->load_per_task; + return; + } + + /* We can't see throughput improvement with the load-based + * method, but it is possible depending upon group size and + * capacity range that there might still be an underutilized + * cpu available in an asymmetric capacity system. Do one last + * check just in case. + */ + if (env->sd->flags & SD_ASYM_CPUCAPACITY && + busiest->group_type == group_overloaded && + busiest->sum_nr_running > busiest->group_weight && + local->sum_nr_running < local->group_weight && + local->group_capacity < busiest->group_capacity) env->imbalance = busiest->load_per_task; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index cb41bad9c3392ba233e4ef1e71285b38407b5fb7..093077acd2d5252ec241d73dce038eb9539391ca 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2925,8 +2925,6 @@ int sched_rr_handler(struct ctl_table *table, int write, } #ifdef CONFIG_SCHED_DEBUG -extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); - void print_rt_stats(struct seq_file *m, int cpu) { rt_rq_iter_t iter; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 9a537b5fea80bce433f6eb49d8857df73a6f11b9..ee2e7684d21d05d28da5559859756f93f7cc4e9b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -731,6 +731,8 @@ struct root_domain { /* First cpu with maximum and minimum original capacity */ int max_cap_orig_cpu, min_cap_orig_cpu; + /* First cpu with mid capacity */ + int mid_cap_orig_cpu; }; extern struct root_domain def_root_domain; @@ -2332,8 +2334,9 @@ extern bool sched_debug_enabled; extern void print_cfs_stats(struct seq_file *m, int cpu); extern void print_rt_stats(struct seq_file *m, int cpu); extern void print_dl_stats(struct seq_file *m, int cpu); -extern void -print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); +extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); +extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); +extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); #ifdef CONFIG_NUMA_BALANCING extern void show_numa_stats(struct task_struct *p, struct seq_file *m); @@ -2899,6 +2902,25 @@ static inline bool task_placement_boost_enabled(struct task_struct *p) return false; } +static inline bool task_boost_on_big_eligible(struct task_struct *p) +{ + bool boost_on_big = task_sched_boost(p) && + sched_boost_policy() == SCHED_BOOST_ON_BIG; + + if (boost_on_big) { + /* + * Filter out tasks less than min task util threshold + * under conservative boost. 
+ */ + if (sysctl_sched_boost == CONSERVATIVE_BOOST && + task_util(p) <= + sysctl_sched_min_task_util_for_boost_colocation) + boost_on_big = false; + } + + return boost_on_big; +} + #else /* CONFIG_SCHED_WALT */ struct walt_sched_stats; @@ -2915,6 +2937,11 @@ static inline bool task_placement_boost_enabled(struct task_struct *p) return false; } +static inline bool task_boost_on_big_eligible(struct task_struct *p) +{ + return false; +} + static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } static inline int sched_boost(void) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 2eb04946e4b81e2847c34cdc4b7171c269bb6032..9894e0d87445995fcd13bf430726639dc2e5b80e 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -303,6 +303,7 @@ static int init_rootdomain(struct root_domain *rd) goto free_cpudl; rd->max_cap_orig_cpu = rd->min_cap_orig_cpu = -1; + rd->mid_cap_orig_cpu = -1; return 0; @@ -1851,6 +1852,20 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att cpu_attach_domain(sd, d.rd, i); } + + /* set the mid capacity cpu (assumes only 3 capacities) */ + for_each_cpu(i, cpu_map) { + int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu); + int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu); + + if ((cpu_rq(i)->cpu_capacity_orig + != cpu_rq(min_cpu)->cpu_capacity_orig) && + (cpu_rq(i)->cpu_capacity_orig + != cpu_rq(max_cpu)->cpu_capacity_orig)) { + WRITE_ONCE(d.rd->mid_cap_orig_cpu, i); + break; + } + } rcu_read_unlock(); if (rq && sched_debug_enabled) { diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 22b0419eae057b2ac75888941c5363c293f5f4fc..9d949aa316ff346ff79ee3353b38fe441f073ee7 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -1609,6 +1609,13 @@ account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event) (event == PICK_NEXT_TASK || event == TASK_MIGRATE))) return 0; + /* + * The idle exit time is not accounted for the first task _picked_ up to + * run on the idle CPU. + */ + if (event == PICK_NEXT_TASK && rq->curr == rq->idle) + return 0; + /* * TASK_UPDATE can be called on sleeping task, when its moved between * related groups @@ -2529,7 +2536,6 @@ static void _set_preferred_cluster(struct related_thread_group *grp) { struct task_struct *p; u64 combined_demand = 0; - bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG; bool group_boost = false; u64 wallclock; @@ -2548,7 +2554,7 @@ static void _set_preferred_cluster(struct related_thread_group *grp) return; list_for_each_entry(p, &grp->tasks, grp_list) { - if (boost_on_big && task_sched_boost(p)) { + if (task_boost_on_big_eligible(p)) { group_boost = true; break; } @@ -2558,7 +2564,6 @@ static void _set_preferred_cluster(struct related_thread_group *grp) continue; combined_demand += p->ravg.coloc_demand; - } grp->preferred_cluster = best_cluster(grp, diff --git a/kernel/signal.c b/kernel/signal.c index 99db4b67269f15709e1a74fa2c428a39e87f077c..6994ceeb6d996506ca0513ccaac4314d09f6e024 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1833,14 +1833,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) return; } + set_special_state(TASK_TRACED); + /* * We're committing to trapping. TRACED should be visible before * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). * Also, transition to TRACED and updates to ->jobctl should be * atomic with respect to siglock and should be done after the arch * hook as siglock is released and regrabbed across it. 
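The signal.c hunk here switches ptrace_stop() to set_special_state(TASK_TRACED) and documents the required ordering: the tracee's TRACED state must be visible to the tracer before JOBCTL_TRAPPING is cleared (the TRACER/TRACEE diagram and the smp_wmb() follow below). The sketch that follows models the same store/load pairing with C11 release/acquire atomics in user space; it is an analogy only, and the tracee/tracer names and the flag are invented for illustration.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <assert.h>
    #include <stdio.h>

    #define RUNNING 0
    #define TRACED  1

    static atomic_int state    = RUNNING;
    static atomic_int trapping = 1;   /* analogous to JOBCTL_TRAPPING */

    static void *tracee(void *arg)
    {
        /* Publish the new state first ... */
        atomic_store_explicit(&state, TRACED, memory_order_relaxed);
        /* ... then clear the flag with release semantics so the state
         * store cannot become visible after the flag clear (the kernel
         * code relies on smp_wmb() for the same write ordering). */
        atomic_store_explicit(&trapping, 0, memory_order_release);
        return NULL;
    }

    static void *tracer(void *arg)
    {
        /* Wait for the flag to clear with acquire semantics ... */
        while (atomic_load_explicit(&trapping, memory_order_acquire))
            ;
        /* ... after which the TRACED state is guaranteed to be visible. */
        assert(atomic_load_explicit(&state, memory_order_relaxed) == TRACED);
        puts("tracer observed TRACED");
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, tracer, NULL);
        pthread_create(&b, NULL, tracee, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }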
+ * + * TRACER TRACEE + * + * ptrace_attach() + * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) + * do_wait() + * set_current_state() smp_wmb(); + * ptrace_do_wait() + * wait_task_stopped() + * task_stopped_code() + * [L] task_is_traced() [S] task_clear_jobctl_trapping(); */ - set_current_state(TASK_TRACED); + smp_wmb(); current->last_siginfo = info; current->exit_code = exit_code; @@ -2048,7 +2061,7 @@ static bool do_signal_stop(int signr) if (task_participate_group_stop(current)) notify = CLD_STOPPED; - __set_current_state(TASK_STOPPED); + set_special_state(TASK_STOPPED); spin_unlock_irq(¤t->sighand->siglock); /* diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index b7591261652d3ea88811f9d2af2a3ce188fb3320..2f6fa95de2d8c6b5b1ca70ff71bbb7765b54ea5b 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -21,6 +21,7 @@ #include #include #include +#include /* * Structure to determine completion condition and record errors. May @@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done) } static void __cpu_stop_queue_work(struct cpu_stopper *stopper, - struct cpu_stop_work *work) + struct cpu_stop_work *work, + struct wake_q_head *wakeq) { list_add_tail(&work->list, &stopper->works); - wake_up_process(stopper->thread); + wake_q_add(wakeq, stopper->thread); } /* queue @work to @stopper. if offline, @work is completed immediately */ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + DEFINE_WAKE_Q(wakeq); unsigned long flags; bool enabled; spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) - __cpu_stop_queue_work(stopper, work); + __cpu_stop_queue_work(stopper, work, &wakeq); else if (work->done) cpu_stop_signal_done(work->done); spin_unlock_irqrestore(&stopper->lock, flags); + wake_up_q(&wakeq); + return enabled; } @@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, { struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); + DEFINE_WAKE_Q(wakeq); int err; retry: spin_lock_irq(&stopper1->lock); @@ -252,8 +258,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, goto unlock; err = 0; - __cpu_stop_queue_work(stopper1, work1); - __cpu_stop_queue_work(stopper2, work2); + __cpu_stop_queue_work(stopper1, work1, &wakeq); + __cpu_stop_queue_work(stopper2, work2, &wakeq); unlock: spin_unlock(&stopper2->lock); spin_unlock_irq(&stopper1->lock); @@ -263,6 +269,13 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, cpu_relax(); goto retry; } + + if (!err) { + preempt_disable(); + wake_up_q(&wakeq); + preempt_enable(); + } + return err; } /** diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0ed69a527bb5e8785203d9a765be72e36199680e..6810972a4e474c325ab6347abdf88e7c4d90ce9a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -373,6 +373,15 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, + { + .procname = "sched_min_task_util_for_boost_colocation", + .data = &sysctl_sched_min_task_util_for_boost_colocation, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one_thousand, + }, #endif { .procname = "sched_upmigrate", @@ -380,6 +389,8 @@ static struct ctl_table kern_table[] = { .maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS, .mode = 0644, .proc_handler = 
sched_updown_migrate_handler, + .extra1 = &one, + .extra2 = &sysctl_sched_capacity_margin_down, }, { .procname = "sched_downmigrate", @@ -387,6 +398,7 @@ static struct ctl_table kern_table[] = { .maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS, .mode = 0644, .proc_handler = sched_updown_migrate_handler, + .extra1 = &sysctl_sched_capacity_margin_up, }, #ifdef CONFIG_SCHED_DEBUG { @@ -1478,6 +1490,15 @@ static struct ctl_table vm_table[] = { .extra1 = &zero, .extra2 = &one_hundred, }, + { + .procname = "want_old_faultaround_pte", + .data = &want_old_faultaround_pte, + .maxlen = sizeof(want_old_faultaround_pte), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", @@ -3249,10 +3270,20 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { + struct do_proc_douintvec_minmax_conv_param *param = data; + if (write) { + int val; + if (*negp) return -EINVAL; - *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp; + + val = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp; + if ((param->min && *param->min > val) || + (param->max && *param->max < val)) + return -ERANGE; + + *valp = val; } else { *negp = false; *lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp; @@ -3275,8 +3306,13 @@ static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp, int proc_douintvec_capacity(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { + struct do_proc_douintvec_minmax_conv_param param = { + .min = (unsigned int *) table->extra1, + .max = (unsigned int *) table->extra2, + }; + return do_proc_dointvec(table, write, buffer, lenp, ppos, - do_proc_douintvec_capacity_conv, NULL); + do_proc_douintvec_capacity_conv, ¶m); } #else /* CONFIG_PROC_SYSCTL */ diff --git a/kernel/time/time.c b/kernel/time/time.c index 44a8c1402133be79d00fd6e66eb5825404277185..319935af02fbe18e2c1818449beb2e6135656050 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -28,6 +28,7 @@ */ #include +#include #include #include #include @@ -348,9 +349,10 @@ unsigned int jiffies_to_msecs(const unsigned long j) return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); #else # if BITS_PER_LONG == 32 - return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; + return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> + HZ_TO_MSEC_SHR32; # else - return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; + return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); # endif #endif } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 407aabe9670b0c98f3d5a9222dd9c4ef5bbf021c..2f12af99a4baeb72ab70e6127bcd785ca1ae55d3 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3362,8 +3362,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, print_event_info(buf, m); - seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); - seq_printf(m, "# | | | %s | |\n", tgid ? " | " : ""); + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, @@ -3385,9 +3385,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file tgid ? tgid_space : space); seq_printf(m, "# %s||| / delay\n", tgid ? 
tgid_space : space); - seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", + seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : space); - seq_printf(m, "# | | | %s|||| | |\n", + seq_printf(m, "# | | %s | |||| | |\n", tgid ? " | " : space); } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 9fbaa809d7476284ca9d189d323df4d45a3d9168..416f7fead5faea3aa82d78b0abea54c1606da33e 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%16s-%-5d [%03d] ", - comm, entry->pid, iter->cpu); + trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { unsigned int tgid = trace_find_tgid(entry->pid); @@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter) trace_seq_printf(s, "(%5d) ", tgid); } + trace_seq_printf(s, "[%03d] ", iter->cpu); + if (tr->trace_flags & TRACE_ITER_IRQ_INFO) trace_print_lat_fmt(s, entry); diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index ad1d6164e94603cf29f1b26f0a58f8c37988d57b..e82cff5c842c6e5c8ac961cd6beae0ab0a3ab304 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -304,7 +304,7 @@ static int t_show(struct seq_file *m, void *v) if (!*fmt) return 0; - seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt); + seq_printf(m, "0x%lx : \"", 0L); /* * Tabs and new lines need to be converted. diff --git a/lib/Kconfig b/lib/Kconfig index b1445b22a6def496d91bde16deb2b13a01a52915..f4788fbae4d0d8818ad068f908be0770235746ad 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -574,6 +574,15 @@ config STACKDEPOT bool select STACKTRACE +config STACK_HASH_ORDER_SHIFT + int "stack depot hash size (12 => 4KB, 20 => 1024KB)" + range 12 20 + default 20 + depends on STACKDEPOT + help + Select the hash size as a power of 2 for the stackdepot hash table. + Choose a lower value to reduce the memory impact. + config SBITMAP bool diff --git a/lib/stackdepot.c b/lib/stackdepot.c index f87d138e96724a43d219231bb98d6b1a863a0f0a..da258c507675673706a993d1062c75c6e8e4c089 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -146,8 +146,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, return stack; } -#define STACK_HASH_ORDER 20 -#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER) +#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER_SHIFT) #define STACK_HASH_MASK (STACK_HASH_SIZE - 1) #define STACK_HASH_SEED 0x9747b28c diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 86c3385b9eb393a54b09cddcfcccef3ec257f8d2..4a990f3fd345820766a49a34e29c499e243af7b4 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1392,9 +1392,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, return string(buf, end, NULL, spec); switch (fmt[1]) { - case 'r': - return number(buf, end, clk_get_rate(clk), spec); - case 'n': default: #ifdef CONFIG_COMMON_CLK diff --git a/mm/Kconfig b/mm/Kconfig index 5b77318feff1edc928cdad66b0c18eca716ed8fb..17ad9d7de090a4cbd5aad68a8c334eae17b47cf0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -799,3 +799,27 @@ config SPECULATIVE_PAGE_FAULT detected or because underlying PMD or PTE tables are not yet allocating, it is failing its processing and a classic page fault is then tried. 
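The CONFIG_SPECULATIVE_PAGE_FAULT help text above describes the mechanism the memory.c hunks later in this patch implement: handle the fault against a snapshot of the VMA, then validate a per-VMA sequence count and fall back to the classic, mmap_sem-protected path if the VMA changed underneath. Below is a minimal user-space sketch of that "optimistic read, validate, fall back" pattern; the seqcount is hand-rolled with C11 atomics purely for illustration and is not the kernel's vm_sequence.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vma_model {
        atomic_uint  seq;     /* even: stable, odd: writer in progress */
        atomic_ulong start;
        atomic_ulong end;
    };

    /* Speculative read: snapshot the sequence, use the fields, then make
     * sure no writer ran in between. Returns false whenever the caller
     * must retry on the slow (locked) path instead. */
    static bool contains_speculative(struct vma_model *v, unsigned long addr)
    {
        unsigned int seq = atomic_load_explicit(&v->seq, memory_order_acquire);

        if (seq & 1)
            return false;         /* writer active: bail out to slow path */

        bool hit =
            addr >= atomic_load_explicit(&v->start, memory_order_relaxed) &&
            addr <  atomic_load_explicit(&v->end, memory_order_relaxed);

        /* Re-check the sequence; if a writer ran, the snapshot is unusable. */
        atomic_thread_fence(memory_order_acquire);
        if (atomic_load_explicit(&v->seq, memory_order_relaxed) != seq)
            return false;

        return hit;
    }

    int main(void)
    {
        struct vma_model v;

        atomic_init(&v.seq, 0);
        atomic_init(&v.start, 0x1000);
        atomic_init(&v.end, 0x2000);

        printf("0x1800 in range: %d\n", contains_speculative(&v, 0x1800));
        printf("0x3000 in range: %d\n", contains_speculative(&v, 0x3000));
        return 0;
    }

The writer half of this protocol (the vm_write_begin()/vm_write_end() analog) is sketched further down, next to the madvise and free_pgtables hunks that introduce it.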
+ +config HAVE_LOW_MEMORY_KILLER + bool "Have user/kernel space low memory killer" + default n + help + Say 'y' if you have a user/kernel low memory killer and thus you + want page allocator to provide sufficient time before it triggers + Out of Memory killer. + +config PROCESS_RECLAIM + bool "Enable process reclaim" + depends on PROC_FS + default y + help + It allows to reclaim pages of the process by /proc/pid/reclaim. + + (echo file > /proc/PID/reclaim) reclaims file-backed pages only. + (echo anon > /proc/PID/reclaim) reclaims anonymous pages only. + (echo all > /proc/PID/reclaim) reclaims all pages. + + (echo addr size-byte > /proc/PID/reclaim) reclaims pages in + (addr, addr + size-bytes) of the process. + + Any other value is ignored. diff --git a/mm/Makefile b/mm/Makefile index 5a2b9505e41398b00a77aa3c49bf3fdd15bfaa9c..61e1aac6a15049f6ad9f1fd50ff293b15d75b0d4 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o vmacache.o swap_slots.o \ interval_tree.o list_lru.o workingset.o \ - debug.o $(mmu-y) showmem.o + debug.o $(mmu-y) showmem.o vmpressure.o obj-y += init-mm.o @@ -77,7 +77,7 @@ obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG) += memcontrol.o obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o @@ -104,3 +104,4 @@ obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o obj-$(CONFIG_HMM) += hmm.o +obj-$(CONFIG_PROCESS_RECLAIM) += process_reclaim.o diff --git a/mm/backing-dev.c b/mm/backing-dev.c index dee049a0ec5b126a373cbb6f8f29707b526518cd..6774e0369ebecf79484aa2f1b55fabb7b89ab042 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -409,6 +409,7 @@ static void wb_exit(struct bdi_writeback *wb) * protected. */ static DEFINE_SPINLOCK(cgwb_lock); +static struct workqueue_struct *cgwb_release_wq; /** * wb_congested_get_create - get or create a wb_congested @@ -519,7 +520,7 @@ static void cgwb_release(struct percpu_ref *refcnt) { struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback, refcnt); - schedule_work(&wb->release_work); + queue_work(cgwb_release_wq, &wb->release_work); } static void cgwb_kill(struct bdi_writeback *wb) @@ -783,6 +784,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi) spin_unlock_irq(&cgwb_lock); } +static int __init cgwb_init(void) +{ + /* + * There can be many concurrent release work items overwhelming + * system_wq. Put them in a separate wq and limit concurrency. + * There's no point in executing many of these in parallel. 
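The backing-dev.c change here routes cgwb release work items to a dedicated workqueue, created in the following hunk with max_active = 1, so a burst of release items can neither overwhelm system_wq nor run concurrently to no benefit. A stripped-down module sketch of that pattern is below; the demo_* names are placeholders and not from the patch.

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;
    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *work)
    {
        pr_info("demo: release-style work item ran\n");
    }

    static int __init demo_init(void)
    {
        /* One dedicated queue, at most one item executing at a time. */
        demo_wq = alloc_workqueue("demo_release", 0, 1);
        if (!demo_wq)
            return -ENOMEM;

        INIT_WORK(&demo_work, demo_work_fn);
        queue_work(demo_wq, &demo_work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Limiting max_active to 1 serializes the items on the dedicated queue while still keeping them off the shared system workqueue, which is exactly the trade-off the comment in the hunk describes.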
+ */ + cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1); + if (!cgwb_release_wq) + return -ENOMEM; + + return 0; +} +subsys_initcall(cgwb_init); + #else /* CONFIG_CGROUP_WRITEBACK */ static int cgwb_bdi_init(struct backing_dev_info *bdi) diff --git a/mm/cma.c b/mm/cma.c index 3d91939750a39c78cca235230a0ad5cfecf727eb..e4a219fa0a2d0f42b9c846309dbf4c1e5b17b989 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include "cma.h" @@ -97,6 +98,29 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, mutex_unlock(&cma->lock); } +static int cma_showmem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int i; + unsigned long used; + struct cma *cma; + + for (i = 0; i < cma_area_count; i++) { + cma = &cma_areas[i]; + used = bitmap_weight(cma->bitmap, + (int)cma_bitmap_maxno(cma)); + used <<= cma->order_per_bit; + pr_info("cma-%d pages: => %lu used of %lu total pages\n", + i, used, cma->count); + } + + return 0; +} + +static struct notifier_block cma_nb = { + .notifier_call = cma_showmem_notifier, +}; + static int __init cma_activate_area(struct cma *cma) { int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); @@ -161,6 +185,8 @@ static int __init cma_init_reserved_areas(void) return ret; } + show_mem_notifier_register(&cma_nb); + return 0; } core_initcall(cma_init_reserved_areas); diff --git a/mm/filemap.c b/mm/filemap.c index f7ccba83429b2b0340ee30f35a6a66d31da5175c..e77e15d08670f57c81f2c92d0d4d5214b72f3762 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -48,6 +48,8 @@ #include +int want_old_faultaround_pte = 1; + /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. @@ -2565,6 +2567,14 @@ void filemap_map_pages(struct vm_fault *vmf, if (vmf->pte) vmf->pte += iter.index - last_pgoff; last_pgoff = iter.index; + + if (want_old_faultaround_pte) { + if (iter.index == vmf->pgoff) + vmf->flags &= ~FAULT_FLAG_PREFAULT_OLD; + else + vmf->flags |= FAULT_FLAG_PREFAULT_OLD; + } + if (alloc_set_pte(vmf, NULL, page)) goto unlock; unlock_page(page); diff --git a/mm/gup.c b/mm/gup.c index d2ba0be714411eb56c49ef10cc94b69276b2c789..72c921da0f3b6e050abb56dd99a0e3bc9e43d2e7 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1469,32 +1469,48 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr, return 1; } -static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, +static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; + int nr_start = *nr; + + fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) + return 0; - fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); - return __gup_device_huge(fault_pfn, addr, end, pages, nr); + if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { + undo_dev_pagemap(nr, nr_start, pages); + return 0; + } + return 1; } -static int __gup_device_huge_pud(pud_t pud, unsigned long addr, +static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; + int nr_start = *nr; + + fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) + return 0; - fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); - return __gup_device_huge(fault_pfn, addr, end, pages, nr); + if (unlikely(pud_val(orig) != pud_val(*pudp))) { 
+ undo_dev_pagemap(nr, nr_start, pages); + return 0; + } + return 1; } #else -static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr, +static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } -static int __gup_device_huge_pud(pud_t pud, unsigned long addr, +static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); @@ -1512,7 +1528,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, return 0; if (pmd_devmap(orig)) - return __gup_device_huge_pmd(orig, addr, end, pages, nr); + return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); @@ -1550,7 +1566,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, return 0; if (pud_devmap(orig)) - return __gup_device_huge_pud(orig, addr, end, pages, nr); + return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8af604f3b3708542f543ef8cb979e6207aa6406e..5cb7aee06f761e5b63fc9626004a486b27518995 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1188,8 +1188,8 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd, for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t entry; - entry = mk_pte(pages[i], vma->vm_page_prot); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = mk_pte(pages[i], vmf->vma_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); memcg = (void *)page_private(pages[i]); set_page_private(pages[i], 0); page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); @@ -2115,7 +2115,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_swp_mksoft_dirty(entry); } else { entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); - entry = maybe_mkwrite(entry, vma); + entry = maybe_mkwrite(entry, vma->vm_flags); if (!write) entry = pte_wrprotect(entry); if (!young) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b1f841a9edd487981727d5807aad68674d9036af..c5976d5323be680e80d3bd630d7d06b90ab5a498 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3692,6 +3692,8 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, .vma = vma, .address = address, .flags = flags, + .vma_flags = vma->vm_flags, + .vma_page_prot = vma->vm_page_prot, /* * Hard to debug if it ends up being * used by a callee that assumes diff --git a/mm/init-mm.c b/mm/init-mm.c index f94d5d15ebc07a853cd5b1c5b50d364dfd97d6a1..e71ac37a98c46029eece9fb0766a7d71b300ec62 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -17,6 +17,9 @@ struct mm_struct init_mm = { .mm_rb = RB_ROOT, +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + .mm_rb_lock = __RW_LOCK_UNLOCKED(init_mm.mm_rb_lock), +#endif .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), diff --git a/mm/internal.h b/mm/internal.h index 1df011f624801ffbdf3379a6d17218d9b32280e1..7cc2c6e787bf45e02e914419ab48e686b4baba1c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -40,6 +40,26 @@ void page_writeback_init(void); int do_swap_page(struct vm_fault *vmf); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +extern struct vm_area_struct *get_vma(struct mm_struct *mm, + unsigned long addr); +extern void put_vma(struct vm_area_struct *vma); + +static inline bool vma_has_changed(struct vm_fault 
*vmf) +{ + int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb); + unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence); + + /* + * Matches both the wmb in write_seqlock_{begin,end}() and + * the wmb in vma_rb_erase(). + */ + smp_rmb(); + + return ret || seq != vmf->sequence; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 0a5bb3e8a8a3c78d9a99fbb6260743275de88d1a..aaae33402d613486144585bbb4c78721e9a0ff01 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -887,6 +887,8 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, .flags = FAULT_FLAG_ALLOW_RETRY, .pmd = pmd, .pgoff = linear_page_index(vma, address), + .vma_flags = vma->vm_flags, + .vma_page_prot = vma->vm_page_prot, }; /* we only decide to swapin, if there is enough young ptes */ @@ -1013,6 +1015,7 @@ static void collapse_huge_page(struct mm_struct *mm, if (mm_find_pmd(mm, address) != pmd) goto out; + vm_write_begin(vma); anon_vma_lock_write(vma->anon_vma); pte = pte_offset_map(pmd, address); @@ -1048,6 +1051,7 @@ static void collapse_huge_page(struct mm_struct *mm, pmd_populate(mm, pmd, pmd_pgtable(_pmd)); spin_unlock(pmd_ptl); anon_vma_unlock_write(vma->anon_vma); + vm_write_end(vma); result = SCAN_FAIL; goto out; } @@ -1082,6 +1086,7 @@ static void collapse_huge_page(struct mm_struct *mm, set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); + vm_write_end(vma); *hpage = NULL; diff --git a/mm/ksm.c b/mm/ksm.c index fdc8746ebcb459eccc70e36365b26c5fe24496d6..31e6420c209b8e4520a5a99675e74b465bedf9bb 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -199,6 +199,8 @@ struct rmap_item { #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ #define STABLE_FLAG 0x200 /* is listed from the stable tree */ +#define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG) + /* to mask all the flags */ /* The stable and unstable tree heads */ static struct rb_root one_stable_tree[1] = { RB_ROOT }; @@ -2552,6 +2554,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) stable_node = page_stable_node(page); if (!stable_node) return; + again: hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; @@ -2562,10 +2565,15 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) anon_vma_lock_read(anon_vma); anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 0, ULONG_MAX) { + unsigned long addr; + cond_resched(); vma = vmac->vma; - if (rmap_item->address < vma->vm_start || - rmap_item->address >= vma->vm_end) + + /* Ignore the stable/unstable/sqnr flags */ + addr = rmap_item->address & ~KSM_FLAG_MASK; + + if (addr < vma->vm_start || addr >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this @@ -2579,8 +2587,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; - if (!rwc->rmap_one(page, vma, - rmap_item->address, rwc->arg)) { + if (!rwc->rmap_one(page, vma, addr, rwc->arg)) { anon_vma_unlock_read(anon_vma); return; } diff --git a/mm/madvise.c b/mm/madvise.c index 86e514bace7e06303cef30df92e91ab920a5d31e..ca3bcc9f9d1f2218e9188caac73166f790887511 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -184,7 +184,9 @@ static long madvise_behavior(struct 
vm_area_struct *vma, /* * vm_flags is protected by the mmap_sem held in write mode. */ - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); + vm_write_end(vma); out: return error; } @@ -450,9 +452,11 @@ static void madvise_free_page_range(struct mmu_gather *tlb, .private = tlb, }; + vm_write_begin(vma); tlb_start_vma(tlb, vma); walk_page_range(addr, end, &free_walk); tlb_end_vma(tlb, vma); + vm_write_end(vma); } static int madvise_free_single_vma(struct vm_area_struct *vma, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 546cd481a2ca9226bc09ef1370b61c25d5b50839..942d9342b63bcf744aa4c23c278b91e79f7bbf32 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2205,7 +2205,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, { struct memcg_kmem_cache_create_work *cw; - cw = kmalloc(sizeof(*cw), GFP_NOWAIT); + cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); if (!cw) return; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 1cd3b3569af8a79285b75bfdb2485b7de7a69aa8..a9f6e138e4f2f9a7938d62db68a1ab876068259d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -994,7 +994,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, if (kill) collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); - unmap_success = try_to_unmap(hpage, ttu); + unmap_success = try_to_unmap(hpage, ttu, NULL); if (!unmap_success) pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", pfn, page_mapcount(hpage)); diff --git a/mm/memory.c b/mm/memory.c index 6fb74d4bf7bb7bc0aad00659efca69b9fa0b5ddb..b41012b9f5c2122bf6719bf18357e9cb4ccd8c03 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -81,6 +81,9 @@ #include "internal.h" +#define CREATE_TRACE_POINTS +#include + #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif @@ -617,7 +620,9 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ + vm_write_begin(vma); unlink_anon_vmas(vma); + vm_write_end(vma); unlink_file_vma(vma); if (is_vm_hugetlb_page(vma)) { @@ -631,7 +636,9 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, && !is_vm_hugetlb_page(next)) { vma = next; next = vma->vm_next; + vm_write_begin(vma); unlink_anon_vmas(vma); + vm_write_end(vma); unlink_file_vma(vma); } free_pgd_range(tlb, addr, vma->vm_end, @@ -758,7 +765,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, if (page) dump_page(page, "bad pte"); pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", - (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); + (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, + mapping, index); /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ @@ -772,7 +780,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, } /* - * vm_normal_page -- This function gets the "struct page" associated with a pte. + * __vm_normal_page -- This function gets the "struct page" associated with + * a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). 
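Several hunks above (madvise_behavior(), madvise_free_page_range(), free_pgtables(), unmap_page_range()) start wrapping VMA modifications in vm_write_begin()/vm_write_end() and switch the fields speculative readers look at to WRITE_ONCE()/READ_ONCE() accesses. The user-space sketch below shows the writer half of the sequence-count protocol that pairs with the read-side sketch given earlier in this section; the types and the fence placement are illustrative, not the kernel implementation.

    #include <stdatomic.h>
    #include <stdio.h>

    struct vma_model {
        atomic_uint  seq;     /* even: stable, odd: update in progress */
        atomic_ulong flags;
    };

    /* Writer side: bump the sequence to an odd value before touching the
     * fields, publish the change, then bump it back to even. A concurrent
     * speculative reader that sees an odd value, or a value that changed
     * across its read, retries on the locked path instead. */
    static void vma_set_flags(struct vma_model *v, unsigned long new_flags)
    {
        atomic_fetch_add_explicit(&v->seq, 1, memory_order_relaxed); /* -> odd */
        atomic_thread_fence(memory_order_release);

        atomic_store_explicit(&v->flags, new_flags, memory_order_relaxed);

        atomic_thread_fence(memory_order_release);
        atomic_fetch_add_explicit(&v->seq, 1, memory_order_relaxed); /* -> even */
    }

    int main(void)
    {
        struct vma_model v;

        atomic_init(&v.seq, 0);
        atomic_init(&v.flags, 0);
        vma_set_flags(&v, 0x1);
        printf("seq=%u flags=%lu\n", atomic_load(&v.seq),
               (unsigned long)atomic_load(&v.flags));
        return 0;
    }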
In this @@ -818,8 +827,9 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, #else # define HAVE_PTE_SPECIAL 0 #endif -struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte, bool with_public_device) +struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, bool with_public_device, + unsigned long vma_flags) { unsigned long pfn = pte_pfn(pte); @@ -828,7 +838,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, goto check_pfn; if (vma->vm_ops && vma->vm_ops->find_special_page) return vma->vm_ops->find_special_page(vma, addr); - if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) + if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (is_zero_pfn(pfn)) return NULL; @@ -859,9 +869,13 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, } /* !HAVE_PTE_SPECIAL case follows: */ + /* + * This part should never get called when CONFIG_SPECULATIVE_PAGE_FAULT + * is set. This is mainly because we can't rely on vm_start. + */ - if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { - if (vma->vm_flags & VM_MIXEDMAP) { + if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) { + if (vma_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; @@ -870,7 +884,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; - if (!is_cow_mapping(vma->vm_flags)) + if (!is_cow_mapping(vma_flags)) return NULL; } } @@ -1499,6 +1513,7 @@ void unmap_page_range(struct mmu_gather *tlb, unsigned long next; BUG_ON(addr >= end); + vm_write_begin(vma); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { @@ -1508,6 +1523,7 @@ void unmap_page_range(struct mmu_gather *tlb, next = zap_p4d_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); + vm_write_end(vma); } @@ -1816,7 +1832,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, out_mkwrite: if (mkwrite) { entry = pte_mkyoung(entry); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags); } set_pte_at(mm, addr, pte, entry); @@ -2270,6 +2286,141 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, } EXPORT_SYMBOL_GPL(apply_to_page_range); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +static bool pte_spinlock(struct vm_fault *vmf) +{ + bool ret = false; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pmd_t pmdval; +#endif + + /* Check if vma is still valid */ + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + return true; + } + + local_irq_disable(); + if (vma_has_changed(vmf)) { + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); + goto out; + } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* + * We check if the pmd value is still the same to ensure that there + * is not a huge collapse operation in progress in our back. 
+ */ + pmdval = READ_ONCE(*vmf->pmd); + if (!pmd_same(pmdval, vmf->orig_pmd)) { + trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); + goto out; + } +#endif + + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + if (unlikely(!spin_trylock(vmf->ptl))) { + trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address); + goto out; + } + + if (vma_has_changed(vmf)) { + spin_unlock(vmf->ptl); + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); + goto out; + } + + ret = true; +out: + local_irq_enable(); + return ret; +} + +static bool pte_map_lock(struct vm_fault *vmf) +{ + bool ret = false; + pte_t *pte; + spinlock_t *ptl; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pmd_t pmdval; +#endif + + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + return true; + } + + /* + * The first vma_has_changed() guarantees the page-tables are still + * valid, having IRQs disabled ensures they stay around, hence the + * second vma_has_changed() to make sure they are still valid once + * we've got the lock. After that a concurrent zap_pte_range() will + * block on the PTL and thus we're safe. + */ + local_irq_disable(); + if (vma_has_changed(vmf)) { + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); + goto out; + } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* + * We check if the pmd value is still the same to ensure that there + * is not a huge collapse operation in progress in our back. + */ + pmdval = READ_ONCE(*vmf->pmd); + if (!pmd_same(pmdval, vmf->orig_pmd)) { + trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); + goto out; + } +#endif + + /* + * Same as pte_offset_map_lock() except that we call + * spin_trylock() in place of spin_lock() to avoid race with + * unmap path which may have the lock and wait for this CPU + * to invalidate TLB but this CPU has irq disabled. + * Since we are in a speculative patch, accept it could fail + */ + ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + pte = pte_offset_map(vmf->pmd, vmf->address); + if (unlikely(!spin_trylock(ptl))) { + pte_unmap(pte); + trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address); + goto out; + } + + if (vma_has_changed(vmf)) { + pte_unmap_unlock(pte, ptl); + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); + goto out; + } + + vmf->pte = pte; + vmf->ptl = ptl; + ret = true; +out: + local_irq_enable(); + return ret; +} +#else +static inline bool pte_spinlock(struct vm_fault *vmf) +{ + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + return true; +} + +static inline bool pte_map_lock(struct vm_fault *vmf) +{ + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + return true; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + /* * handle_pte_fault chooses page fault handler according to an entry which was * read non-atomically. Before making any commitment, on those architectures @@ -2277,21 +2428,29 @@ EXPORT_SYMBOL_GPL(apply_to_page_range); * parts, do_swap_page must check under lock before unmapping the pte and * proceeding (but do_wp_page is only called after already making such a check; * and do_anonymous_page can safely check later on). + * + * pte_unmap_same() returns: + * 0 if the PTE are the same + * VM_FAULT_PTNOTSAME if the PTE are different + * VM_FAULT_RETRY if the VMA has changed in our back during + * a speculative page fault handling. 
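pte_map_lock() above deliberately uses spin_trylock(): the speculative path runs with interrupts disabled, and the unmap side may already hold the PTL while waiting for this CPU to acknowledge a TLB flush, so waiting for the lock could deadlock. The sketch below reduces that rule to its essence with a pthread mutex; it is an analogy, not the kernel locking code.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

    /* The fast path must never wait: the lock owner may itself be waiting
     * on us (in the kernel case, an IPI that cannot be delivered while
     * interrupts are off). So only try the lock and bail on contention. */
    static bool do_fast_path(void)
    {
        if (pthread_mutex_trylock(&ptl) != 0)
            return false;          /* contended: retry on the slow path */

        /* ... install the entry under the lock ... */
        pthread_mutex_unlock(&ptl);
        return true;
    }

    int main(void)
    {
        printf("fast path %s\n", do_fast_path() ? "succeeded" : "must retry");
        return 0;
    }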
*/ -static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, - pte_t *page_table, pte_t orig_pte) +static inline int pte_unmap_same(struct vm_fault *vmf) { - int same = 1; + int ret = 0; + #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) if (sizeof(pte_t) > sizeof(unsigned long)) { - spinlock_t *ptl = pte_lockptr(mm, pmd); - spin_lock(ptl); - same = pte_same(*page_table, orig_pte); - spin_unlock(ptl); + if (pte_spinlock(vmf)) { + if (!pte_same(*vmf->pte, vmf->orig_pte)) + ret = VM_FAULT_PTNOTSAME; + spin_unlock(vmf->ptl); + } else + ret = VM_FAULT_RETRY; } #endif - pte_unmap(page_table); - return same; + pte_unmap(vmf->pte); + return ret; } static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) @@ -2426,7 +2585,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf) flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = pte_mkyoung(vmf->orig_pte); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -2459,25 +2618,26 @@ static int wp_page_copy(struct vm_fault *vmf) const unsigned long mmun_start = vmf->address & PAGE_MASK; const unsigned long mmun_end = mmun_start + PAGE_SIZE; struct mem_cgroup *memcg; + int ret = VM_FAULT_OOM; if (unlikely(anon_vma_prepare(vma))) - goto oom; + goto out; if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { new_page = alloc_zeroed_user_highpage_movable(vma, vmf->address); if (!new_page) - goto oom; + goto out; } else { new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); if (!new_page) - goto oom; + goto out; cow_user_page(new_page, old_page, vmf->address, vma); } if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) - goto oom_free_new; + goto out_free_new; __SetPageUptodate(new_page); @@ -2486,7 +2646,10 @@ static int wp_page_copy(struct vm_fault *vmf) /* * Re-check the pte - we dropped the lock */ - vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) { + ret = VM_FAULT_RETRY; + goto out_uncharge; + } if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { if (old_page) { if (!PageAnon(old_page)) { @@ -2498,8 +2661,8 @@ static int wp_page_copy(struct vm_fault *vmf) inc_mm_counter_fast(mm, MM_ANONPAGES); } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); - entry = mk_pte(new_page, vma->vm_page_prot); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = mk_pte(new_page, vmf->vma_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); /* * Clear the pte entry and flush it first, before updating the * pte with the new entry. This will avoid a race condition @@ -2507,9 +2670,9 @@ static int wp_page_copy(struct vm_fault *vmf) * thread doing COW. */ ptep_clear_flush_notify(vma, vmf->address, vmf->pte); - page_add_new_anon_rmap(new_page, vma, vmf->address, false); + __page_add_new_anon_rmap(new_page, vma, vmf->address, false); mem_cgroup_commit_charge(new_page, memcg, false, false); - lru_cache_add_active_or_unevictable(new_page, vma); + __lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags); /* * We call the notify macro here because, when using secondary * mmu page tables (such as kvm shadow page tables), we want the @@ -2560,7 +2723,7 @@ static int wp_page_copy(struct vm_fault *vmf) * Don't let another task, with possibly unlocked vma, * keep the mlocked page. 
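The wp_page_copy() rework above stops treating every failure as VM_FAULT_OOM: a single ret variable carries the reason, a new out_uncharge step handles the now-possible pte_map_lock() failure, and the function unwinds through ordered labels. The standalone sketch below shows the same staged-cleanup idiom with invented resources; it is not the kernel function itself.

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Staged error unwinding: each failure jumps to the label that undoes
     * only what has already been set up, and `ret` carries the reason. */
    static int setup_pair(char **a, char **b)
    {
        int ret = -ENOMEM;

        *a = malloc(32);
        if (!*a)
            goto out;

        *b = malloc(32);
        if (!*b)
            goto out_free_a;

        return 0;

    out_free_a:
        free(*a);
        *a = NULL;
    out:
        *b = NULL;
        return ret;
    }

    int main(void)
    {
        char *a, *b;

        if (setup_pair(&a, &b) == 0) {
            puts("both buffers allocated");
            free(a);
            free(b);
        }
        return 0;
    }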
*/ - if (page_copied && (vma->vm_flags & VM_LOCKED)) { + if (page_copied && (vmf->vma_flags & VM_LOCKED)) { lock_page(old_page); /* LRU manipulation */ if (PageMlocked(old_page)) munlock_vma_page(old_page); @@ -2569,12 +2732,14 @@ static int wp_page_copy(struct vm_fault *vmf) put_page(old_page); } return page_copied ? VM_FAULT_WRITE : 0; -oom_free_new: +out_uncharge: + mem_cgroup_cancel_charge(new_page, memcg, false); +out_free_new: put_page(new_page); -oom: +out: if (old_page) put_page(old_page); - return VM_FAULT_OOM; + return ret; } /** @@ -2594,9 +2759,9 @@ static int wp_page_copy(struct vm_fault *vmf) */ int finish_mkwrite_fault(struct vm_fault *vmf) { - WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); - vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + WARN_ON_ONCE(!(vmf->vma_flags & VM_SHARED)); + if (!pte_map_lock(vmf)) + return VM_FAULT_RETRY; /* * We might have raced with another page fault while we released the * pte_offset_map_lock. @@ -2687,7 +2852,8 @@ static int do_wp_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); + vmf->page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false, + vmf->vma_flags); if (!vmf->page) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a @@ -2696,7 +2862,7 @@ static int do_wp_page(struct vm_fault *vmf) * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. */ - if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == + if ((vmf->vma_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) return wp_pfn_shared(vmf); @@ -2714,8 +2880,11 @@ static int do_wp_page(struct vm_fault *vmf) get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); lock_page(vmf->page); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) { + unlock_page(vmf->page); + put_page(vmf->page); + return VM_FAULT_RETRY; + } if (!pte_same(*vmf->pte, vmf->orig_pte)) { unlock_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -2740,7 +2909,7 @@ static int do_wp_page(struct vm_fault *vmf) return VM_FAULT_WRITE; } unlock_page(vmf->page); - } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == + } else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { return wp_page_shared(vmf); } @@ -2848,10 +3017,19 @@ int do_swap_page(struct vm_fault *vmf) pte_t pte; int locked; int exclusive = 0; - int ret = 0; + int ret; - if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) + ret = pte_unmap_same(vmf); + if (ret) { + /* + * If pte != orig_pte, this means another thread did the + * swap operation in our back. + * So nothing else to do. + */ + if (ret == VM_FAULT_PTNOTSAME) + ret = 0; goto out; + } entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { @@ -2903,11 +3081,16 @@ int do_swap_page(struct vm_fault *vmf) if (!page) { /* - * Back out if somebody else faulted in this pte - * while we released the pte lock. + * Back out if the VMA has changed in our back during + * a speculative page fault or if somebody else + * faulted in this pte while we released the pte lock. 
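do_swap_page() above drops the pte lock around the swap-in, so once it re-locks (now through pte_map_lock(), which can itself fail on the speculative path) it must compare the current pte against orig_pte and back out if anything changed. The sketch below models that "record, unlock, work, re-lock and compare" discipline with a plain pthread mutex and an invented counter standing in for the pte.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long entry;            /* stands in for the pte */

    /* Snapshot the entry, drop the lock for the slow work, then re-take
     * the lock and only commit when the entry is still what we saw. */
    static bool update_if_unchanged(unsigned long new_val)
    {
        pthread_mutex_lock(&lock);
        unsigned long orig = entry;
        pthread_mutex_unlock(&lock);

        usleep(1000);                      /* "slow" work: I/O, allocation */

        pthread_mutex_lock(&lock);
        if (entry != orig) {               /* somebody else changed it: back out */
            pthread_mutex_unlock(&lock);
            return false;
        }
        entry = new_val;
        pthread_mutex_unlock(&lock);
        return true;
    }

    int main(void)
    {
        printf("committed: %d, entry=%lu\n", update_if_unchanged(42), entry);
        return 0;
    }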
*/ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) { + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + ret = VM_FAULT_RETRY; + goto out; + } + if (likely(pte_same(*vmf->pte, vmf->orig_pte))) ret = VM_FAULT_OOM; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); @@ -2960,10 +3143,13 @@ int do_swap_page(struct vm_fault *vmf) } /* - * Back out if somebody else already faulted in this pte. + * Back out if the VMA has changed in our back during a speculative + * page fault or if somebody else already faulted in this pte. */ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + if (!pte_map_lock(vmf)) { + ret = VM_FAULT_RETRY; + goto out_cancel_cgroup; + } if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) goto out_nomap; @@ -2984,9 +3170,9 @@ int do_swap_page(struct vm_fault *vmf) inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); - pte = mk_pte(page, vma->vm_page_prot); + pte = mk_pte(page, vmf->vma_page_prot); if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { - pte = maybe_mkwrite(pte_mkdirty(pte), vma); + pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags); vmf->flags &= ~FAULT_FLAG_WRITE; ret |= VM_FAULT_WRITE; exclusive = RMAP_EXCLUSIVE; @@ -2999,9 +3185,9 @@ int do_swap_page(struct vm_fault *vmf) /* ksm created a completely new copy */ if (unlikely(page != swapcache && swapcache)) { - page_add_new_anon_rmap(page, vma, vmf->address, false); + __page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); - lru_cache_add_active_or_unevictable(page, vma); + __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); } else { do_page_add_anon_rmap(page, vma, vmf->address, exclusive); mem_cgroup_commit_charge(page, memcg, true, false); @@ -3010,7 +3196,7 @@ int do_swap_page(struct vm_fault *vmf) swap_free(entry); if (mem_cgroup_swap_full(page) || - (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + (vmf->vma_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); unlock_page(page); if (page != swapcache && swapcache) { @@ -3040,8 +3226,9 @@ int do_swap_page(struct vm_fault *vmf) out: return ret; out_nomap: - mem_cgroup_cancel_charge(page, memcg, false); pte_unmap_unlock(vmf->pte, vmf->ptl); +out_cancel_cgroup: + mem_cgroup_cancel_charge(page, memcg, false); out_page: unlock_page(page); out_release: @@ -3067,7 +3254,7 @@ static int do_anonymous_page(struct vm_fault *vmf) pte_t entry; /* File mapping without ->vm_ops ? */ - if (vma->vm_flags & VM_SHARED) + if (vmf->vma_flags & VM_SHARED) return VM_FAULT_SIGBUS; /* @@ -3091,14 +3278,22 @@ static int do_anonymous_page(struct vm_fault *vmf) if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), - vma->vm_page_prot)); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + vmf->vma_page_prot)); + if (!pte_map_lock(vmf)) + return VM_FAULT_RETRY; if (!pte_none(*vmf->pte)) goto unlock; ret = check_stable_address_space(vma->vm_mm); if (ret) goto unlock; + /* + * Don't call the userfaultfd during the speculative path. + * We already checked for the VMA to not be managed through + * userfaultfd, but it may be set in our back once we have lock + * the pte. In such a case we can ignore it this time. 
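From here on the fault handlers consistently test vmf->vma_flags and use vmf->vma_page_prot rather than dereferencing the VMA again; both are captured once when the vm_fault is set up. The sketch below illustrates why a fault-scoped snapshot is used: every decision within one fault sees the same, self-consistent values even if a concurrent writer updates the real object in between. The types, flag bits and names are invented for illustration.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define F_WRITE  0x1ul
    #define F_SHARED 0x2ul

    struct vma_model { atomic_ulong flags; };

    struct fault_ctx {
        unsigned long vma_flags;   /* snapshot taken once per fault */
    };

    static void fault_ctx_init(struct fault_ctx *f, struct vma_model *v)
    {
        f->vma_flags = atomic_load_explicit(&v->flags, memory_order_acquire);
    }

    /* All later checks use the snapshot, so "is it shared?" and "is it
     * writable?" can never disagree within a single fault, even if the
     * live flags are updated concurrently with WRITE_ONCE()-style stores. */
    static bool is_private_write(const struct fault_ctx *f)
    {
        return (f->vma_flags & F_WRITE) && !(f->vma_flags & F_SHARED);
    }

    int main(void)
    {
        struct vma_model v;
        struct fault_ctx f;

        atomic_init(&v.flags, F_WRITE);
        fault_ctx_init(&f, &v);

        /* A concurrent writer could flip the live flags here; the fault
         * in flight still operates on its original snapshot. */
        atomic_store_explicit(&v.flags, F_WRITE | F_SHARED, memory_order_release);

        printf("private COW fault: %s\n", is_private_write(&f) ? "yes" : "no");
        return 0;
    }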
+ */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + goto setpte; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -3124,21 +3319,24 @@ static int do_anonymous_page(struct vm_fault *vmf) */ __SetPageUptodate(page); - entry = mk_pte(page, vma->vm_page_prot); - if (vma->vm_flags & VM_WRITE) + entry = mk_pte(page, vmf->vma_page_prot); + if (vmf->vma_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); - if (!pte_none(*vmf->pte)) + if (!pte_map_lock(vmf)) { + ret = VM_FAULT_RETRY; goto release; + } + if (!pte_none(*vmf->pte)) + goto unlock_and_release; ret = check_stable_address_space(vma->vm_mm); if (ret) - goto release; + goto unlock_and_release; /* Deliver the page fault to userland, check inside PT lock */ - if (userfaultfd_missing(vma)) { + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) && + userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); mem_cgroup_cancel_charge(page, memcg, false); put_page(page); @@ -3146,9 +3344,9 @@ static int do_anonymous_page(struct vm_fault *vmf) } inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, vmf->address, false); + __page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); - lru_cache_add_active_or_unevictable(page, vma); + __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); setpte: set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); @@ -3157,10 +3355,12 @@ static int do_anonymous_page(struct vm_fault *vmf) unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; +unlock_and_release: + pte_unmap_unlock(vmf->pte, vmf->ptl); release: mem_cgroup_cancel_charge(page, memcg, false); put_page(page); - goto unlock; + return ret; oom_free_page: put_page(page); oom: @@ -3253,8 +3453,9 @@ static int pte_alloc_one_map(struct vm_fault *vmf) * pte_none() under vmf->ptl protection when we return to * alloc_set_pte(). 
*/ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + if (!pte_map_lock(vmf)) + return VM_FAULT_RETRY; + return 0; } @@ -3317,7 +3518,7 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page) for (i = 0; i < HPAGE_PMD_NR; i++) flush_icache_page(vma, page + i); - entry = mk_huge_pmd(page, vma->vm_page_prot); + entry = mk_huge_pmd(page, vmf->vma_page_prot); if (write) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); @@ -3391,15 +3592,19 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, return VM_FAULT_NOPAGE; flush_icache_page(vma, page); - entry = mk_pte(page, vma->vm_page_prot); + entry = mk_pte(page, vmf->vma_page_prot); if (write) - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); + + if (vmf->flags & FAULT_FLAG_PREFAULT_OLD) + entry = pte_mkold(entry); + /* copy-on-write page */ - if (write && !(vma->vm_flags & VM_SHARED)) { + if (write && !(vmf->vma_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, vmf->address, false); + __page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); - lru_cache_add_active_or_unevictable(page, vma); + __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, false); @@ -3434,7 +3639,7 @@ int finish_fault(struct vm_fault *vmf) /* Did we COW the page? */ if ((vmf->flags & FAULT_FLAG_WRITE) && - !(vmf->vma->vm_flags & VM_SHARED)) + !(vmf->vma_flags & VM_SHARED)) page = vmf->cow_page; else page = vmf->page; @@ -3452,8 +3657,16 @@ int finish_fault(struct vm_fault *vmf) return ret; } +/* + * If architecture emulates "accessed" or "young" bit without HW support, + * there is no much gain with fault_around. + */ static unsigned long fault_around_bytes __read_mostly = - rounddown_pow_of_two(4096); +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS + PAGE_SIZE; +#else + rounddown_pow_of_two(65536); +#endif #ifdef CONFIG_DEBUG_FS static int fault_around_bytes_get(void *data, u64 *val) @@ -3688,7 +3901,7 @@ static int do_fault(struct vm_fault *vmf) ret = VM_FAULT_SIGBUS; else if (!(vmf->flags & FAULT_FLAG_WRITE)) ret = do_read_fault(vmf); - else if (!(vma->vm_flags & VM_SHARED)) + else if (!(vmf->vma_flags & VM_SHARED)) ret = do_cow_fault(vmf); else ret = do_shared_fault(vmf); @@ -3733,8 +3946,8 @@ static int do_numa_page(struct vm_fault *vmf) * validation through pte_unmap_same(). It's of NUMA type but * the pfn may be screwed if the read is non atomic. */ - vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); - spin_lock(vmf->ptl); + if (!pte_spinlock(vmf)) + return VM_FAULT_RETRY; if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; @@ -3745,14 +3958,14 @@ static int do_numa_page(struct vm_fault *vmf) * accessible ptes, some can allow access by kernel mode. 
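alloc_set_pte() above starts honouring FAULT_FLAG_PREFAULT_OLD: pages mapped speculatively around the faulting address get an "old" (not recently accessed) pte, while only the page that actually faulted is mapped young, which keeps faultaround from polluting the access information reclaim relies on (the flag itself is set per page in the filemap_map_pages() hunk earlier, gated by want_old_faultaround_pte). A small sketch of that marking rule follows; the window size and types are made up for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define WINDOW 8          /* illustrative faultaround window, in pages */

    struct pte_model {
        bool present;
        bool young;           /* the "accessed" bit */
    };

    /* Map the whole window, but only the index that really faulted is
     * marked young; the surrounding prefaulted entries start out old so
     * page aging can tell them apart from genuinely used pages. */
    static void map_window(struct pte_model *ptes, int faulting_idx)
    {
        for (int i = 0; i < WINDOW; i++) {
            ptes[i].present = true;
            ptes[i].young = (i == faulting_idx);
        }
    }

    int main(void)
    {
        struct pte_model ptes[WINDOW] = { 0 };

        map_window(ptes, 3);
        for (int i = 0; i < WINDOW; i++)
            printf("pte[%d]: present=%d young=%d\n",
                   i, ptes[i].present, ptes[i].young);
        return 0;
    }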
*/ pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte); - pte = pte_modify(pte, vma->vm_page_prot); + pte = pte_modify(pte, vmf->vma_page_prot); pte = pte_mkyoung(pte); if (was_writable) pte = pte_mkwrite(pte); ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte); update_mmu_cache(vma, vmf->address, vmf->pte); - page = vm_normal_page(vma, vmf->address, pte); + page = __vm_normal_page(vma, vmf->address, pte, false, vmf->vma_flags); if (!page) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; @@ -3779,7 +3992,7 @@ static int do_numa_page(struct vm_fault *vmf) * Flag if the page is shared between multiple address spaces. This * is later used when determining whether to group tasks together */ - if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) + if (page_mapcount(page) > 1 && (vmf->vma_flags & VM_SHARED)) flags |= TNF_SHARED; last_cpupid = page_cpupid_last(page); @@ -3793,7 +4006,7 @@ static int do_numa_page(struct vm_fault *vmf) } /* Migrate to the requested node */ - migrated = migrate_misplaced_page(page, vma, target_nid); + migrated = migrate_misplaced_page(page, vmf, target_nid); if (migrated) { page_nid = target_nid; flags |= TNF_MIGRATED; @@ -3823,7 +4036,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); /* COW handled on pte level: split pmd */ - VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); + VM_BUG_ON_VMA(vmf->vma_flags & VM_SHARED, vmf->vma); __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; @@ -3878,6 +4091,15 @@ static int handle_pte_fault(struct vm_fault *vmf) pte_t entry; if (unlikely(pmd_none(*vmf->pmd))) { + /* + * In the case of the speculative page fault handler we abort + * the speculative path immediately as the pmd is probably + * in the way to be converted in a huge one. We will try + * again holding the mmap_sem (which implies that the collapse + * operation is done). + */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + return VM_FAULT_RETRY; /* * Leave __pte_alloc() until later: because vm_ops->fault may * want to allocate huge page, and if we expose page table @@ -3885,7 +4107,7 @@ static int handle_pte_fault(struct vm_fault *vmf) * concurrent faults and from rmap lookups. */ vmf->pte = NULL; - } else { + } else if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { /* See comment in pte_alloc_one_map() */ if (pmd_devmap_trans_unstable(vmf->pmd)) return 0; @@ -3894,6 +4116,9 @@ static int handle_pte_fault(struct vm_fault *vmf) * pmd from under us anymore at this point because we hold the * mmap_sem read mode and khugepaged takes it in write mode. * So now it's safe to run pte_offset_map(). + * This is not applicable to the speculative page fault handler + * but in that case, the pte is fetched earlier in + * handle_speculative_fault(). 
*/ vmf->pte = pte_offset_map(vmf->pmd, vmf->address); vmf->orig_pte = *vmf->pte; @@ -3916,6 +4141,8 @@ static int handle_pte_fault(struct vm_fault *vmf) if (!vmf->pte) { if (vma_is_anonymous(vmf->vma)) return do_anonymous_page(vmf); + else if (vmf->flags & FAULT_FLAG_SPECULATIVE) + return VM_FAULT_RETRY; else return do_fault(vmf); } @@ -3926,8 +4153,8 @@ static int handle_pte_fault(struct vm_fault *vmf) if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); - vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); - spin_lock(vmf->ptl); + if (!pte_spinlock(vmf)) + return VM_FAULT_RETRY; entry = vmf->orig_pte; if (unlikely(!pte_same(*vmf->pte, entry))) goto unlock; @@ -3970,6 +4197,8 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, .flags = flags, .pgoff = linear_page_index(vma, address), .gfp_mask = __get_fault_gfp_mask(vma), + .vma_flags = vma->vm_flags, + .vma_page_prot = vma->vm_page_prot, }; unsigned int dirty = flags & FAULT_FLAG_WRITE; struct mm_struct *mm = vma->vm_mm; @@ -4011,6 +4240,9 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, vmf.pmd = pmd_alloc(mm, vmf.pud, address); if (!vmf.pmd) return VM_FAULT_OOM; +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + vmf.sequence = raw_read_seqcount(&vma->vm_sequence); +#endif if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) @@ -4044,6 +4276,260 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, return handle_pte_fault(&vmf); } +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + +#ifndef __HAVE_ARCH_PTE_SPECIAL +/* This is required by vm_normal_page() */ +#error "Speculative page fault handler requires __HAVE_ARCH_PTE_SPECIAL" +#endif +/* + * vm_normal_page() adds some processing which should be done while + * hodling the mmap_sem. + */ + +/* + * Tries to handle the page fault in a speculative way, without grabbing the + * mmap_sem. + * When VM_FAULT_RETRY is returned, the vma pointer is valid and this vma must + * be checked later when the mmap_sem has been grabbed by calling + * can_reuse_spf_vma(). + * This is needed as the returned vma is kept in memory until the call to + * can_reuse_spf_vma() is made. + */ +int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, + unsigned int flags, struct vm_area_struct **vma) +{ + struct vm_fault vmf = { + .address = address, + }; + pgd_t *pgd, pgdval; + p4d_t *p4d, p4dval; + pud_t pudval; + int seq, ret; + + /* Clear flags that may lead to release the mmap_sem to retry */ + flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE); + flags |= FAULT_FLAG_SPECULATIVE; + + *vma = get_vma(mm, address); + if (!*vma) + return VM_FAULT_RETRY; + vmf.vma = *vma; + + /* rmb <-> seqlock,vma_rb_erase() */ + seq = raw_read_seqcount(&vmf.vma->vm_sequence); + if (seq & 1) { + trace_spf_vma_changed(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; + } + + /* + * Can't call vm_ops service has we don't know what they would do + * with the VMA. + * This include huge page from hugetlbfs. + */ + if (vmf.vma->vm_ops) { + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; + } + + /* + * __anon_vma_prepare() requires the mmap_sem to be held + * because vm_next and vm_prev must be safe. This can't be guaranteed + * in the speculative path. 
+	 */
+	if (unlikely(!vmf.vma->anon_vma)) {
+		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
+	vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
+
+	/* Can't call userland page fault handler in the speculative path */
+	if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) {
+		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) {
+		/*
+		 * This could be detected by the check address against VMA's
+		 * boundaries but we want to trace it as not supported instead
+		 * of changed.
+		 */
+		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	if (address < READ_ONCE(vmf.vma->vm_start)
+	    || READ_ONCE(vmf.vma->vm_end) <= address) {
+		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
+		return VM_FAULT_RETRY;
+	}
+
+	if (!arch_vma_access_permitted(vmf.vma, flags & FAULT_FLAG_WRITE,
+				       flags & FAULT_FLAG_INSTRUCTION,
+				       flags & FAULT_FLAG_REMOTE))
+		goto out_segv;
+
+	/* This one is required to check that the VMA has write access set */
+	if (flags & FAULT_FLAG_WRITE) {
+		if (unlikely(!(vmf.vma_flags & VM_WRITE)))
+			goto out_segv;
+	} else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE))))
+		goto out_segv;
+
+#ifdef CONFIG_NUMA
+	struct mempolicy *pol;
+
+	/*
+	 * MPOL_INTERLEAVE implies additional checks in
+	 * mpol_misplaced() which are not compatible with the
+	 * speculative page fault processing.
+	 */
+	pol = __get_vma_policy(vmf.vma, address);
+	if (!pol)
+		pol = get_task_policy(current);
+	if (pol && pol->mode == MPOL_INTERLEAVE) {
+		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+		return VM_FAULT_RETRY;
+	}
+#endif
+
+	/*
+	 * Do a speculative lookup of the PTE entry.
+	 */
+	local_irq_disable();
+	pgd = pgd_offset(mm, address);
+	pgdval = READ_ONCE(*pgd);
+	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval)))
+		goto out_walk;
+
+	p4d = p4d_offset(pgd, address);
+	p4dval = READ_ONCE(*p4d);
+	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
+		goto out_walk;
+
+	vmf.pud = pud_offset(p4d, address);
+	pudval = READ_ONCE(*vmf.pud);
+	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
+		goto out_walk;
+
+	/* Huge pages at PUD level are not supported. */
+	if (unlikely(pud_trans_huge(pudval)))
+		goto out_walk;
+
+	vmf.pmd = pmd_offset(vmf.pud, address);
+	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
+	/*
+	 * pmd_none could mean that a hugepage collapse is in progress
+	 * behind our back, as collapse_huge_page() marks it before
+	 * invalidating the pte (which is done once the IPI is caught
+	 * by all CPUs and we have interrupts disabled).
+	 * For this reason we cannot handle THP in a speculative way since we
+	 * can't safely identify an in-progress collapse operation done
+	 * behind our back on that PMD.
+	 * Regarding the order of the following checks, see comment in
+	 * pmd_devmap_trans_unstable()
+	 */
+	if (unlikely(pmd_devmap(vmf.orig_pmd) ||
+		     pmd_none(vmf.orig_pmd) || pmd_trans_huge(vmf.orig_pmd) ||
+		     is_swap_pmd(vmf.orig_pmd)))
+		goto out_walk;
+
+	/*
+	 * The above does not allocate/instantiate page-tables because doing so
+	 * would lead to the possibility of instantiating page-tables after
+	 * free_pgtables() -- and consequently leaking them.
+	 *
+	 * The result is that we take at least one !speculative fault per PMD
+	 * in order to instantiate it.
+ */ + + vmf.pte = pte_offset_map(vmf.pmd, address); + vmf.orig_pte = READ_ONCE(*vmf.pte); + barrier(); /* See comment in handle_pte_fault() */ + if (pte_none(vmf.orig_pte)) { + pte_unmap(vmf.pte); + vmf.pte = NULL; + } + + vmf.pgoff = linear_page_index(vmf.vma, address); + vmf.gfp_mask = __get_fault_gfp_mask(vmf.vma); + vmf.sequence = seq; + vmf.flags = flags; + + local_irq_enable(); + + /* + * We need to re-validate the VMA after checking the bounds, otherwise + * we might have a false positive on the bounds. + */ + if (read_seqcount_retry(&vmf.vma->vm_sequence, seq)) { + trace_spf_vma_changed(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; + } + + mem_cgroup_oom_enable(); + ret = handle_pte_fault(&vmf); + mem_cgroup_oom_disable(); + + /* + * If there is no need to retry, don't return the vma to the caller. + */ + if (ret != VM_FAULT_RETRY) { + count_vm_event(SPECULATIVE_PGFAULT); + put_vma(vmf.vma); + *vma = NULL; + } + + /* + * The task may have entered a memcg OOM situation but + * if the allocation error was handled gracefully (no + * VM_FAULT_OOM), there is no need to kill anything. + * Just clean up the OOM state peacefully. + */ + if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) + mem_cgroup_oom_synchronize(false); + return ret; + +out_walk: + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + local_irq_enable(); + return VM_FAULT_RETRY; + +out_segv: + trace_spf_vma_access(_RET_IP_, vmf.vma, address); + /* + * We don't return VM_FAULT_RETRY so the caller is not expected to + * retrieve the fetched VMA. + */ + put_vma(vmf.vma); + *vma = NULL; + return VM_FAULT_SIGSEGV; +} + +/* + * This is used to know if the vma fetch in the speculative page fault handler + * is still valid when trying the regular fault path while holding the + * mmap_sem. + * The call to put_vma(vma) must be made after checking the vma's fields, as + * the vma may be freed by put_vma(). In such a case it is expected that false + * is returned. 
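The speculative handler above leans entirely on the vm_sequence count: snapshot it, bail out if a writer holds it (odd value), do the fault work from copied fields, then re-validate the snapshot before committing. As a rough user-space illustration of that reader/writer discipline, the sketch below uses a toy structure and C11 atomics in place of the kernel's seqcount machinery; every name in it is made up for the example.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-ins; none of these names exist in the kernel. */
struct toy_vma {
	atomic_uint seq;		/* plays the role of vma->vm_sequence */
	unsigned long vm_start;
	unsigned long vm_end;
};

/* Writer: make the count odd, change the fields, make it even again. */
static void toy_vma_adjust(struct toy_vma *v, unsigned long s, unsigned long e)
{
	atomic_fetch_add(&v->seq, 1);		/* now odd: update in flight */
	v->vm_start = s;
	v->vm_end = e;
	atomic_fetch_add(&v->seq, 1);		/* even again: update done */
}

/*
 * Reader: snapshot the count, reject odd values (a write is in flight),
 * copy the fields, then re-check the count before trusting the copy.
 * Returns 0 on success, -1 when the caller should fall back and retry
 * (the equivalent of VM_FAULT_RETRY).  A real seqcount also needs the
 * proper memory barriers, which this toy version glosses over.
 */
static int toy_vma_snapshot(struct toy_vma *v, unsigned long *s, unsigned long *e)
{
	unsigned int seq = atomic_load(&v->seq);

	if (seq & 1)
		return -1;
	*s = v->vm_start;
	*e = v->vm_end;
	if (atomic_load(&v->seq) != seq)
		return -1;
	return 0;
}

int main(void)
{
	struct toy_vma v = { .seq = 0, .vm_start = 0x1000, .vm_end = 0x2000 };
	unsigned long s, e;

	toy_vma_adjust(&v, 0x1000, 0x4000);
	if (toy_vma_snapshot(&v, &s, &e) == 0)
		printf("stable snapshot: [%#lx, %#lx)\n", s, e);
	return 0;
}

The writer's two increments are what make an in-flight update visible as an odd count, which is exactly the condition __handle_speculative_fault() treats as "retry with the mmap_sem held".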
+ */ +bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address) +{ + bool ret; + + ret = !RB_EMPTY_NODE(&vma->vm_rb) && + vma->vm_start <= address && address < vma->vm_end; + put_vma(vma); + return ret; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + /* * By the time we get here, we already hold the mm semaphore * diff --git a/mm/mempolicy.c b/mm/mempolicy.c index aa169bfa4dd1ce117deedb6e2ae529113cf86537..024b91102e3a4c3501a7c29925e13fecb1d6f152 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -379,8 +379,11 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) struct vm_area_struct *vma; down_write(&mm->mmap_sem); - for (vma = mm->mmap; vma; vma = vma->vm_next) + for (vma = mm->mmap; vma; vma = vma->vm_next) { + vm_write_begin(vma); mpol_rebind_policy(vma->vm_policy, new); + vm_write_end(vma); + } up_write(&mm->mmap_sem); } @@ -578,9 +581,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, { int nr_updated; + vm_write_begin(vma); nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); + vm_write_end(vma); return nr_updated; } @@ -681,6 +686,7 @@ static int vma_replace_policy(struct vm_area_struct *vma, if (IS_ERR(new)) return PTR_ERR(new); + vm_write_begin(vma); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) @@ -688,11 +694,17 @@ static int vma_replace_policy(struct vm_area_struct *vma, } old = vma->vm_policy; - vma->vm_policy = new; /* protected by mmap_sem */ + /* + * The speculative page fault handler accesses this field without + * hodling the mmap_sem. + */ + WRITE_ONCE(vma->vm_policy, new); + vm_write_end(vma); mpol_put(old); return 0; err_out: + vm_write_end(vma); mpol_put(new); return err; } @@ -1584,23 +1596,28 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = NULL; + struct mempolicy *pol; - if (vma) { - if (vma->vm_ops && vma->vm_ops->get_policy) { - pol = vma->vm_ops->get_policy(vma, addr); - } else if (vma->vm_policy) { - pol = vma->vm_policy; + if (!vma) + return NULL; - /* - * shmem_alloc_page() passes MPOL_F_SHARED policy with - * a pseudo vma whose vma->vm_ops=NULL. Take a reference - * count on these policies which will be dropped by - * mpol_cond_put() later - */ - if (mpol_needs_cond_ref(pol)) - mpol_get(pol); - } + if (vma->vm_ops && vma->vm_ops->get_policy) + return vma->vm_ops->get_policy(vma, addr); + + /* + * This could be called without holding the mmap_sem in the + * speculative page fault handler's path. + */ + pol = READ_ONCE(vma->vm_policy); + if (pol) { + /* + * shmem_alloc_page() passes MPOL_F_SHARED policy with + * a pseudo vma whose vma->vm_ops=NULL. 
Take a reference + * count on these policies which will be dropped by + * mpol_cond_put() later + */ + if (mpol_needs_cond_ref(pol)) + mpol_get(pol); } return pol; diff --git a/mm/migrate.c b/mm/migrate.c index 1236449b4777be3cf61e8cd68616c2c3faa5fd9f..0ff14c3825d38e4cf774e2be6735613291d7d514 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -239,7 +239,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, */ entry = pte_to_swp_entry(*pvmw.pte); if (is_write_migration_entry(entry)) - pte = maybe_mkwrite(pte, vma); + pte = maybe_mkwrite(pte, vma->vm_flags); if (unlikely(is_zone_device_page(new))) { if (is_device_private_page(new)) { @@ -1080,7 +1080,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, page); try_to_unmap(page, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL); page_was_mapped = 1; } @@ -1305,7 +1305,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, if (page_mapped(hpage)) { try_to_unmap(hpage, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL); page_was_mapped = 1; } @@ -1940,7 +1940,7 @@ bool pmd_trans_migrating(pmd_t pmd) * node. Caller is expected to have an elevated reference count on * the page that will be dropped by this function before returning. */ -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, +int migrate_misplaced_page(struct page *page, struct vm_fault *vmf, int node) { pg_data_t *pgdat = NODE_DATA(node); @@ -1953,7 +1953,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, * with execute permissions as they are probably shared libraries. */ if (page_mapcount(page) != 1 && page_is_file_cache(page) && - (vma->vm_flags & VM_EXEC)) + (vmf->vma_flags & VM_EXEC)) goto out; /* diff --git a/mm/mlock.c b/mm/mlock.c index 658ad5562dcaf5c3a632e49b4975f50753500937..c8ea725535b048e47807d02ac3a2350286465c91 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -439,7 +439,9 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK); + vm_write_end(vma); while (start < end) { struct page *page; @@ -562,10 +564,11 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. 
*/ - - if (lock) - vma->vm_flags = newflags; - else + if (lock) { + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, newflags); + vm_write_end(vma); + } else munlock_vma_pages_range(vma, start, end); out: diff --git a/mm/mmap.c b/mm/mmap.c index cb5a0cc3ff13a1ed29071cbb1963cac21f862ee6..c6909a428d0158df9483165e2465a6cd5fa737fd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -160,6 +160,27 @@ void unlink_file_vma(struct vm_area_struct *vma) } } +static void __free_vma(struct vm_area_struct *vma) +{ + if (vma->vm_file) + fput(vma->vm_file); + mpol_put(vma_policy(vma)); + kmem_cache_free(vm_area_cachep, vma); +} + +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +void put_vma(struct vm_area_struct *vma) +{ + if (atomic_dec_and_test(&vma->vm_ref_count)) + __free_vma(vma); +} +#else +static inline void put_vma(struct vm_area_struct *vma) +{ + __free_vma(vma); +} +#endif + /* * Close a vm structure and free it, returning the next. */ @@ -170,10 +191,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); - if (vma->vm_file) - fput(vma->vm_file); - mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); + put_vma(vma); return next; } @@ -393,6 +411,14 @@ static void validate_mm(struct mm_struct *mm) #define validate_mm(mm) do { } while (0) #endif +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +#define mm_rb_write_lock(mm) write_lock(&(mm)->mm_rb_lock) +#define mm_rb_write_unlock(mm) write_unlock(&(mm)->mm_rb_lock) +#else +#define mm_rb_write_lock(mm) do { } while (0) +#define mm_rb_write_unlock(mm) do { } while (0) +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, unsigned long, rb_subtree_gap, vma_compute_subtree_gap) @@ -411,26 +437,37 @@ static void vma_gap_update(struct vm_area_struct *vma) } static inline void vma_rb_insert(struct vm_area_struct *vma, - struct rb_root *root) + struct mm_struct *mm) { + struct rb_root *root = &mm->mm_rb; + /* All rb_subtree_gap values must be consistent prior to insertion */ validate_mm_rb(root, NULL); rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } -static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) +static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm) { + struct rb_root *root = &mm->mm_rb; /* * Note rb_erase_augmented is a fairly large inline function, * so make sure we instantiate it only once with our desired * augmented rbtree callbacks. */ + mm_rb_write_lock(mm); rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); + mm_rb_write_unlock(mm); /* wmb */ + + /* + * Ensure the removal is complete before clearing the node. + * Matched by vma_has_changed()/handle_speculative_fault(). + */ + RB_CLEAR_NODE(&vma->vm_rb); } static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, - struct rb_root *root, + struct mm_struct *mm, struct vm_area_struct *ignore) { /* @@ -438,21 +475,21 @@ static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, * with the possible exception of the "next" vma being erased if * next->vm_start was reduced. */ - validate_mm_rb(root, ignore); + validate_mm_rb(&mm->mm_rb, ignore); - __vma_rb_erase(vma, root); + __vma_rb_erase(vma, mm); } static __always_inline void vma_rb_erase(struct vm_area_struct *vma, - struct rb_root *root) + struct mm_struct *mm) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the vma being erased. 
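The put_vma()/get_vma() pair introduced here decouples VMA lifetime from the mmap_sem: a lookup under mm_rb_lock pins the VMA with a reference, so it stays usable even if a concurrent unmap removes it from the rbtree and drops the tree's reference. Below is a minimal user-space sketch of that pattern, assuming a pthread rwlock and a single pinned object rather than the kernel's rbtree and vm_area_struct.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a refcounted VMA. */
struct obj {
	atomic_int refcount;
	unsigned long start, end;
};

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *tree_entry;		/* pretend this is the rbtree */

static void put_obj(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("freeing [%#lx, %#lx)\n", o->start, o->end);
		free(o);
	}
}

/* Lookup takes the "tree" read lock and pins the object before returning. */
static struct obj *get_obj(void)
{
	struct obj *o;

	pthread_rwlock_rdlock(&tree_lock);
	o = tree_entry;
	if (o)
		atomic_fetch_add(&o->refcount, 1);
	pthread_rwlock_unlock(&tree_lock);
	return o;
}

/* Unlink drops the tree's reference; readers holding their own survive. */
static void unlink_obj(void)
{
	struct obj *o;

	pthread_rwlock_wrlock(&tree_lock);
	o = tree_entry;
	tree_entry = NULL;
	pthread_rwlock_unlock(&tree_lock);
	if (o)
		put_obj(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* the tree's reference */
	o->start = 0x1000;
	o->end = 0x2000;
	tree_entry = o;

	struct obj *mine = get_obj();	/* reader pins the object      */
	unlink_obj();			/* "munmap" drops the tree ref */
	printf("still usable: [%#lx, %#lx)\n", mine->start, mine->end);
	put_obj(mine);			/* last reference frees it     */
	return 0;
}

The object is only freed by whichever side drops the last reference, which is why __free_vma() moves behind put_vma() in the hunk above.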
 	 */
-	validate_mm_rb(root, vma);
+	validate_mm_rb(&mm->mm_rb, vma);
 
-	__vma_rb_erase(vma, root);
+	__vma_rb_erase(vma, mm);
 }
 
 /*
@@ -567,10 +604,12 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * immediately update the gap to the correct value. Finally we
 	 * rebalance the rbtree after all augmented values have been set.
 	 */
+	mm_rb_write_lock(mm);
 	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
 	vma->rb_subtree_gap = 0;
 	vma_gap_update(vma);
-	vma_rb_insert(vma, &mm->mm_rb);
+	vma_rb_insert(vma, mm);
+	mm_rb_write_unlock(mm);
 }
 
 static void __vma_link_file(struct vm_area_struct *vma)
@@ -646,7 +685,7 @@ static __always_inline void __vma_unlink_common(struct mm_struct *mm,
 {
 	struct vm_area_struct *next;
 
-	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
+	vma_rb_erase_ignore(vma, mm, ignore);
 	next = vma->vm_next;
 	if (has_prev)
 		prev->vm_next = next;
@@ -680,7 +719,7 @@ static inline void __vma_unlink_prev(struct mm_struct *mm,
  */
 int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand)
+	struct vm_area_struct *expand, bool keep_locked)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
@@ -692,6 +731,30 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	long adjust_next = 0;
 	int remove_next = 0;
 
+	/*
+	 * Why use the vm_raw_write*() functions here? To avoid lockdep's
+	 * warning.
+	 *
+	 * Lockdep is complaining about a theoretical lock dependency,
+	 * involving 3 locks:
+	 *   mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim
+	 *
	 * Here are the major paths leading to this dependency:
+	 *  1. __vma_adjust()           mmap_sem     -> vm_sequence -> i_mmap_rwsem
+	 *  2. move_vma()               mmap_sem     -> vm_sequence -> fs_reclaim
+	 *  3. __alloc_pages_nodemask() fs_reclaim   -> i_mmap_rwsem
+	 *  4. unmap_mapping_range()    i_mmap_rwsem -> vm_sequence
+	 *
+	 * So there is no way to solve this easily, especially because in
+	 * unmap_mapping_range() the i_mmap_rwsem is grabbed while the
+	 * impacted VMAs are not yet known.
+	 * However, the way the vm_seq is used guarantees that we will never
+	 * block on it, since we just check its value and never wait for it
+	 * to move; see vma_has_changed() and handle_speculative_fault().
+ */ + vm_raw_write_begin(vma); + if (next) + vm_raw_write_begin(next); + if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; @@ -772,8 +835,12 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); - if (error) + if (error) { + if (next && next != vma) + vm_raw_write_end(next); + vm_raw_write_end(vma); return error; + } } } again: @@ -819,17 +886,18 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (start != vma->vm_start) { - vma->vm_start = start; + WRITE_ONCE(vma->vm_start, start); start_changed = true; } if (end != vma->vm_end) { - vma->vm_end = end; + WRITE_ONCE(vma->vm_end, end); end_changed = true; } - vma->vm_pgoff = pgoff; + WRITE_ONCE(vma->vm_pgoff, pgoff); if (adjust_next) { - next->vm_start += adjust_next << PAGE_SHIFT; - next->vm_pgoff += adjust_next; + WRITE_ONCE(next->vm_start, + next->vm_start + (adjust_next << PAGE_SHIFT)); + WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next); } if (root) { @@ -894,15 +962,13 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (remove_next) { - if (file) { + if (file) uprobe_munmap(next, next->vm_start, next->vm_end); - fput(file); - } if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; - mpol_put(vma_policy(next)); - kmem_cache_free(vm_area_cachep, next); + vm_raw_write_end(next); + put_vma(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter @@ -916,6 +982,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * "vma->vm_next" gap must be updated. */ next = vma->vm_next; + if (next) + vm_raw_write_begin(next); } else { /* * For the scope of the comment "next" and @@ -962,6 +1030,11 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (insert && file) uprobe_mmap(insert); + if (next && next != vma) + vm_raw_write_end(next); + if (!keep_locked) + vm_raw_write_end(vma); + validate_mm(mm); return 0; @@ -1101,13 +1174,13 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, * parameter) may establish ptes with the wrong permissions of NNNN * instead of the right permissions of XXXX. 
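The switch to WRITE_ONCE() for vm_start, vm_end and vm_pgoff in __vma_adjust() pairs with the READ_ONCE() accesses in the speculative path: once those fields are read without the mmap_sem, both sides need single, untorn accesses. A rough user-space approximation of the two macros, assuming the usual volatile-cast definition, is shown below; it does not replace the sequence re-check, it only rules out compiler-level tearing and re-reads.

#include <stdio.h>

/* Simplified forms of the kernel macros: force a single volatile access. */
#define TOY_WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define TOY_READ_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

struct toy_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

int main(void)
{
	struct toy_vma v = { 0x1000, 0x2000 };

	/* Writer updates the bounds without holding a lock the reader shares. */
	TOY_WRITE_ONCE(v.vm_start, 0x3000);
	TOY_WRITE_ONCE(v.vm_end, 0x5000);

	/*
	 * Reader gets each field in exactly one access; it may still observe
	 * a mix of old and new values, which is why the vm_sequence re-check
	 * in the speculative path remains necessary.
	 */
	printf("[%#lx, %#lx)\n", TOY_READ_ONCE(v.vm_start), TOY_READ_ONCE(v.vm_end));
	return 0;
}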
*/ -struct vm_area_struct *vma_merge(struct mm_struct *mm, +struct vm_area_struct *__vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, - const char __user *anon_name) + const char __user *anon_name, bool keep_locked) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; @@ -1157,10 +1230,11 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, - prev); + prev, keep_locked); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, - end, prev->vm_pgoff, NULL, prev); + end, prev->vm_pgoff, NULL, prev, + keep_locked); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); @@ -1178,10 +1252,12 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, anon_name)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, - addr, prev->vm_pgoff, NULL, next); + addr, prev->vm_pgoff, NULL, next, + keep_locked); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, - next->vm_pgoff - pglen, NULL, next); + next->vm_pgoff - pglen, NULL, next, + keep_locked); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has @@ -1324,6 +1400,35 @@ static inline int mlock_future_check(struct mm_struct *mm, return 0; } +static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) +{ + if (S_ISREG(inode->i_mode)) + return MAX_LFS_FILESIZE; + + if (S_ISBLK(inode->i_mode)) + return MAX_LFS_FILESIZE; + + /* Special "we do even unsigned file positions" case */ + if (file->f_mode & FMODE_UNSIGNED_OFFSET) + return 0; + + /* Yes, random drivers might want more. But I'm tired of buggy drivers */ + return ULONG_MAX; +} + +static inline bool file_mmap_ok(struct file *file, struct inode *inode, + unsigned long pgoff, unsigned long len) +{ + u64 maxsize = file_mmap_size_max(file, inode); + + if (maxsize && len > maxsize) + return false; + maxsize -= len; + if (pgoff > maxsize >> PAGE_SHIFT) + return false; + return true; +} + /* * The caller must hold down_write(¤t->mm->mmap_sem). 
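file_mmap_ok() above closes an overflow hole in do_mmap(): a mapping is rejected when its length, or its page offset shifted back into bytes, would step past the file's maximum size. The small stand-alone rendition below keeps the same structure, with illustrative constants in place of MAX_LFS_FILESIZE and the kernel's page size; a zero limit corresponds to the FMODE_UNSIGNED_OFFSET special case, which effectively disables the length check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT	12
#define TOY_MAXSIZE	((uint64_t)1 << 40)	/* illustrative 1 TiB limit */

/*
 * Same shape as file_mmap_ok(): len must fit below the limit, and pgoff
 * must not push the end of the mapping past it.
 */
static bool toy_mmap_ok(uint64_t maxsize, uint64_t pgoff, uint64_t len)
{
	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > (maxsize >> TOY_PAGE_SHIFT))
		return false;
	return true;
}

int main(void)
{
	/* small mapping at offset 0: accepted */
	printf("%d\n", toy_mmap_ok(TOY_MAXSIZE, 0, 1 << 20));
	/* offset so large the mapping would run past the limit: rejected */
	printf("%d\n", toy_mmap_ok(TOY_MAXSIZE, UINT64_MAX >> TOY_PAGE_SHIFT, 4096));
	return 0;
}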
*/ @@ -1397,6 +1502,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (file) { struct inode *inode = file_inode(file); + if (!file_mmap_ok(file, inode, pgoff, len)) + return -EOVERFLOW; + switch (flags & MAP_TYPE) { case MAP_SHARED: if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE)) @@ -1676,7 +1784,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma->vm_flags = vm_flags; vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; - INIT_LIST_HEAD(&vma->anon_vma_chain); + INIT_VMA(vma); if (file) { if (vm_flags & VM_DENYWRITE) { @@ -1729,13 +1837,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr, out: perf_event_mmap(vma); + vm_write_begin(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))) mm->locked_vm += (len >> PAGE_SHIFT); else - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + WRITE_ONCE(vma->vm_flags, + vma->vm_flags & VM_LOCKED_CLEAR_MASK); } if (file) @@ -1748,9 +1858,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr, * then new mapped in-place (which must be aimed as * a completely new data area). */ - vma->vm_flags |= VM_SOFTDIRTY; + WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY); vma_set_page_prot(vma); + vm_write_end(vma); return addr; @@ -2122,15 +2233,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ -struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +static struct vm_area_struct *__find_vma(struct mm_struct *mm, + unsigned long addr) { struct rb_node *rb_node; - struct vm_area_struct *vma; - - /* Check the cache first. */ - vma = vmacache_find(mm, addr); - if (likely(vma)) - return vma; + struct vm_area_struct *vma = NULL; rb_node = mm->mm_rb.rb_node; @@ -2148,13 +2255,40 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) rb_node = rb_node->rb_right; } + return vma; +} + +struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma; + + /* Check the cache first. */ + vma = vmacache_find(mm, addr); + if (likely(vma)) + return vma; + + vma = __find_vma(mm, addr); if (vma) vmacache_update(addr, vma); return vma; } - EXPORT_SYMBOL(find_vma); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = NULL; + + read_lock(&mm->mm_rb_lock); + vma = __find_vma(mm, addr); + if (vma) + atomic_inc(&vma->vm_ref_count); + read_unlock(&mm->mm_rb_lock); + + return vma; +} +#endif + /* * Same as find_vma, but also return a pointer to the previous VMA in *pprev. */ @@ -2379,8 +2513,8 @@ int expand_downwards(struct vm_area_struct *vma, mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_start = address; - vma->vm_pgoff -= grow; + WRITE_ONCE(vma->vm_start, address); + WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow); anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); @@ -2522,7 +2656,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { - vma_rb_erase(vma, &mm->mm_rb); + vma_rb_erase(vma, mm); mm->map_count--; tail_vma = vma; vma = vma->vm_next; @@ -2562,7 +2696,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, /* most fields are the same, copy all, and then fixup */ *new = *vma; - INIT_LIST_HEAD(&new->anon_vma_chain); + INIT_VMA(new); if (new_below) new->vm_end = addr; @@ -2933,7 +3067,7 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long return -ENOMEM; } - INIT_LIST_HEAD(&vma->anon_vma_chain); + INIT_VMA(vma); vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; @@ -3117,9 +3251,21 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ - new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), - vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); + + /* There is 3 cases to manage here in + * AAAA AAAA AAAA AAAA + * PPPP.... PPPP......NNNN PPPP....NNNN PP........NN + * PPPPPPPP(A) PPPP..NNNNNNNN(B) PPPPPPPPPPPP(1) NULL + * PPPPPPPPNNNN(2) + * PPPPNNNNNNNN(3) + * + * new_vma == prev in case A,1,2 + * new_vma == next in case B,3 + */ + new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags, + vma->anon_vma, vma->vm_file, pgoff, + vma_policy(vma), vma->vm_userfaultfd_ctx, + vma_get_anon_name(vma), true); if (new_vma) { /* * Source vma may have been merged into new_vma @@ -3152,13 +3298,22 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; - INIT_LIST_HEAD(&new_vma->anon_vma_chain); + INIT_VMA(new_vma); if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); + /* + * As the VMA is linked right now, it may be hit by the + * speculative page fault handler. But we don't want it to + * to start mapping page in this area until the caller has + * potentially move the pte from the moved VMA. To prevent + * that we protect it right now, and let the caller unprotect + * it once the move is done. + */ + vm_raw_write_begin(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } @@ -3293,7 +3448,7 @@ static struct vm_area_struct *__install_special_mapping( if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&vma->anon_vma_chain); + INIT_VMA(vma); vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; diff --git a/mm/mprotect.c b/mm/mprotect.c index fc969367ef604ae59ac928ade450efaac6335561..44d3a9c8f0e7866ff7b69a9fa3ee13371c59c016 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -361,12 +361,14 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, * vm_flags and vm_page_prot are protected by the mmap_sem * held in write mode. 
*/ - vma->vm_flags = newflags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, newflags); dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot); vma_set_page_prot(vma); change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0); + vm_write_end(vma); /* * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major diff --git a/mm/mremap.c b/mm/mremap.c index 049470aa1e3eefc88407e9a35f1ca252fd01912d..8ed1a1d6eaedfd461d987d8070d5c6df7a988fbe 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -302,6 +302,14 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (!new_vma) return -ENOMEM; + /* new_vma is returned protected by copy_vma, to prevent speculative + * page fault to be done in the destination area before we move the pte. + * Now, we must also protect the source VMA since we don't want pages + * to be mapped in our back while we are copying the PTEs. + */ + if (vma != new_vma) + vm_raw_write_begin(vma); + moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, need_rmap_locks); if (moved_len < old_len) { @@ -318,6 +326,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, */ move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, true); + if (vma != new_vma) + vm_raw_write_end(vma); vma = new_vma; old_len = new_len; old_addr = new_addr; @@ -326,7 +336,10 @@ static unsigned long move_vma(struct vm_area_struct *vma, mremap_userfaultfd_prep(new_vma, uf); arch_remap(mm, old_addr, old_addr + old_len, new_addr, new_addr + new_len); + if (vma != new_vma) + vm_raw_write_end(vma); } + vm_raw_write_end(new_vma); /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT) { diff --git a/mm/nommu.c b/mm/nommu.c index 17c00d93de2e4973278845e98554ce2040445f65..040bde3ef3f782ddde0fd184df13d6e9aa21a1dd 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1244,7 +1244,7 @@ unsigned long do_mmap(struct file *file, region->vm_flags = vm_flags; region->vm_pgoff = pgoff; - INIT_LIST_HEAD(&vma->anon_vma_chain); + INIT_VMA(vma); vma->vm_flags = vm_flags; vma->vm_pgoff = pgoff; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d69ed837fb89b180d25e3f1568155a37d70cda83..96ec709430a81b59d88886617e3212dcc0b0363b 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include "internal.h" @@ -206,13 +207,6 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm); task_unlock(p); - /* - * Root processes get 3% bonus, just like the __vm_enough_memory() - * implementation used by LSMs. 
- */ - if (has_capability_noaudit(p, CAP_SYS_ADMIN)) - points -= (points * 3) / 100; - /* Normalize to oom_score_adj units */ adj *= totalpages / 1000; points += adj; @@ -424,8 +418,11 @@ static void dump_header(struct oom_control *oc, struct task_struct *p) dump_stack(); if (oc->memcg) mem_cgroup_print_oom_info(oc->memcg, p); - else + else { show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); + show_mem_call_notifiers(); + } + if (sysctl_oom_dump_tasks) dump_tasks(oc->memcg, oc->nodemask); } @@ -625,7 +622,7 @@ static int oom_reaper(void *unused) return 0; } -static void wake_oom_reaper(struct task_struct *tsk) +void wake_oom_reaper(struct task_struct *tsk) { if (!oom_reaper_th) return; @@ -1128,8 +1125,35 @@ void pagefault_out_of_memory(void) mutex_unlock(&oom_lock); } +/* Call this function with task_lock being held as we're accessing ->mm */ +void dump_killed_info(struct task_struct *selected) +{ + int selected_tasksize = get_mm_rss(selected->mm); + + pr_info("Killing '%s' (%d), adj %hd,\n" + " to free %ldkB on behalf of '%s' (%d)\n" + " Free CMA is %ldkB\n" + " Total reserve is %ldkB\n" + " Total free pages is %ldkB\n" + " Total file cache is %ldkB\n", + selected->comm, selected->pid, + selected->signal->oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + global_zone_page_state(NR_FREE_CMA_PAGES) * + (long)(PAGE_SIZE / 1024), + totalreserve_pages * (long)(PAGE_SIZE / 1024), + global_zone_page_state(NR_FREE_PAGES) * + (long)(PAGE_SIZE / 1024), + global_node_page_state(NR_FILE_PAGES) * + (long)(PAGE_SIZE / 1024)); +} + void add_to_oom_reaper(struct task_struct *p) { + static DEFINE_RATELIMIT_STATE(reaper_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + if (!sysctl_reap_mem_on_sigkill) return; @@ -1142,6 +1166,16 @@ void add_to_oom_reaper(struct task_struct *p) __mark_oom_victim(p); wake_oom_reaper(p); } + + dump_killed_info(p); task_unlock(p); + + if (__ratelimit(&reaper_rs) && p->signal->oom_score_adj == 0) { + show_mem(SHOW_MEM_FILTER_NODES, NULL); + show_mem_call_notifiers(); + if (sysctl_oom_dump_tasks) + dump_tasks(NULL, NULL); + } + put_task_struct(p); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5f67c3902a32d98887d03d2ea7e3dbe5180af34a..001b2eff4e9ce95114eaa865ba2ab790b4c096ec 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -64,6 +64,7 @@ #include #include #include +#include #include #include #include @@ -1854,6 +1855,11 @@ static int fallbacks[MIGRATE_TYPES][4] = { #endif }; +int *get_migratetype_fallbacks(int mtype) +{ + return fallbacks[mtype]; +} + #ifdef CONFIG_CMA static struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) @@ -3229,6 +3235,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) filter &= ~SHOW_MEM_FILTER_NODES; show_mem(filter, nodemask); + show_mem_call_notifiers(); } void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) @@ -3424,6 +3431,46 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, return NULL; } +#ifdef CONFIG_HAVE_LOW_MEMORY_KILLER +static inline bool +should_compact_lmk_retry(struct alloc_context *ac, int order, int alloc_flags) +{ + struct zone *zone; + struct zoneref *z; + + /* Let costly order requests check for compaction progress */ + if (order > PAGE_ALLOC_COSTLY_ORDER) + return false; + + /* + * For (0 < order < PAGE_ALLOC_COSTLY_ORDER) allow the shrinkers + * to run and free up memory. Do not let these allocations fail + * if shrinkers can free up memory. 
This is similar to + * should_compact_retry implementation for !CONFIG_COMPACTION. + */ + for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, + ac->high_zoneidx, ac->nodemask) { + unsigned long available; + + available = zone_reclaimable_pages(zone); + available += + zone_page_state_snapshot(zone, NR_FREE_PAGES); + + if (__zone_watermark_ok(zone, 0, min_wmark_pages(zone), + ac_classzone_idx(ac), alloc_flags, available)) + return true; + } + + return false; +} +#else +static inline bool +should_compact_lmk_retry(struct alloc_context *ac, int order, int alloc_flags) +{ + return false; +} +#endif + static inline bool should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, enum compact_result compact_result, @@ -3439,6 +3486,9 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, if (!order) return false; + if (should_compact_lmk_retry(ac, order, alloc_flags)) + return true; + if (compaction_made_progress(compact_result)) (*compaction_retries)++; @@ -3750,7 +3800,8 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order, * their order will become available due to high fragmentation so * always increment the no progress counter for them */ - if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) + if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) || + IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) *no_progress_loops = 0; else (*no_progress_loops)++; @@ -3993,7 +4044,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, * orientated. */ if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { - ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, ac->high_zoneidx, ac->nodemask); } @@ -4052,7 +4102,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, * implementation of the compaction depends on the sufficient amount * of free memory (see __compaction_suitable) */ - if (did_some_progress > 0 && + if ((did_some_progress > 0 || + IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) && should_compact_retry(ac, order, alloc_flags, compact_result, &compact_priority, &compaction_retries)) diff --git a/mm/process_reclaim.c b/mm/process_reclaim.c new file mode 100644 index 0000000000000000000000000000000000000000..92ce0a5c105c669979512599449e6ec27b6bd440 --- /dev/null +++ b/mm/process_reclaim.c @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +#define MAX_SWAP_TASKS SWAP_CLUSTER_MAX + +static void swap_fn(struct work_struct *work); +DECLARE_WORK(swap_work, swap_fn); + +/* User knob to enable/disable process reclaim feature */ +static int enable_process_reclaim; +module_param_named(enable_process_reclaim, enable_process_reclaim, int, 0644); + +/* The max number of pages tried to be reclaimed in a single run */ +int per_swap_size = SWAP_CLUSTER_MAX * 32; +module_param_named(per_swap_size, per_swap_size, int, 0644); + +int reclaim_avg_efficiency; +module_param_named(reclaim_avg_efficiency, reclaim_avg_efficiency, int, 0444); + +/* The vmpressure region where process reclaim operates */ +static unsigned long pressure_min = 50; +static unsigned long pressure_max = 90; +module_param_named(pressure_min, pressure_min, ulong, 0644); +module_param_named(pressure_max, pressure_max, ulong, 0644); + +/* + * Scheduling process reclaim workqueue unecessarily + * when the reclaim efficiency is low does not make + * sense. We try to detect a drop in efficiency and + * disable reclaim for a time period. This period and the + * period for which we monitor a drop in efficiency is + * defined by swap_eff_win. swap_opt_eff is the optimal + * efficincy used as theshold for this. + */ +static int swap_eff_win = 2; +module_param_named(swap_eff_win, swap_eff_win, int, 0644); + +static int swap_opt_eff = 50; +module_param_named(swap_opt_eff, swap_opt_eff, int, 0644); + +static atomic_t skip_reclaim = ATOMIC_INIT(0); +/* Not atomic since only a single instance of swap_fn run at a time */ +static int monitor_eff; + +struct selected_task { + struct task_struct *p; + int tasksize; + short oom_score_adj; +}; + +int selected_cmp(const void *a, const void *b) +{ + const struct selected_task *x = a; + const struct selected_task *y = b; + int ret; + + ret = x->tasksize < y->tasksize ? 
-1 : 1; + + return ret; +} + +static int test_task_flag(struct task_struct *p, int flag) +{ + struct task_struct *t = p; + + rcu_read_lock(); + for_each_thread(p, t) { + task_lock(t); + if (test_tsk_thread_flag(t, flag)) { + task_unlock(t); + rcu_read_unlock(); + return 1; + } + task_unlock(t); + } + rcu_read_unlock(); + + return 0; +} + +static void swap_fn(struct work_struct *work) +{ + struct task_struct *tsk; + struct reclaim_param rp; + + /* Pick the best MAX_SWAP_TASKS tasks in terms of anon size */ + struct selected_task selected[MAX_SWAP_TASKS] = {{0, 0, 0},}; + int si = 0; + int i; + int tasksize; + int total_sz = 0; + short min_score_adj = 360; + int total_scan = 0; + int total_reclaimed = 0; + int nr_to_reclaim; + int efficiency; + + rcu_read_lock(); + for_each_process(tsk) { + struct task_struct *p; + short oom_score_adj; + + if (tsk->flags & PF_KTHREAD) + continue; + + if (test_task_flag(tsk, TIF_MEMDIE)) + continue; + + p = find_lock_task_mm(tsk); + if (!p) + continue; + + oom_score_adj = p->signal->oom_score_adj; + if (oom_score_adj < min_score_adj) { + task_unlock(p); + continue; + } + + tasksize = get_mm_counter(p->mm, MM_ANONPAGES); + task_unlock(p); + + if (tasksize <= 0) + continue; + + if (si == MAX_SWAP_TASKS) { + sort(&selected[0], MAX_SWAP_TASKS, + sizeof(struct selected_task), + &selected_cmp, NULL); + if (tasksize < selected[0].tasksize) + continue; + selected[0].p = p; + selected[0].oom_score_adj = oom_score_adj; + selected[0].tasksize = tasksize; + } else { + selected[si].p = p; + selected[si].oom_score_adj = oom_score_adj; + selected[si].tasksize = tasksize; + si++; + } + } + + for (i = 0; i < si; i++) + total_sz += selected[i].tasksize; + + /* Skip reclaim if total size is too less */ + if (total_sz < SWAP_CLUSTER_MAX) { + rcu_read_unlock(); + return; + } + + for (i = 0; i < si; i++) + get_task_struct(selected[i].p); + + rcu_read_unlock(); + + while (si--) { + nr_to_reclaim = + (selected[si].tasksize * per_swap_size) / total_sz; + /* scan atleast a page */ + if (!nr_to_reclaim) + nr_to_reclaim = 1; + + rp = reclaim_task_anon(selected[si].p, nr_to_reclaim); + + trace_process_reclaim(selected[si].tasksize, + selected[si].oom_score_adj, rp.nr_scanned, + rp.nr_reclaimed, per_swap_size, total_sz, + nr_to_reclaim); + total_scan += rp.nr_scanned; + total_reclaimed += rp.nr_reclaimed; + put_task_struct(selected[si].p); + } + + if (total_scan) { + efficiency = (total_reclaimed * 100) / total_scan; + + if (efficiency < swap_opt_eff) { + if (++monitor_eff == swap_eff_win) { + atomic_set(&skip_reclaim, swap_eff_win); + monitor_eff = 0; + } + } else { + monitor_eff = 0; + } + + reclaim_avg_efficiency = + (efficiency + reclaim_avg_efficiency) / 2; + trace_process_reclaim_eff(efficiency, reclaim_avg_efficiency); + } +} + +static int vmpressure_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + unsigned long pressure = action; + + if (!enable_process_reclaim) + return 0; + + if (!current_is_kswapd()) + return 0; + + if (atomic_dec_if_positive(&skip_reclaim) >= 0) + return 0; + + if ((pressure >= pressure_min) && (pressure < pressure_max)) + if (!work_pending(&swap_work)) + queue_work(system_unbound_wq, &swap_work); + return 0; +} + +static struct notifier_block vmpr_nb = { + .notifier_call = vmpressure_notifier, +}; + +static int __init process_reclaim_init(void) +{ + vmpressure_notifier_register(&vmpr_nb); + return 0; +} + +static void __exit process_reclaim_exit(void) +{ + vmpressure_notifier_unregister(&vmpr_nb); +} + 
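The selection loop in swap_fn() keeps only the MAX_SWAP_TASKS largest candidates by anonymous size: once the array is full it is sorted ascending, so slot 0 always holds the smallest entry and a bigger newcomer simply overwrites it. The user-space fragment below reproduces that bookkeeping with qsort() and made-up pids and sizes, purely to show why the smallest entry is always the one evicted; unlike the in-kernel comparator, this one also handles the equal-size case.

#include <stdio.h>
#include <stdlib.h>

#define MAX_SWAP_TASKS 4		/* kept small for the example */

struct selected_task {
	int pid;			/* stand-in for the task pointer */
	int tasksize;
};

/* Ascending by size, mirroring selected_cmp() above. */
static int selected_cmp(const void *a, const void *b)
{
	const struct selected_task *x = a, *y = b;

	if (x->tasksize == y->tasksize)
		return 0;
	return x->tasksize < y->tasksize ? -1 : 1;
}

int main(void)
{
	struct selected_task sel[MAX_SWAP_TASKS] = { 0 };
	int candidates[][2] = { {1, 50}, {2, 10}, {3, 70}, {4, 30},
				{5, 40}, {6, 5}, {7, 90} };
	int si = 0;

	for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		int pid = candidates[i][0], size = candidates[i][1];

		if (si == MAX_SWAP_TASKS) {
			/* Array full: sort ascending, slot 0 is the smallest. */
			qsort(sel, MAX_SWAP_TASKS, sizeof(sel[0]), selected_cmp);
			if (size < sel[0].tasksize)
				continue;	/* newcomer too small, skip */
			sel[0].pid = pid;
			sel[0].tasksize = size;
		} else {
			sel[si].pid = pid;
			sel[si].tasksize = size;
			si++;
		}
	}

	for (int i = 0; i < si; i++)
		printf("kept pid %d (size %d)\n", sel[i].pid, sel[i].tasksize);
	return 0;
}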
+module_init(process_reclaim_init); +module_exit(process_reclaim_exit); diff --git a/mm/rmap.c b/mm/rmap.c index b874c4761e8422829610d9a1173c56139db5823b..c481b646ed8a274952b4ad1757bcafa78f1b4133 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1133,7 +1133,7 @@ void do_page_add_anon_rmap(struct page *page, } /** - * page_add_new_anon_rmap - add pte mapping to a new anonymous page + * __page_add_new_anon_rmap - add pte mapping to a new anonymous page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped @@ -1143,12 +1143,11 @@ void do_page_add_anon_rmap(struct page *page, * This means the inc-and-test can be bypassed. * Page does not have to be locked. */ -void page_add_new_anon_rmap(struct page *page, +void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, bool compound) { int nr = compound ? hpage_nr_pages(page) : 1; - VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); __SetPageSwapBacked(page); if (compound) { VM_BUG_ON_PAGE(!PageTransHuge(page), page); @@ -1591,19 +1590,24 @@ static int page_mapcount_is_zero(struct page *page) * try_to_unmap - try to remove all page table mappings to a page * @page: the page to get unmapped * @flags: action and flags + * @vma : target vma for reclaim * * Tries to remove all the page table entries which are mapping this * page, used in the pageout path. Caller must hold the page lock. + * If @vma is not NULL, this function try to remove @page from only @vma + * without peeking all mapped vma for @page. * * If unmap is successful, return true. Otherwise, false. */ -bool try_to_unmap(struct page *page, enum ttu_flags flags) +bool try_to_unmap(struct page *page, enum ttu_flags flags, + struct vm_area_struct *vma) { struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, .arg = (void *)flags, .done = page_mapcount_is_zero, .anon_lock = page_lock_anon_vma_read, + .target_vma = vma, }; /* @@ -1647,6 +1651,7 @@ void try_to_munlock(struct page *page) .arg = (void *)TTU_MUNLOCK, .done = page_not_mapped, .anon_lock = page_lock_anon_vma_read, + .target_vma = NULL, }; @@ -1708,6 +1713,12 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; + if (rwc->target_vma) { + unsigned long address = vma_address(page, rwc->target_vma); + + rwc->rmap_one(page, rwc->target_vma, address, rwc->arg); + } + if (locked) { anon_vma = page_anon_vma(page); /* anon_vma disappear under us? 
*/ @@ -1715,6 +1726,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, } else { anon_vma = rmap_walk_anon_lock(page, rwc); } + if (!anon_vma) return; @@ -1759,6 +1771,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, struct address_space *mapping = page_mapping(page); pgoff_t pgoff_start, pgoff_end; struct vm_area_struct *vma; + unsigned long address; /* * The page lock not only makes sure that page->mapping cannot @@ -1775,6 +1788,13 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; if (!locked) i_mmap_lock_read(mapping); + + if (rwc->target_vma) { + address = vma_address(page, rwc->target_vma); + rwc->rmap_one(page, rwc->target_vma, address, rwc->arg); + goto done; + } + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { unsigned long address = vma_address(page, vma); diff --git a/mm/slab_common.c b/mm/slab_common.c index 65212caa1f2a342d1fdefdd8fba9f17078d8b155..91d271b90600c4d560f4498ce49a3542f0643fb2 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -546,10 +546,14 @@ static int shutdown_cache(struct kmem_cache *s) list_del(&s->list); if (s->flags & SLAB_TYPESAFE_BY_RCU) { +#ifdef SLAB_SUPPORTS_SYSFS + sysfs_slab_unlink(s); +#endif list_add_tail(&s->list, &slab_caches_to_rcu_destroy); schedule_work(&slab_caches_to_rcu_destroy_work); } else { #ifdef SLAB_SUPPORTS_SYSFS + sysfs_slab_unlink(s); sysfs_slab_release(s); #else slab_kmem_cache_release(s); diff --git a/mm/slub.c b/mm/slub.c index 9563197c324ddf31232f33c6de465648beeccab5..fbfe6393be1b3c3e62e0abf5da1cca523bd42585 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5672,7 +5672,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work) kset_unregister(s->memcg_kset); #endif kobject_uevent(&s->kobj, KOBJ_REMOVE); - kobject_del(&s->kobj); out: kobject_put(&s->kobj); } @@ -5757,6 +5756,12 @@ static void sysfs_slab_remove(struct kmem_cache *s) schedule_work(&s->kobj_remove_work); } +void sysfs_slab_unlink(struct kmem_cache *s) +{ + if (slab_state >= FULL) + kobject_del(&s->kobj); +} + void sysfs_slab_release(struct kmem_cache *s) { if (slab_state >= FULL) diff --git a/mm/swap.c b/mm/swap.c index a77d68f2c1b61de1bc8102656e7e482a442963b1..274981cf0652fec43927e9a029010b2ec346efa6 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -479,12 +479,12 @@ void add_page_to_unevictable_list(struct page *page) * directly back onto it's zone's unevictable list, it does NOT use a * per cpu pagevec. */ -void lru_cache_add_active_or_unevictable(struct page *page, - struct vm_area_struct *vma) +void __lru_cache_add_active_or_unevictable(struct page *page, + unsigned long vma_flags) { VM_BUG_ON_PAGE(PageLRU(page), page); - if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) { + if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) { SetPageActive(page); lru_cache_add(page); return; diff --git a/mm/swap_state.c b/mm/swap_state.c index 2348a7ae18e8a9a9bd20f1c16efd17f0787f7e57..6b2fa4835719795a49475faf497c61739b48266b 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -560,6 +560,10 @@ static unsigned long swapin_nr_pages(unsigned long offset) * the readahead. * * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL. + * This is needed to ensure the VMA will not be freed in our back. In the case + * of the speculative page fault handler, this cannot happen, even if we don't + * hold the mmap_sem. 
Callees are assumed to take care of reading VMA's fields + * using READ_ONCE() to read consistent values. */ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) @@ -652,9 +656,9 @@ static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - *start = max3(lpfn, PFN_DOWN(vma->vm_start), + *start = max3(lpfn, PFN_DOWN(READ_ONCE(vma->vm_start)), PFN_DOWN(faddr & PMD_MASK)); - *end = min3(rpfn, PFN_DOWN(vma->vm_end), + *end = min3(rpfn, PFN_DOWN(READ_ONCE(vma->vm_end)), PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE)); } diff --git a/mm/vmpressure.c b/mm/vmpressure.c index afb5beadd2192a5b4f73913e4c8a79eb6a982155..679fe3020b77db8c1d94147138510c8b3e25c22c 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #include /* @@ -38,7 +41,7 @@ * TODO: Make the window size depend on machine size, as we do for vmstat * thresholds. Currently we set it to 512 pages (2MB for 4KB pages). */ -static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; +static unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; /* * These thresholds are used when we account memory pressure through @@ -49,6 +52,33 @@ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; static const unsigned int vmpressure_level_med = 60; static const unsigned int vmpressure_level_critical = 95; +static unsigned long vmpressure_scale_max = 100; +module_param_named(vmpressure_scale_max, vmpressure_scale_max, + ulong, 0644); + +/* vmpressure values >= this will be scaled based on allocstalls */ +static unsigned long allocstall_threshold = 70; +module_param_named(allocstall_threshold, allocstall_threshold, + ulong, 0644); + +static struct vmpressure global_vmpressure; +static BLOCKING_NOTIFIER_HEAD(vmpressure_notifier); + +int vmpressure_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmpressure_notifier, nb); +} + +int vmpressure_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&vmpressure_notifier, nb); +} + +static void vmpressure_notify(unsigned long pressure) +{ + blocking_notifier_call_chain(&vmpressure_notifier, pressure, NULL); +} + /* * When there are too little pages left to scan, vmpressure() may miss the * critical pressure as number of pages will be less than "window size". 
@@ -75,6 +105,7 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work) return container_of(work, struct vmpressure, work); } +#ifdef CONFIG_MEMCG static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) { struct cgroup_subsys_state *css = vmpressure_to_css(vmpr); @@ -85,6 +116,12 @@ static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) return NULL; return memcg_to_vmpressure(memcg); } +#else +static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) +{ + return NULL; +} +#endif enum vmpressure_levels { VMPRESSURE_LOW = 0, @@ -121,7 +158,7 @@ static enum vmpressure_levels vmpressure_level(unsigned long pressure) return VMPRESSURE_LOW; } -static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, +static unsigned long vmpressure_calc_pressure(unsigned long scanned, unsigned long reclaimed) { unsigned long scale = scanned + reclaimed; @@ -148,7 +185,20 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, scanned, reclaimed); - return vmpressure_level(pressure); + return pressure; +} + +static unsigned long vmpressure_account_stall(unsigned long pressure, + unsigned long stall, unsigned long scanned) +{ + unsigned long scale; + + if (pressure < allocstall_threshold) + return pressure; + + scale = ((vmpressure_scale_max - pressure) * stall) / scanned; + + return pressure + scale; } struct vmpressure_event { @@ -186,6 +236,7 @@ static void vmpressure_work_fn(struct work_struct *work) struct vmpressure *vmpr = work_to_vmpressure(work); unsigned long scanned; unsigned long reclaimed; + unsigned long pressure; enum vmpressure_levels level; bool ancestor = false; bool signalled = false; @@ -210,7 +261,8 @@ static void vmpressure_work_fn(struct work_struct *work) vmpr->tree_reclaimed = 0; spin_unlock(&vmpr->sr_lock); - level = vmpressure_calc_level(scanned, reclaimed); + pressure = vmpressure_calc_pressure(scanned, reclaimed); + level = vmpressure_level(pressure); do { if (vmpressure_event(vmpr, level, ancestor, signalled)) @@ -219,28 +271,8 @@ static void vmpressure_work_fn(struct work_struct *work) } while ((vmpr = vmpressure_parent(vmpr))); } -/** - * vmpressure() - Account memory pressure through scanned/reclaimed ratio - * @gfp: reclaimer's gfp mask - * @memcg: cgroup memory controller handle - * @tree: legacy subtree mode - * @scanned: number of pages scanned - * @reclaimed: number of pages reclaimed - * - * This function should be called from the vmscan reclaim path to account - * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw - * pressure index is then further refined and averaged over time. - * - * If @tree is set, vmpressure is in traditional userspace reporting - * mode: @memcg is considered the pressure root and userspace is - * notified of the entire subtree's reclaim efficiency. - * - * If @tree is not set, reclaim efficiency is recorded for @memcg, and - * only in-kernel users are notified. - * - * This function does not return any value. 
- */ -void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, +#ifdef CONFIG_MEMCG +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); @@ -281,6 +313,7 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, schedule_work(&vmpr->work); } else { enum vmpressure_levels level; + unsigned long pressure; /* For now, no users for root-level efficiency */ if (!memcg || memcg == root_mem_cgroup) @@ -296,7 +329,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, vmpr->scanned = vmpr->reclaimed = 0; spin_unlock(&vmpr->sr_lock); - level = vmpressure_calc_level(scanned, reclaimed); + pressure = vmpressure_calc_pressure(scanned, reclaimed); + level = vmpressure_level(pressure); if (level > VMPRESSURE_LOW) { /* @@ -311,6 +345,108 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, } } } +#else +static void vmpressure_memcg(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed) +{ +} +#endif + +static void calculate_vmpressure_win(void) +{ + long x; + + x = global_node_page_state(NR_FILE_PAGES) - + global_node_page_state(NR_SHMEM) - + total_swapcache_pages() + + global_zone_page_state(NR_FREE_PAGES); + if (x < 1) + x = 1; + /* + * For low (free + cached), the vmpressure window should be + * small, and high for higher values of (free + cached). + * But it should not be linear either. This ensures + * timely vmpressure notifications when the system is under + * memory pressure, and an optimal number of events when + * cached is high. The square root function is empirically + * found to serve the purpose. + */ + x = int_sqrt(x); + vmpressure_win = x; +} + +static void vmpressure_global(gfp_t gfp, unsigned long scanned, + unsigned long reclaimed) +{ + struct vmpressure *vmpr = &global_vmpressure; + unsigned long pressure; + unsigned long stall; + + if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) + return; + + if (!scanned) + return; + + spin_lock(&vmpr->sr_lock); + if (!vmpr->scanned) + calculate_vmpressure_win(); + + vmpr->scanned += scanned; + vmpr->reclaimed += reclaimed; + + if (!current_is_kswapd()) + vmpr->stall += scanned; + + stall = vmpr->stall; + scanned = vmpr->scanned; + reclaimed = vmpr->reclaimed; + spin_unlock(&vmpr->sr_lock); + + if (scanned < vmpressure_win) + return; + + spin_lock(&vmpr->sr_lock); + vmpr->scanned = 0; + vmpr->reclaimed = 0; + vmpr->stall = 0; + spin_unlock(&vmpr->sr_lock); + + pressure = vmpressure_calc_pressure(scanned, reclaimed); + pressure = vmpressure_account_stall(pressure, stall, scanned); + vmpressure_notify(pressure); +} + +/** + * vmpressure() - Account memory pressure through scanned/reclaimed ratio + * @gfp: reclaimer's gfp mask + * @memcg: cgroup memory controller handle + * @tree: legacy subtree mode + * @scanned: number of pages scanned + * @reclaimed: number of pages reclaimed + * + * This function should be called from the vmscan reclaim path to account + * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw + * pressure index is then further refined and averaged over time. + * + * If @tree is set, vmpressure is in traditional userspace reporting + * mode: @memcg is considered the pressure root and userspace is + * notified of the entire subtree's reclaim efficiency. + * + * If @tree is not set, reclaim efficiency is recorded for @memcg, and + * only in-kernel users are notified.
+ * + * This function does not return any value. + */ +void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, + unsigned long scanned, unsigned long reclaimed) +{ + if (!memcg && tree) + vmpressure_global(gfp, scanned, reclaimed); + + if (IS_ENABLED(CONFIG_MEMCG)) + vmpressure_memcg(gfp, memcg, tree, scanned, reclaimed); +} /** * vmpressure_prio() - Account memory pressure through reclaimer priority level @@ -491,3 +627,10 @@ void vmpressure_cleanup(struct vmpressure *vmpr) */ flush_work(&vmpr->work); } + +static int vmpressure_global_init(void) +{ + vmpressure_init(&global_vmpressure); + return 0; +} +late_initcall(vmpressure_global_init); diff --git a/mm/vmscan.c b/mm/vmscan.c index c7c8e56da46a5ddc6a378a17d1d6fcfb5c6742d6..4ce5b4d8778ff6bf66afa4bce9ad5e33c6fcb560 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -116,6 +116,13 @@ struct scan_control { /* Number of pages freed so far during a call to shrink_zones() */ unsigned long nr_reclaimed; + + /* + * Reclaim pages from a vma. If a page is shared by other tasks, + * it is zapped from this vma without being reclaimed, so it stays + * in memory until the last task zaps it. + */ + struct vm_area_struct *target_vma; }; #ifdef ARCH_HAS_PREFETCH @@ -213,7 +220,8 @@ unsigned long zone_reclaimable_pages(struct zone *zone) nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); - if (get_nr_swap_pages() > 0) + if (get_nr_swap_pages() > 0 + || IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER)) nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); @@ -982,7 +990,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, struct address_space *mapping; struct page *page; int may_enter_fs; - enum page_references references = PAGEREF_RECLAIM_CLEAN; + enum page_references references = PAGEREF_RECLAIM; bool dirty, writeback; cond_resched(); @@ -994,6 +1002,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, goto keep; VM_BUG_ON_PAGE(PageActive(page), page); + if (pgdat) + VM_BUG_ON_PAGE(page_pgdat(page) != pgdat, page); sc->nr_scanned++; @@ -1082,7 +1092,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Case 1 above */ if (current_is_kswapd() && PageReclaim(page) && - test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { + (pgdat && + test_bit(PGDAT_WRITEBACK, &pgdat->flags))) { nr_immediate++; goto activate_locked; @@ -1185,7 +1196,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, if (unlikely(PageTransHuge(page))) flags |= TTU_SPLIT_HUGE_PMD; - if (!try_to_unmap(page, flags)) { + if (!try_to_unmap(page, flags, sc->target_vma)) { nr_unmap_fail++; goto activate_locked; } @@ -1204,7 +1215,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, */ if (page_is_file_cache(page) && (!current_is_kswapd() || !PageReclaim(page) || - !test_bit(PGDAT_DIRTY, &pgdat->flags))) { + (pgdat && + !test_bit(PGDAT_DIRTY, &pgdat->flags)))) { /* * Immediately reclaim when written back. * Similar in principal to deactivate_page() @@ -1330,6 +1342,13 @@ static unsigned long shrink_page_list(struct list_head *page_list, (*get_compound_page_dtor(page))(page); } else list_add(&page->lru, &free_pages); + /* + * If the page list contains pages from multiple nodes, we should + * decrease NR_ISOLATED_ANON + x on the freed pages here.
+ */ + if (!pgdat) + dec_node_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); continue; activate_locked: @@ -1377,6 +1396,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, .gfp_mask = GFP_KERNEL, .priority = DEF_PRIORITY, .may_unmap = 1, + /* Doesn't allow to write out dirty page */ + .may_writepage = 0, }; unsigned long ret; struct page *page, *next; @@ -1397,6 +1418,40 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, return ret; } +#ifdef CONFIG_PROCESS_RECLAIM +unsigned long reclaim_pages_from_list(struct list_head *page_list, + struct vm_area_struct *vma) +{ + struct scan_control sc = { + .gfp_mask = GFP_KERNEL, + .priority = DEF_PRIORITY, + .may_writepage = 1, + .may_unmap = 1, + .may_swap = 1, + .target_vma = vma, + }; + + unsigned long nr_reclaimed; + struct page *page; + + list_for_each_entry(page, page_list, lru) + ClearPageActive(page); + + nr_reclaimed = shrink_page_list(page_list, NULL, &sc, + TTU_IGNORE_ACCESS, NULL, true); + + while (!list_empty(page_list)) { + page = lru_to_page(page_list); + list_del(&page->lru); + dec_node_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); + putback_lru_page(page); + } + + return nr_reclaimed; +} +#endif + /* * Attempt to remove the specified page from its LRU. Only take this page * if it is of the appropriate PageActive status. Pages which are being diff --git a/mm/vmstat.c b/mm/vmstat.c index 3459b767c307cfdc5ab5d355e39ccd03d7dbc717..ababc033127b73f35a4dbcedfa563c11ee29f0a0 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1219,7 +1219,10 @@ const char * const vmstat_text[] = { "swap_ra", "swap_ra_hit", #endif -#endif /* CONFIG_VM_EVENTS_COUNTERS */ +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + "speculative_pgfault" +#endif +#endif /* CONFIG_VM_EVENT_COUNTERS */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index f950b80c0dd1b448ec0cc6cb866e4796157f1238..d8796a7874b664cf41bc659b8988d64bd98f4976 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -1179,7 +1179,7 @@ __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys) keys->ports.src = fl6->fl6_sport; keys->ports.dst = fl6->fl6_dport; keys->keyid.keyid = fl6->fl6_gre_key; - keys->tags.flow_label = (__force u32)fl6->flowlabel; + keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); keys->basic.ip_proto = fl6->flowi6_proto; return flow_hash_from_keys(keys); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 927a6dcbad9668c6cb8b5afe150e8b4233d6a66b..8f17724a173ccba3b88a3f0e24ac0b1104fb58ed 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1207,9 +1207,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, cpumask_var_t mask; unsigned long index; - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) - return -ENOMEM; - index = get_netdev_queue_index(queue); if (dev->num_tc) { @@ -1219,6 +1216,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, return -EINVAL; } + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + rcu_read_lock(); dev_maps = rcu_dereference(dev->xps_maps); if (dev_maps) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5ace48926b196666265a7f95b77779cbdd1ff848..4cfdad08aca04d067a67875f97c13510dc456c0b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1958,6 +1958,10 @@ static int do_setlink(const struct sk_buff *skb, const struct net_device_ops *ops = dev->netdev_ops; int err; + err = validate_linkmsg(dev, 
tb); + if (err < 0) + return err; + if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) { struct net *net = rtnl_link_get_net(dev_net(dev), tb); if (IS_ERR(net)) { @@ -2296,10 +2300,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } - err = validate_linkmsg(dev, tb); - if (err < 0) - goto errout; - err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0); errout: return err; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b73e7c87f926717b00c9609b5ab700044b4c2276..db973bf72b76ec04e52707d2765452a76347b497 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -870,6 +870,10 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) atomic_inc(&(skb_shinfo(skb)->dataref)); skb->cloned = 1; +#ifdef CONFIG_IPV6_NDISC_NODETYPE + C(ndisc_nodetype); +#endif + return n; #undef C } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index ff3b058cf58ca5e5d21a8ea620052750bbe8376e..936dab12f99f2edff3d606a070b5b397e75a4651 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -280,9 +280,7 @@ int dccp_disconnect(struct sock *sk, int flags) dccp_clear_xmit_timers(sk); ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); - ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); dp->dccps_hc_rx_ccid = NULL; - dp->dccps_hc_tx_ccid = NULL; __skb_queue_purge(&sk->sk_receive_queue); __skb_queue_purge(&sk->sk_write_queue); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index fcc9aa72877d685986b1ec52df13bc5a650fbcad..374d586b4a2c2e0592357c3566448c67278b9a9e 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -79,7 +79,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev, if (unlikely(ds->cpu_port_mask & BIT(source_port))) return NULL; - pskb_trim_rcsum(skb, skb->len - 4); + if (pskb_trim_rcsum(skb, skb->len - 4)) + return NULL; skb->dev = ds->ports[source_port].netdev; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index d72874150905b65c4db60ca032124b75159f8fc9..df8fd3ce713d757c9ea8f227176cf9e4269366f7 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -625,6 +625,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = { [RTA_ENCAP] = { .type = NLA_NESTED }, [RTA_UID] = { .type = NLA_U32 }, [RTA_MARK] = { .type = NLA_U32 }, + [RTA_TABLE] = { .type = NLA_U32 }, }; static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f39955913d3f107515aabbd2a91af9b44b309b41..b557af72cde96c799cac107441785b5a1c42c268 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -725,6 +725,8 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) nla_strlcpy(tmp, nla, sizeof(tmp)); val = tcp_ca_get_key_by_name(tmp, &ecn_ca); } else { + if (nla_len(nla) != sizeof(u32)) + return false; val = nla_get_u32(nla); } @@ -1051,6 +1053,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg) if (val == TCP_CA_UNSPEC) return -EINVAL; } else { + if (nla_len(nla) != sizeof(u32)) + return -EINVAL; val = nla_get_u32(nla); } if (type == RTAX_ADVMSS && val > 65535 - 40) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1e70ed5244eafd329c0f96d5496a0992478d4b33..d07ba4d5917b4559ea1f97a81095904d759dbe50 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -511,8 +511,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int err; int copied; - WARN_ON_ONCE(sk->sk_family == AF_INET6); - err = -EAGAIN; skb = sock_dequeue_err_skb(sk); 
if (!skb) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index c9b3e6e069aea65a88c20b0702450240f7f83417..cbd9c0d8a7880b1a3596f974cb82c9b3e3ea8f19 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -323,6 +323,7 @@ static const struct rhashtable_params ipmr_rht_params = { static struct mr_table *ipmr_new_table(struct net *net, u32 id) { struct mr_table *mrt; + int err; /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ if (id != RT_TABLE_DEFAULT && id >= 1000000000) @@ -338,7 +339,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) write_pnet(&mrt->net, net); mrt->id = id; - rhltable_init(&mrt->mfc_hash, &ipmr_rht_params); + err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params); + if (err) { + kfree(mrt); + return ERR_PTR(err); + } INIT_LIST_HEAD(&mrt->mfc_cache_list); INIT_LIST_HEAD(&mrt->mfc_unres_queue); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ba5628689c57abba234761a28aaa91f7efd68cf8..5aae038c58b256e38369325952a53302213fff0c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -648,7 +648,7 @@ void tcp_rcv_space_adjust(struct sock *sk) sk->sk_rcvbuf = rcvbuf; /* Make the window clamp follow along. */ - tp->window_clamp = rcvwin; + tp->window_clamp = tcp_win_from_space(rcvbuf); } } tp->rcvq_space.space = copied; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 32015d3cf69dde9c9c4b9bf78304907d2fcf7621..30ae1204c2ebc2e40a1ad0895140a864ce2d11e2 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1675,6 +1675,10 @@ int tcp_v4_rcv(struct sk_buff *skb) reqsk_put(req); goto discard_it; } + if (tcp_checksum_complete(skb)) { + reqsk_put(req); + goto csum_error; + } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index e2f3000d60b56fe70831d17c7c17999247513ef3..48a475ab09617fb78d4034cf2f9731d07b453ac9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2800,7 +2800,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d", bucket, src, srcp, dest, destp, state, sk_wmem_alloc_get(sp), - sk_rmem_alloc_get(sp), + udp_rqueue_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index d0390d844ac840e4db3da1f9257e14ded78ba333..d9ad986c7b2c9e073616c63d6d5ab376d2b72d5f 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, void *info) { - r->idiag_rqueue = sk_rmem_alloc_get(sk); + r->idiag_rqueue = udp_rqueue_get(sk); r->idiag_wqueue = sk_wmem_alloc_get(sk); } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 7985da72250709b387883cf1453df714ccfc6a6e..7bbae058a277170d7e5ee881b90866670feadef4 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2216,17 +2216,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev) return addrconf_ifid_ieee1394(eui, dev); case ARPHRD_TUNNEL6: case ARPHRD_IP6GRE: + case ARPHRD_RAWIP: return addrconf_ifid_ip6tnl(eui, dev); - case ARPHRD_RAWIP: { - struct in6_addr lladdr; - - if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) - get_random_bytes(eui, 8); - else - memcpy(eui, lladdr.s6_addr + 8, 8); - - return 0; - } } return -1; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 
3c3e73fbc88eb7006748fa4e512219d8ea45cea4..e438c709706a1ff19de7566d6f9df7839b7d6b63 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -1026,8 +1026,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, } EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl); -void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, - __u16 srcp, __u16 destp, int bucket) +void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, + __u16 srcp, __u16 destp, int rqueue, int bucket) { const struct in6_addr *dest, *src; __u8 state = sp->sk_state; @@ -1048,7 +1048,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, dest->s6_addr32[2], dest->s6_addr32[3], destp, state, sk_wmem_alloc_get(sp), - sk_rmem_alloc_get(sp), + rqueue, 0, 0L, 0, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), 0, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 8763bccad29f86a7ea78eb9d299dc041a5b11324..1084f22ccf39cbc2483295807f87c4a80a9c8a70 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -506,7 +506,8 @@ int ip6_forward(struct sk_buff *skb) send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ - if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { + if (IP6CB(skb)->iif == dst->dev->ifindex && + opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct inet_peer *peer; struct rt6_info *rt; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 565a0388587aa679f818993b61c7638faced1ba7..84ee2eb88121aa3a5add4004aba875adf7f58010 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1693,8 +1693,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < ETH_MIN_MTU) return -EINVAL; } - if (new_mtu > 0xFFF8 - dev->hard_header_len) - return -EINVAL; + if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) { + if (new_mtu > IP6_MAX_MTU - dev->hard_header_len) + return -EINVAL; + } else { + if (new_mtu > IP_MAX_MTU - dev->hard_header_len) + return -EINVAL; + } dev->mtu = new_mtu; return 0; } @@ -1842,7 +1847,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev) if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; dev->min_mtu = ETH_MIN_MTU; - dev->max_mtu = 0xFFF8 - dev->hard_header_len; + dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len; return 0; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index e1060f28410deb0df1e277bab90cf115d6679855..8015e74fd7d9d5ae1e99bfef2b54a4e87815e332 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1795,7 +1795,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns ret = 0; if (!ip6mr_new_table(net, v)) ret = -ENOMEM; - raw6_sk(sk)->ip6mr_table = v; + else + raw6_sk(sk)->ip6mr_table = v; rtnl_unlock(); return ret; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index dd28005efb97e645d70d3c2d751aa9c12501456c..d081db125905225c269bf17803bd26d576138a43 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1568,6 +1568,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL; bool ret; + if (netif_is_l3_master(skb->dev)) { + dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif); + if (!dev) + return; + } + if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) { ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n", dev->name); diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 
6acb2eecd986cbf64deb0fcdb47c3931cf18a4ed..c764c2a77d946c663804f72992da8f034f6b7a10 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6 fields such as the source, destination, flowlabel, hop-limit and the packet mark. +if NF_NAT_IPV6 + +config NFT_CHAIN_NAT_IPV6 + tristate "IPv6 nf_tables nat chain support" + help + This option enables the "nat" chain for IPv6 in nf_tables. This + chain type is used to perform Network Address Translation (NAT) + packet transformations such as the source, destination address and + source and destination ports. + +config NFT_MASQ_IPV6 + tristate "IPv6 masquerade support for nf_tables" + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV6 + help + This is the expression that provides IPv4 masquerading support for + nf_tables. + +config NFT_REDIR_IPV6 + tristate "IPv6 redirect support for nf_tables" + depends on NFT_REDIR + select NF_NAT_REDIRECT + help + This is the expression that provides IPv4 redirect support for + nf_tables. + +endif # NF_NAT_IPV6 + config NFT_REJECT_IPV6 select NF_REJECT_IPV6 default NFT_REJECT @@ -99,39 +127,12 @@ config NF_NAT_IPV6 if NF_NAT_IPV6 -config NFT_CHAIN_NAT_IPV6 - depends on NF_TABLES_IPV6 - tristate "IPv6 nf_tables nat chain support" - help - This option enables the "nat" chain for IPv6 in nf_tables. This - chain type is used to perform Network Address Translation (NAT) - packet transformations such as the source, destination address and - source and destination ports. - config NF_NAT_MASQUERADE_IPV6 tristate "IPv6 masquerade support" help This is the kernel functionality to provide NAT in the masquerade flavour (automatic source address selection) for IPv6. -config NFT_MASQ_IPV6 - tristate "IPv6 masquerade support for nf_tables" - depends on NF_TABLES_IPV6 - depends on NFT_MASQ - select NF_NAT_MASQUERADE_IPV6 - help - This is the expression that provides IPv4 masquerading support for - nf_tables. - -config NFT_REDIR_IPV6 - tristate "IPv6 redirect support for nf_tables" - depends on NF_TABLES_IPV6 - depends on NFT_REDIR - select NF_NAT_REDIRECT - help - This is the expression that provides IPv4 redirect support for - nf_tables. 
- endif # NF_NAT_IPV6 config IP6_NF_IPTABLES diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e07044a2487a7ec8416bbe78ae2917a73c2f70de..49aca95c8a020a93d73fb863b78252f5bffaa898 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1250,7 +1250,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb, keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; keys->addrs.v6addrs.src = key_iph->saddr; keys->addrs.v6addrs.dst = key_iph->daddr; - keys->tags.flow_label = ip6_flowinfo(key_iph); + keys->tags.flow_label = ip6_flowlabel(key_iph); keys->basic.ip_proto = key_iph->nexthdr; } @@ -1476,9 +1476,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, const struct in6_addr *daddr, *saddr; struct rt6_info *rt6 = (struct rt6_info *)dst; - if (rt6->rt6i_flags & RTF_LOCAL) - return; - if (dst_metric_locked(dst, RTAX_MTU)) return; diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 5fe13948491968f2121782a4e4c2d3b93abe6e17..bf4763fd68c22388f59cb68755ec799545e7fb29 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) hdrlen = (osrh->hdrlen + 1) << 3; tot_len = hdrlen + sizeof(*hdr); - err = skb_cow_head(skb, tot_len); + err = skb_cow_head(skb, tot_len + skb->mac_len); if (unlikely(err)) return err; @@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) hdrlen = (osrh->hdrlen + 1) << 3; - err = skb_cow_head(skb, hdrlen); + err = skb_cow_head(skb, hdrlen + skb->mac_len); if (unlikely(err)) return err; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index ad1e7e6ce0093ff590bcb1789068953a3ca20264..5d00a38cd1cbdb7ae12107b0adb11c208096bd63 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1360,7 +1360,7 @@ static void ipip6_tunnel_setup(struct net_device *dev) dev->hard_header_len = LL_MAX_HEADER + t_hlen; dev->mtu = ETH_DATA_LEN - t_hlen; dev->min_mtu = IPV6_MIN_MTU; - dev->max_mtu = 0xFFF8 - t_hlen; + dev->max_mtu = IP6_MAX_MTU - t_hlen; dev->flags = IFF_NOARP; netif_keep_dst(dev); dev->addr_len = 4; @@ -1572,7 +1572,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, if (tb[IFLA_MTU]) { u32 mtu = nla_get_u32(tb[IFLA_MTU]); - if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) + if (mtu >= IPV6_MIN_MTU && + mtu <= IP6_MAX_MTU - dev->hard_header_len) dev->mtu = mtu; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 99f27598d3c83bf99c4d0ba8b92f56ff0ae5222c..00b67c2adfbee5c59502c216b26e6b601c945bb1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1453,6 +1453,10 @@ static int tcp_v6_rcv(struct sk_buff *skb) reqsk_put(req); goto discard_it; } + if (tcp_checksum_complete(skb)) { + reqsk_put(req); + goto csum_error; + } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4ead3066676e62086076faf69123890545d3f691..a07d00b48e4180cd1efb768e8846dea96e92e332 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1524,7 +1524,8 @@ int udp6_seq_show(struct seq_file *seq, void *v) struct inet_sock *inet = inet_sk(v); __u16 srcp = ntohs(inet->inet_sport); __u16 destp = ntohs(inet->inet_dport); - ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); + __ip6_dgram_sock_seq_show(seq, v, srcp, destp, + udp_rqueue_get(v), bucket); } return 0; } diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 
01a4ff3df60b6015cfe4e7127cfdd139be03c4a1..9bf9974049185641dbd92839a4bdb6dfe9aceaea 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1672,7 +1672,7 @@ static struct file *kcm_clone(struct socket *osock) __module_get(newsock->ops->owner); newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, - &kcm_proto, true); + &kcm_proto, false); if (!newsk) { sock_release(newsock); return ERR_PTR(-ENOMEM); diff --git a/net/key/af_key.c b/net/key/af_key.c index 2ad693232f748e357a82fb19959cb5917bd93cb7..3b209cbfe1dfee0c3e299989e1dcab5d1d5ea685 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -437,6 +437,24 @@ static int verify_address_len(const void *p) return 0; } +static inline int sadb_key_len(const struct sadb_key *key) +{ + int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8); + + return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes, + sizeof(uint64_t)); +} + +static int verify_key_len(const void *p) +{ + const struct sadb_key *key = p; + + if (sadb_key_len(key) > key->sadb_key_len) + return -EINVAL; + + return 0; +} + static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx) { return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + @@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void * return -EINVAL; if (ext_hdrs[ext_type-1] != NULL) return -EINVAL; - if (ext_type == SADB_EXT_ADDRESS_SRC || - ext_type == SADB_EXT_ADDRESS_DST || - ext_type == SADB_EXT_ADDRESS_PROXY || - ext_type == SADB_X_EXT_NAT_T_OA) { + switch (ext_type) { + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + case SADB_X_EXT_NAT_T_OA: if (verify_address_len(p)) return -EINVAL; - } - if (ext_type == SADB_X_EXT_SEC_CTX) { + break; + case SADB_X_EXT_SEC_CTX: if (verify_sec_ctx_len(p)) return -EINVAL; + break; + case SADB_EXT_KEY_AUTH: + case SADB_EXT_KEY_ENCRYPT: + if (verify_key_len(p)) + return -EINVAL; + break; + default: + break; } ext_hdrs[ext_type-1] = (void *) p; } @@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; if (key != NULL && sa->sadb_sa_auth != SADB_X_AALG_NULL && - ((key->sadb_key_bits+7) / 8 == 0 || - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t))) + key->sadb_key_bits == 0) return ERR_PTR(-EINVAL); key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1]; if (key != NULL && sa->sadb_sa_encrypt != SADB_EALG_NULL && - ((key->sadb_key_bits+7) / 8 == 0 || - (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t))) + key->sadb_key_bits == 0) return ERR_PTR(-EINVAL); x = xfrm_state_alloc(net); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index bef516ec47f94c19f57da37d80c744bb534deeb4..197947a07f83cfe78a9b4095872c53604faf8e7f 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -8,6 +8,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -987,6 +988,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, sta->ampdu_mlme.addba_req_num[tid] = 0; + tid_tx->timeout = + le16_to_cpu(mgmt->u.action.u.addba_resp.timeout); + if (tid_tx->timeout) { mod_timer(&tid_tx->session_timer, TU_TO_EXP_TIME(tid_tx->timeout)); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 
9115cc52ce8312487c74611a59615075708d6f42..052dbd4fa3664659aac9d28a0da3bf0896760fc3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -35,6 +35,7 @@ #define IEEE80211_AUTH_TIMEOUT (HZ / 5) #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) +#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) @@ -3798,16 +3799,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) tx_flags); if (tx_flags == 0) { - auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; - auth_data->timeout_started = true; - run_again(sdata, auth_data->timeout); + if (auth_data->algorithm == WLAN_AUTH_SAE) + auth_data->timeout = jiffies + + IEEE80211_AUTH_TIMEOUT_SAE; + else + auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; } else { auth_data->timeout = round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); - auth_data->timeout_started = true; - run_again(sdata, auth_data->timeout); } + auth_data->timeout_started = true; + run_again(sdata, auth_data->timeout); + return 0; } @@ -3878,8 +3882,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) ifmgd->status_received = false; if (ifmgd->auth_data && ieee80211_is_auth(fc)) { if (status_acked) { - ifmgd->auth_data->timeout = - jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; + if (ifmgd->auth_data->algorithm == + WLAN_AUTH_SAE) + ifmgd->auth_data->timeout = + jiffies + + IEEE80211_AUTH_TIMEOUT_SAE; + else + ifmgd->auth_data->timeout = + jiffies + + IEEE80211_AUTH_TIMEOUT_SHORT; run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 73429841f1155aa95ec84bee5b3afbfa893fd0eb..ccb65f18df5d731a803d1ab22d5556307bd5a43e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -4,6 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1138,7 +1139,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, } /* reset session timer */ - if (reset_agg_timer && tid_tx->timeout) + if (reset_agg_timer) tid_tx->last_tx = jiffies; return queued; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 689e9c0570ba7c136c61996477e9c3d448756204..cf30c440f7a7a8ede7d17319bc295ab828347236 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4977,7 +4977,7 @@ static void nft_chain_commit_update(struct nft_trans *trans) struct nft_base_chain *basechain; if (nft_trans_chain_name(trans)) - strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); + swap(trans->ctx.chain->name, nft_trans_chain_name(trans)); if (!nft_is_base_chain(trans->ctx.chain)) return; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index bd0975d7dd6fe235418b904fe1b7cddd600b1663..5e0d367a09882a1a78e15d1f1d6f0eaed7306abe 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -875,22 +875,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb, struct nft_object *obj, bool reset) { const struct nft_ct_helper_obj *priv = nft_obj_data(obj); - const struct nf_conntrack_helper *helper = priv->helper4; + const struct nf_conntrack_helper *helper; u16 family; + if (priv->helper4 && priv->helper6) { + family = 
NFPROTO_INET; + helper = priv->helper4; + } else if (priv->helper6) { + family = NFPROTO_IPV6; + helper = priv->helper6; + } else { + family = NFPROTO_IPV4; + helper = priv->helper4; + } + if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) return -1; if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) return -1; - if (priv->helper4 && priv->helper6) - family = NFPROTO_INET; - else if (priv->helper6) - family = NFPROTO_IPV6; - else - family = NFPROTO_IPV4; - if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) return -1; diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 18cd4a5a7a9825f8175e2ae7afa691d635742f90..91639f559e9b9750eb0b3734c5d3ecaf12c4e786 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -76,6 +76,7 @@ struct idletimer_tg { bool send_nl_msg; bool active; uid_t uid; + bool suspend_time_valid; }; static LIST_HEAD(idletimer_tg_list); @@ -245,8 +246,13 @@ static int idletimer_resume(struct notifier_block *notifier, switch (pm_event) { case PM_SUSPEND_PREPARE: get_monotonic_boottime(&timer->last_suspend_time); + timer->suspend_time_valid = true; break; case PM_POST_SUSPEND: + if (!timer->suspend_time_valid) + break; + timer->suspend_time_valid = false; + spin_lock_bh(×tamp_lock); if (!timer->active) { spin_unlock_bh(×tamp_lock); @@ -281,7 +287,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) { int ret; - info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL); + info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL); if (!info->timer) { ret = -ENOMEM; goto out; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 8351faabba62a4d8e1af7b9116e44a3261010cfa..4fe2e34522d6c1512d3235aa7ab8199fc5f8138f 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2046,7 +2046,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb, return -EINVAL; *len -= sizeof(vnet_hdr); - if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true)) + if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0)) return -EINVAL; return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); @@ -2313,7 +2313,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (do_vnet) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), - vio_le(), true)) { + vio_le(), true, 0)) { spin_lock(&sk->sk_receive_queue.lock); goto drop_n_account; } @@ -2920,7 +2920,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(offset < 0)) goto out_free; } else if (reserve) { - skb_push(skb, reserve); + skb_reserve(skb, -reserve); } /* Returns -EFAULT on error */ @@ -4293,7 +4293,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; if (po->tp_version >= TPACKET_V3 && req->tp_block_size <= - BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) + BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr)) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index a66b217ba723e376e5ce0f9c15feddd203356196..9b6cd1ef83c98af03ee24402be7f2cdc996aa311 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -15,6 +15,8 @@ #include #include #include /* For TIOCINQ/OUTQ */ +#include +#include #include @@ -27,6 +29,10 @@ #define QRTR_MIN_EPH_SOCKET 0x4000 #define QRTR_MAX_EPH_SOCKET 0x7fff +/* qrtr socket states */ +#define QRTR_STATE_MULTI -2 +#define QRTR_STATE_INIT -1 + /** * struct qrtr_hdr_v1 
- (I|R)PCrouter packet header version 1 * @version: protocol version @@ -93,6 +99,8 @@ struct qrtr_sock { struct sock sk; struct sockaddr_qrtr us; struct sockaddr_qrtr peer; + + int state; }; static inline struct qrtr_sock *qrtr_sk(struct sock *sk) @@ -108,7 +116,7 @@ static RADIX_TREE(qrtr_nodes, GFP_KERNEL); /* broadcast list */ static LIST_HEAD(qrtr_all_epts); /* lock for qrtr_nodes, qrtr_all_epts and node reference */ -static DEFINE_MUTEX(qrtr_node_lock); +static DECLARE_RWSEM(qrtr_node_lock); /* local port allocation management */ static DEFINE_IDR(qrtr_ports); @@ -120,6 +128,9 @@ static DEFINE_MUTEX(qrtr_port_lock); * @ep: endpoint * @ref: reference count for node * @nid: node id + * @qrtr_tx_flow: remote port tx flow control list + * @resume_tx: wait until remote port acks control flag + * @qrtr_tx_lock: lock for qrtr_tx_flow * @rx_queue: receive queue * @work: scheduled work struct for recv work * @item: list item for broadcast list @@ -130,11 +141,22 @@ struct qrtr_node { struct kref ref; unsigned int nid; + struct radix_tree_root qrtr_tx_flow; + struct wait_queue_head resume_tx; + struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */ + struct sk_buff_head rx_queue; struct work_struct work; struct list_head item; }; +struct qrtr_tx_flow { + atomic_t pending; +}; + +#define QRTR_TX_FLOW_HIGH 10 +#define QRTR_TX_FLOW_LOW 5 + static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to); @@ -142,6 +164,32 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to); +static bool refcount_dec_and_rwsem_lock(refcount_t *r, + struct rw_semaphore *sem) +{ + if (refcount_dec_not_one(r)) + return false; + + down_write(sem); + if (!refcount_dec_and_test(r)) { + up_write(sem); + return false; + } + + return true; +} + +static inline int kref_put_rwsem_lock(struct kref *kref, + void (*release)(struct kref *kref), + struct rw_semaphore *sem) +{ + if (refcount_dec_and_rwsem_lock(&kref->refcount, sem)) { + release(kref); + return 1; + } + return 0; +} + /* Release node resources and free the node. * * Do not call directly, use qrtr_node_release. 
To be used with @@ -149,9 +197,9 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, */ static void __qrtr_node_release(struct kref *kref) { - struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); struct radix_tree_iter iter; - void **slot; + struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); + void __rcu **slot; if (node->nid != QRTR_EP_NID_AUTO) { radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) { @@ -161,8 +209,15 @@ static void __qrtr_node_release(struct kref *kref) } list_del(&node->item); - mutex_unlock(&qrtr_node_lock); + up_write(&qrtr_node_lock); + + /* Free tx flow counters */ + radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { + radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot); + kfree(*slot); + } + flush_work(&node->work); skb_queue_purge(&node->rx_queue); kfree(node); } @@ -180,7 +235,96 @@ static void qrtr_node_release(struct qrtr_node *node) { if (!node) return; - kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock); + kref_put_rwsem_lock(&node->ref, __qrtr_node_release, &qrtr_node_lock); +} + +/** + * qrtr_tx_resume() - reset flow control counter + * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on + * @skb: skb for resume tx control packet + */ +static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb) +{ + struct qrtr_ctrl_pkt *pkt; + struct qrtr_tx_flow *flow; + unsigned long key; + int dest_node; + int dest_port; + + pkt = (struct qrtr_ctrl_pkt *)skb->data; + if (le32_to_cpu(pkt->cmd) != QRTR_TYPE_RESUME_TX) + return; + + dest_node = le32_to_cpu(pkt->client.node); + dest_port = le32_to_cpu(pkt->client.port); + key = (u64)dest_node << 32 | dest_port; + + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (flow) + atomic_set(&flow->pending, 0); + + wake_up_interruptible_all(&node->resume_tx); +} + +/** + * qrtr_tx_wait() - flow control for outgoing packets + * @node: qrtr_node that the packet is to be sent to + * @dest_node: node id of the destination + * @dest_port: port number of the destination + * @type: type of message + * + * The flow control scheme is based around the low and high "watermarks". When + * the low watermark is passed the confirm_rx flag is set on the outgoing + * message, which will trigger the remote to send a control message of the type + * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit, + * further transmission should be paused.
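As a worked illustration of the watermark scheme just described (example only, not part of the patch), consider a stream of QRTR_TYPE_DATA packets to a single (node, port) destination with the constants defined earlier, QRTR_TX_FLOW_LOW = 5 and QRTR_TX_FLOW_HIGH = 10:

	packets 1-4:  the per-destination pending count rises to 4, confirm_rx stays 0
	packet 5:     pending reaches the low watermark, so the packet goes out with
	              confirm_rx set, asking the remote to answer with QRTR_TYPE_RESUME_TX
	packets 6-10: still transmitted, confirm_rx stays 0
	packet 11:    pending sits at the high watermark, so qrtr_tx_wait() (below) blocks
	              the sender until the RESUME_TX resets the count to 0, or returns
	              -EPIPE if the endpoint has gone away

In the common case the RESUME_TX arrives long before the high watermark is reached, so senders never actually stall; control messages are exempt from the scheme altogether.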
+ * + * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure + */ +static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port, + int type) +{ + struct qrtr_tx_flow *flow; + unsigned long key = (u64)dest_node << 32 | dest_port; + int confirm_rx = 0; + int ret; + + /* Never set confirm_rx on non-data packets */ + if (type != QRTR_TYPE_DATA) + return 0; + + mutex_lock(&node->qrtr_tx_lock); + flow = radix_tree_lookup(&node->qrtr_tx_flow, key); + if (!flow) { + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (!flow) + return 1; + else + radix_tree_insert(&node->qrtr_tx_flow, key, flow); + } + mutex_unlock(&node->qrtr_tx_lock); + + for (;;) { + ret = wait_event_interruptible(node->resume_tx, + atomic_read(&flow->pending) < + QRTR_TX_FLOW_HIGH || !node->ep); + if (ret) + return ret; + + if (!node->ep) + return -EPIPE; + + mutex_lock(&node->qrtr_tx_lock); + if (atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH) { + atomic_inc(&flow->pending); + confirm_rx = atomic_read(&flow->pending) == QRTR_TX_FLOW_LOW; + mutex_unlock(&node->qrtr_tx_lock); + break; + } + mutex_unlock(&node->qrtr_tx_lock); + } + + return confirm_rx; } /* Pass an outgoing packet socket buffer to the endpoint driver. */ @@ -189,19 +333,31 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, struct sockaddr_qrtr *to) { struct qrtr_hdr_v1 *hdr; + int confirm_rx; size_t len = skb->len; int rc = -ENODEV; + confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type); + if (confirm_rx < 0) { + kfree_skb(skb); + return confirm_rx; + } + hdr = skb_push(skb, sizeof(*hdr)); hdr->version = cpu_to_le32(QRTR_PROTO_VER_1); hdr->type = cpu_to_le32(type); hdr->src_node_id = cpu_to_le32(from->sq_node); hdr->src_port_id = cpu_to_le32(from->sq_port); - hdr->dst_node_id = cpu_to_le32(to->sq_node); - hdr->dst_port_id = cpu_to_le32(to->sq_port); + if (to->sq_port == QRTR_PORT_CTRL) { + hdr->dst_node_id = cpu_to_le32(node->nid); + hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST); + } else { + hdr->dst_node_id = cpu_to_le32(to->sq_node); + hdr->dst_port_id = cpu_to_le32(to->sq_port); + } hdr->size = cpu_to_le32(len); - hdr->confirm_rx = 0; + hdr->confirm_rx = !!confirm_rx; skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); @@ -223,10 +379,10 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid) { struct qrtr_node *node; - mutex_lock(&qrtr_node_lock); + down_read(&qrtr_node_lock); node = radix_tree_lookup(&qrtr_nodes, nid); node = qrtr_node_acquire(node); - mutex_unlock(&qrtr_node_lock); + up_read(&qrtr_node_lock); return node; } @@ -241,13 +397,13 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid) if (nid == QRTR_EP_NID_AUTO) return; - mutex_lock(&qrtr_node_lock); + down_write(&qrtr_node_lock); if (!radix_tree_lookup(&qrtr_nodes, nid)) radix_tree_insert(&qrtr_nodes, nid, node); if (node->nid == QRTR_EP_NID_AUTO) node->nid = nid; - mutex_unlock(&qrtr_node_lock); + up_write(&qrtr_node_lock); } /** @@ -321,7 +477,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) if (len != ALIGN(size, 4) + hdrlen) goto err; - if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA) + if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && + cb->type != QRTR_TYPE_RESUME_TX) goto err; skb_put_data(skb, data + hdrlen, size); @@ -372,47 +529,28 @@ static void qrtr_port_put(struct qrtr_sock *ipc); static void qrtr_node_rx_work(struct work_struct *work) { struct qrtr_node *node = container_of(work, struct qrtr_node, work); - struct 
qrtr_ctrl_pkt *pkt; - struct sockaddr_qrtr dst; - struct sockaddr_qrtr src; struct sk_buff *skb; while ((skb = skb_dequeue(&node->rx_queue)) != NULL) { struct qrtr_sock *ipc; struct qrtr_cb *cb; - int confirm; cb = (struct qrtr_cb *)skb->cb; - src.sq_node = cb->src_node; - src.sq_port = cb->src_port; - dst.sq_node = cb->dst_node; - dst.sq_port = cb->dst_port; - confirm = !!cb->confirm_rx; - qrtr_node_assign(node, cb->src_node); - ipc = qrtr_port_lookup(cb->dst_port); - if (!ipc) { - kfree_skb(skb); + if (cb->type == QRTR_TYPE_RESUME_TX) { + qrtr_tx_resume(node, skb); + consume_skb(skb); } else { - if (sock_queue_rcv_skb(&ipc->sk, skb)) + ipc = qrtr_port_lookup(cb->dst_port); + if (!ipc) { kfree_skb(skb); + } else { + if (sock_queue_rcv_skb(&ipc->sk, skb)) + kfree_skb(skb); - qrtr_port_put(ipc); - } - - if (confirm) { - skb = qrtr_alloc_ctrl_packet(&pkt); - if (!skb) - break; - - pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX); - pkt->client.node = cpu_to_le32(dst.sq_node); - pkt->client.port = cpu_to_le32(dst.sq_port); - - if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, - &dst, &src)) - break; + qrtr_port_put(ipc); + } } } } @@ -443,11 +581,15 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid) node->nid = QRTR_EP_NID_AUTO; node->ep = ep; + mutex_init(&node->qrtr_tx_lock); + INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL); + init_waitqueue_head(&node->resume_tx); + qrtr_node_assign(node, nid); - mutex_lock(&qrtr_node_lock); + down_write(&qrtr_node_lock); list_add(&node->item, &qrtr_all_epts); - mutex_unlock(&qrtr_node_lock); + up_write(&qrtr_node_lock); ep->node = node; return 0; @@ -477,6 +619,9 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep) qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst); } + /* Wake up any transmitters waiting for resume-tx from the node */ + wake_up_interruptible_all(&node->resume_tx); + qrtr_node_release(node); ep->node = NULL; } @@ -508,29 +653,59 @@ static void qrtr_port_put(struct qrtr_sock *ipc) sock_put(&ipc->sk); } -/* Remove port assignment. */ -static void qrtr_port_remove(struct qrtr_sock *ipc) +static void qrtr_send_del_client(struct qrtr_sock *ipc) { struct qrtr_ctrl_pkt *pkt; - struct sk_buff *skb; - int port = ipc->us.sq_port; struct sockaddr_qrtr to; + struct qrtr_node *node; + struct sk_buff *skbn; + struct sk_buff *skb; + int type = QRTR_TYPE_DEL_CLIENT; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) + return; to.sq_family = AF_QIPCRTR; to.sq_node = QRTR_NODE_BCAST; to.sq_port = QRTR_PORT_CTRL; - skb = qrtr_alloc_ctrl_packet(&pkt); - if (skb) { - pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); - pkt->client.node = cpu_to_le32(ipc->us.sq_node); - pkt->client.port = cpu_to_le32(ipc->us.sq_port); + pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); + pkt->client.node = cpu_to_le32(ipc->us.sq_node); + pkt->client.port = cpu_to_le32(ipc->us.sq_port); + + skb_set_owner_w(skb, &ipc->sk); + + if (ipc->state == QRTR_STATE_MULTI) { + qrtr_bcast_enqueue(NULL, skb, type, &ipc->us, &to); + return; + } - skb_set_owner_w(skb, &ipc->sk); - qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us, - &to); + if (ipc->state > QRTR_STATE_INIT) { + node = qrtr_node_lookup(ipc->state); + if (!node) + goto exit; + + skbn = skb_clone(skb, GFP_KERNEL); + if (!skbn) { + qrtr_node_release(node); + goto exit; + } + + skb_set_owner_w(skbn, &ipc->sk); + qrtr_node_enqueue(node, skbn, type, &ipc->us, &to); + qrtr_node_release(node); } +exit: + qrtr_local_enqueue(NULL, skb, type, &ipc->us, &to); +} +/* Remove port assignment. 
*/ +static void qrtr_port_remove(struct qrtr_sock *ipc) +{ + int port = ipc->us.sq_port; + + qrtr_send_del_client(ipc); if (port == QRTR_PORT_CTRL) port = 0; @@ -715,7 +890,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, { struct sk_buff *skbn; - mutex_lock(&qrtr_node_lock); + down_read(&qrtr_node_lock); list_for_each_entry(node, &qrtr_all_epts, item) { if (node->nid == QRTR_EP_NID_AUTO) continue; @@ -723,14 +898,10 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb, if (!skbn) break; skb_set_owner_w(skbn, skb->sk); - to->sq_node = cpu_to_le32(node->nid); - to->sq_port = QRTR_NODE_BCAST; qrtr_node_enqueue(node, skbn, type, from, to); } - mutex_unlock(&qrtr_node_lock); + up_read(&qrtr_node_lock); - to->sq_node = QRTR_NODE_BCAST; - to->sq_port = QRTR_PORT_CTRL; qrtr_local_enqueue(node, skb, type, from, to); return 0; @@ -796,6 +967,11 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) release_sock(sk); return -ECONNRESET; } + + if (ipc->state > QRTR_STATE_INIT && ipc->state != node->nid) + ipc->state = QRTR_STATE_MULTI; + else if (ipc->state == QRTR_STATE_INIT) + ipc->state = node->nid; } plen = (len + 3) & ~3; @@ -835,6 +1011,36 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) return rc; } +static int qrtr_resume_tx(struct qrtr_cb *cb) +{ + struct sockaddr_qrtr remote = { AF_QIPCRTR, + cb->src_node, cb->src_port }; + struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port }; + struct qrtr_ctrl_pkt *pkt; + struct qrtr_node *node; + struct sk_buff *skb; + int ret; + + node = qrtr_node_lookup(remote.sq_node); + if (!node) + return -EINVAL; + + skb = qrtr_alloc_ctrl_packet(&pkt); + if (!skb) + return -ENOMEM; + + pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX); + pkt->client.node = cpu_to_le32(cb->dst_node); + pkt->client.port = cpu_to_le32(cb->dst_port); + + ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, + &local, &remote); + + qrtr_node_release(node); + + return ret; +} + static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { @@ -857,6 +1063,7 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, release_sock(sk); return rc; } + cb = (struct qrtr_cb *)skb->cb; copied = skb->len; if (copied > size) { @@ -870,7 +1077,6 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, rc = copied; if (addr) { - cb = (struct qrtr_cb *)skb->cb; addr->sq_family = AF_QIPCRTR; addr->sq_node = cb->src_node; addr->sq_port = cb->src_port; @@ -878,6 +1084,9 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg, } out: + if (cb->confirm_rx) + qrtr_resume_tx(cb); + skb_free_datagram(sk, skb); release_sock(sk); @@ -1084,6 +1293,7 @@ static int qrtr_create(struct net *net, struct socket *sock, ipc->us.sq_family = AF_QIPCRTR; ipc->us.sq_node = qrtr_local_nid; ipc->us.sq_port = 0; + ipc->state = QRTR_STATE_INIT; return 0; } diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 80fb6f63e768d3461c47533615c875526bb8bab9..6e721c449c4bf373b28e89d25027ce50b1902319 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -546,7 +546,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, ic->i_send_cq, ic->i_recv_cq); - return ret; + goto out; sends_out: vfree(ic->i_sends); @@ -571,6 +571,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_send_cq = NULL; rds_ibdev_out: rds_ib_remove_conn(rds_ibdev, conn); +out: rds_ib_dev_put(rds_ibdev); return ret; 
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 7c1cb08874d5152c39ac2e077d67e439c97df17f..2a32f60652d82042a637d270efcbc197691cb549 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -302,7 +302,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, memset(&cp, 0, sizeof(cp)); cp.local = rx->local; cp.key = key; - cp.security_level = 0; + cp.security_level = rx->min_sec_level; cp.exclusive = false; cp.service_id = srx->srx_service; call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index 38b99db30e541e789b838e10600944f11b4124e0..2af42c7d5b82a55f106232d8f04896f256574c7a 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c @@ -133,22 +133,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) } } - /* we want to receive ICMP errors */ - opt = 1; - ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; - } + switch (local->srx.transport.family) { + case AF_INET: + /* we want to receive ICMP errors */ + opt = 1; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } - /* we want to set the don't fragment bit */ - opt = IP_PMTUDISC_DO; - ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, - (char *) &opt, sizeof(opt)); - if (ret < 0) { - _debug("setsockopt failed"); - goto error; + /* we want to set the don't fragment bit */ + opt = IP_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + break; + + case AF_INET6: + /* we want to receive ICMP errors */ + opt = 1; + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + + /* we want to set the don't fragment bit */ + opt = IPV6_PMTUDISC_DO; + ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, + (char *) &opt, sizeof(opt)); + if (ret < 0) { + _debug("setsockopt failed"); + goto error; + } + break; + + default: + BUG(); } /* set the socket up */ diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index b5f80e675783b3cb3fcd02a67dd04e199b39ee85..f3ed63aa41110f2a029829a16c5da2aeccc106cc 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a, int bind) kfree(d->tcfd_defdata); } -static int alloc_defdata(struct tcf_defact *d, char *defdata) +static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata) { d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL); if (unlikely(!d->tcfd_defdata)) return -ENOMEM; - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); + nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); return 0; } -static void reset_policy(struct tcf_defact *d, char *defdata, +static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata, struct tc_defact *p) { spin_lock_bh(&d->tcf_lock); d->tcf_action = p->action; memset(d->tcfd_defdata, 0, SIMP_MAX_DATA); - strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); + nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); spin_unlock_bh(&d->tcf_lock); } @@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, struct tcf_defact *d; bool exists = false; 
int ret = 0, err; - char *defdata; if (nla == NULL) return -EINVAL; @@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, return -EINVAL; } - defdata = nla_data(tb[TCA_DEF_DATA]); - if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, &act_simp_ops, bind, false); @@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, return ret; d = to_defact(*a); - ret = alloc_defdata(d, defdata); + ret = alloc_defdata(d, tb[TCA_DEF_DATA]); if (ret < 0) { tcf_idr_release(*a, bind); return ret; @@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla, if (!ovr) return -EEXIST; - reset_policy(d, defdata, parm); + reset_policy(d, tb[TCA_DEF_DATA], parm); } if (ret == ACT_P_CREATED) diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 59949d61f20da1031b8b47712f817c991c6bed60..6e749497009e83d07470c0549390002f5be19290 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla, return 0; if (!flags) { - tcf_idr_release(*a, bind); + if (exists) + tcf_idr_release(*a, bind); return -EINVAL; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 7a838d1c1c0059bddb3da24a59ad73689fec1877..1879665e5a2bcd567d0a3926d329d7ccf8836f8f 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1007,7 +1007,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, return 0; errout_idr: - if (fnew->handle) + if (!fold) idr_remove_ext(&head->handle_idr, fnew->handle); errout: tcf_exts_destroy(&fnew->exts); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 7ef77fd7b52a1e8932efb534d9e4782d9f371ec8..e0c2a4e2303937eff13aaf57d3676da870a231b1 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -637,7 +637,7 @@ unsigned long sctp_transport_timeout(struct sctp_transport *trans) trans->state != SCTP_PF) timeout += trans->hbinterval; - return timeout; + return max_t(unsigned long, timeout, HZ / 5); } /* Reset transport variables to their initial values */ diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index f9c289e05707bcd8f29e6d46237b899c98f40b4b..654a8123840639c48b356abe8e958dbe6c549ddb 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1264,8 +1264,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page, smc = smc_sk(sk); lock_sock(sk); - if (sk->sk_state != SMC_ACTIVE) + if (sk->sk_state != SMC_ACTIVE) { + release_sock(sk); goto out; + } + release_sock(sk); if (smc->use_fallback) rc = kernel_sendpage(smc->clcsock, page, offset, size, flags); @@ -1273,7 +1276,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page, rc = sock_no_sendpage(sock, page, offset, size, flags); out: - release_sock(sk); return rc; } diff --git a/net/socket.c b/net/socket.c index 690d308de7c9611695406b46e01dc9bd342cad0a..84faf10210e8d43dbb5f8aa64d117ae4c6f8cfd3 100644 --- a/net/socket.c +++ b/net/socket.c @@ -550,7 +550,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) if (!err && (iattr->ia_valid & ATTR_UID)) { struct socket *sock = SOCKET_I(d_inode(dentry)); - sock->sk->sk_uid = iattr->ia_uid; + if (sock->sk) + sock->sk->sk_uid = iattr->ia_uid; + else + err = -ENOENT; } return err; @@ -600,12 +603,16 @@ EXPORT_SYMBOL(sock_alloc); * an inode not a file. 
*/ -void sock_release(struct socket *sock) +static void __sock_release(struct socket *sock, struct inode *inode) { if (sock->ops) { struct module *owner = sock->ops->owner; + if (inode) + inode_lock(inode); sock->ops->release(sock); + if (inode) + inode_unlock(inode); sock->ops = NULL; module_put(owner); } @@ -620,6 +627,11 @@ void sock_release(struct socket *sock) } sock->file = NULL; } + +void sock_release(struct socket *sock) +{ + __sock_release(sock, NULL); +} EXPORT_SYMBOL(sock_release); void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) @@ -1134,7 +1146,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma) static int sock_close(struct inode *inode, struct file *filp) { - sock_release(SOCKET_I(inode)); + __sock_release(SOCKET_I(inode), inode); return 0; } diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 491ae9fc561f3e66370d220c51530b6d9b6d1f2d..991d5a96f35b2c2ebe48a7cc045e16af7925cff2 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -229,7 +229,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, */ *ppages = alloc_page(GFP_ATOMIC); if (!*ppages) - return -EAGAIN; + return -ENOBUFS; } seg->mr_page = *ppages; seg->mr_offset = (char *)page_base; diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 0fcfb3916dcf2f830f9f8add1bbda33968ca3a19..254ddc2c39146135e0206d44c928a249349e1296 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -768,7 +768,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg, ret = tipc_bearer_get_name(net, bearer_name, bearer_id); if (ret || !mon) - return -EINVAL; + return 0; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_MON_GET); diff --git a/net/tipc/node.c b/net/tipc/node.c index f6c5743c170e74cbd28c3bbab6c0c428e4e63f47..42e9bdcc4bb6eee93b73b64d789d82dce71a09a7 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1831,6 +1831,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info) int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; struct tipc_nl_msg msg; char *name; int err; @@ -1838,9 +1839,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) msg.portid = info->snd_portid; msg.seq = info->snd_seq; - if (!info->attrs[TIPC_NLA_LINK_NAME]) + if (!info->attrs[TIPC_NLA_LINK]) return -EINVAL; - name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); + + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, + info->attrs[TIPC_NLA_LINK], + tipc_nl_link_policy, info->extack); + if (err) + return err; + + if (!attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + name = nla_data(attrs[TIPC_NLA_LINK_NAME]); msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg.skb) @@ -2113,8 +2124,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) struct net *net = sock_net(skb->sk); u32 prev_bearer = cb->args[0]; struct tipc_nl_msg msg; + int bearer_id; int err; - int i; if (prev_bearer == MAX_BEARERS) return 0; @@ -2124,16 +2135,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb) msg.seq = cb->nlh->nlmsg_seq; rtnl_lock(); - for (i = prev_bearer; i < MAX_BEARERS; i++) { - prev_bearer = i; - err = __tipc_nl_add_monitor(net, &msg, prev_bearer); + for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { + err = __tipc_nl_add_monitor(net, &msg, bearer_id); if (err) - goto out; + 
break; } - -out: rtnl_unlock(); - cb->args[0] = prev_bearer; + cb->args[0] = bearer_id; return skb->len; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 83f886d7c1f8b8b67b3d7097822875b609e8a42b..3c86614462f6d31c3c54caa7809fc82f92834e5f 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -211,18 +211,12 @@ static void tls_free_both_sg(struct sock *sk) } static int tls_do_encryption(struct tls_context *tls_ctx, - struct tls_sw_context *ctx, size_t data_len, - gfp_t flags) + struct tls_sw_context *ctx, + struct aead_request *aead_req, + size_t data_len) { - unsigned int req_size = sizeof(struct aead_request) + - crypto_aead_reqsize(ctx->aead_send); - struct aead_request *aead_req; int rc; - aead_req = kzalloc(req_size, flags); - if (!aead_req) - return -ENOMEM; - ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size; ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size; @@ -235,7 +229,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx, ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size; ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size; - kfree(aead_req); return rc; } @@ -244,8 +237,14 @@ static int tls_push_record(struct sock *sk, int flags, { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx); + struct aead_request *req; int rc; + req = kzalloc(sizeof(struct aead_request) + + crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation); + if (!req) + return -ENOMEM; + sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1); sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1); @@ -261,15 +260,14 @@ static int tls_push_record(struct sock *sk, int flags, tls_ctx->pending_open_record_frags = 0; set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags); - rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size, - sk->sk_allocation); + rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size); if (rc < 0) { /* If we are called from write_space and * we fail, we need to set this SOCK_NOSPACE * to trigger another write_space in the future. 
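In the tls_sw.c change above, the aead_request is now allocated once in tls_push_record(), handed to tls_do_encryption(), and released through the out_req label on both the success and failure paths. A compact sketch of that allocate-in-caller, single-cleanup-label pattern (generic C, not the TLS code):

#include <stdlib.h>

struct request { int data; };

/* The helper no longer allocates; it only uses the request it is given. */
static int do_encrypt(struct request *req)
{
	req->data = 1;
	return 0;		/* 0 on success, negative error otherwise */
}

static int push_record(void)
{
	struct request *req;
	int rc;

	req = calloc(1, sizeof(*req));	/* single allocation in the caller */
	if (!req)
		return -1;

	rc = do_encrypt(req);
	if (rc < 0)
		goto out_req;		/* failure path still reaches the free */

	/* ... post-encryption bookkeeping on success ... */

out_req:
	free(req);			/* one cleanup point for every path */
	return rc;
}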
*/ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - return rc; + goto out_req; } free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem, @@ -284,6 +282,8 @@ static int tls_push_record(struct sock *sk, int flags, tls_err_abort(sk); tls_advance_record_sn(sk, tls_ctx); +out_req: + kfree(req); return rc; } diff --git a/net/wireless/ap.c b/net/wireless/ap.c index 63682176c96cb969137b5b6e857b8bbe361f3fea..882eb864cb443c6f0006a8a3c91fae2d58ae931c 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c @@ -26,8 +26,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, return -ENOENT; err = rdev_stop_ap(rdev, dev); + wdev->beacon_interval = 0; if (!err) { - wdev->beacon_interval = 0; memset(&wdev->chandef, 0, sizeof(wdev->chandef)); wdev->ssid_len = 0; rdev_set_qos_map(rdev, dev, NULL); diff --git a/net/wireless/chan.c b/net/wireless/chan.c index fad1b5baf8ff0e672419fd0d058ce0c7c8eb88b1..ef5016bf9549c0bad15e1f82cd3bffcbb0316006 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -581,6 +581,10 @@ static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy, { struct ieee80211_channel *c; u32 freq, start_freq, end_freq; + bool dfs_offload; + + dfs_offload = wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_DFS_OFFLOAD); start_freq = cfg80211_get_start_freq(center_freq, bandwidth); end_freq = cfg80211_get_end_freq(center_freq, bandwidth); @@ -598,8 +602,9 @@ static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy, if (c->flags & IEEE80211_CHAN_DISABLED) return false; - if ((c->flags & IEEE80211_CHAN_RADAR) && - (c->dfs_state != NL80211_DFS_AVAILABLE)) + if ((c->flags & IEEE80211_CHAN_RADAR) && + (c->dfs_state != NL80211_DFS_AVAILABLE) && + !(c->dfs_state == NL80211_DFS_USABLE && dfs_offload)) return false; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 45cbade9ad68cfdd4ecc198b0abced521d8ed974..3ff638ce14e9f9cf1def9971b20735e1b0d94b21 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1008,6 +1008,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); list_del_rcu(&wdev->list); + synchronize_rcu(); rdev->devlist_generation++; switch (wdev->iftype) { diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index e7c64a8dce54a173f4c688656968c8c8bdd71b11..e33da93eb31a9fa8a01a9904ac04289b3a9ae3fd 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -872,7 +872,7 @@ void cfg80211_cac_event(struct net_device *netdev, trace_cfg80211_cac_event(netdev, event); - if (WARN_ON(!wdev->cac_started)) + if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED)) return; if (WARN_ON(!wdev->chandef.chan)) @@ -888,14 +888,17 @@ void cfg80211_cac_event(struct net_device *netdev, sizeof(struct cfg80211_chan_def)); queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk); cfg80211_sched_dfs_chan_update(rdev); - break; + /* fall through */ case NL80211_RADAR_CAC_ABORTED: + wdev->cac_started = false; + break; + case NL80211_RADAR_CAC_STARTED: + wdev->cac_started = true; break; default: WARN_ON(1); return; } - wdev->cac_started = false; nl80211_radar_notify(rdev, chandef, event, netdev, gfp); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7ae58969a6511dd21903d83653b5fead1391d6d6..30683891305f30a540f5863618a961ef5950b4f2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -421,6 +421,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 }, [NL80211_ATTR_PMK] = { .type = 
NLA_BINARY, .len = PMK_MAX_LEN }, [NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG }, + [NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -3863,9 +3864,10 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev, return false; return true; case NL80211_CMD_CONNECT: - /* SAE not supported yet */ - if (auth_type == NL80211_AUTHTYPE_SAE) + if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) && + auth_type == NL80211_AUTHTYPE_SAE) return false; + /* FILS with SK PFS or PK not supported yet */ if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS || auth_type == NL80211_AUTHTYPE_FILS_PK) @@ -7795,6 +7797,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, intbss->ts_boottime, NL80211_BSS_PAD)) goto nla_put_failure; + if (!nl80211_put_signal(msg, intbss->pub.chains, + intbss->pub.chain_signal, + NL80211_BSS_CHAIN_SIGNAL)) + goto nla_put_failure; + switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) @@ -9101,6 +9108,13 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) { + if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) { + return -EINVAL; + } + connect.flags |= CONNECT_REQ_EXTERNAL_AUTH_SUPPORT; + } + wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, @@ -12409,6 +12423,41 @@ static int nl80211_del_pmk(struct sk_buff *skb, struct genl_info *info) return ret; } +static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct cfg80211_external_auth_params params; + + if (!rdev->ops->external_auth) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_SSID]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_BSSID]) + return -EINVAL; + + if (!info->attrs[NL80211_ATTR_STATUS_CODE]) + return -EINVAL; + + memset(¶ms, 0, sizeof(params)); + + params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); + if (params.ssid.ssid_len == 0 || + params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN) + return -EINVAL; + memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]), + params.ssid.ssid_len); + + memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]), + ETH_ALEN); + + params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]); + + return rdev_external_auth(rdev, dev, ¶ms); +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -13298,6 +13347,14 @@ static const struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_EXTERNAL_AUTH, + .doit = nl80211_external_auth, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_NEED_RTNL, + }, }; @@ -15292,6 +15349,47 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev) nlmsg_free(msg); } +int cfg80211_external_auth_request(struct net_device *dev, + struct cfg80211_external_auth_params *params, + gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + struct sk_buff *msg; + void *hdr; + + if (!wdev->conn_owner_nlportid) + return -EINVAL; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); + if (!msg) + return 
-ENOMEM; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EXTERNAL_AUTH); + if (!hdr) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || + nla_put_u32(msg, NL80211_ATTR_AKM_SUITES, params->key_mgmt_suite) || + nla_put_u32(msg, NL80211_ATTR_EXTERNAL_AUTH_ACTION, + params->action) || + nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, params->bssid) || + nla_put(msg, NL80211_ATTR_SSID, params->ssid.ssid_len, + params->ssid.ssid)) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, + wdev->conn_owner_nlportid); + return 0; + + nla_put_failure: + nlmsg_free(msg); + return -ENOBUFS; +} +EXPORT_SYMBOL(cfg80211_external_auth_request); + /* initialisation/exit functions */ int __init nl80211_init(void) diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 0c06240d25afc9da33ddd23c09c719cd2c06ebce..84f23ae015fc57edd0c9459aeade85e99ebfffed 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1190,4 +1190,19 @@ static inline int rdev_del_pmk(struct cfg80211_registered_device *rdev, trace_rdev_return_int(&rdev->wiphy, ret); return ret; } + +static inline int +rdev_external_auth(struct cfg80211_registered_device *rdev, + struct net_device *dev, + struct cfg80211_external_auth_params *params) +{ + int ret = -EOPNOTSUPP; + + trace_rdev_external_auth(&rdev->wiphy, dev, params); + if (rdev->ops->external_auth) + ret = rdev->ops->external_auth(&rdev->wiphy, dev, params); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 6dfb274c27b1142021c6aabf834616911a16c581..bbd39109aad61e648e4f39693afcbdaeaa652da9 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -419,6 +419,11 @@ static bool is_user_regdom_saved(void) return true; } +static bool is_cfg80211_regdom_intersected(void) +{ + return is_intersected_alpha2(get_cfg80211_regdom()->alpha2); +} + static const struct ieee80211_regdomain * reg_copy_regd(const struct ieee80211_regdomain *src_regd) { @@ -1875,9 +1880,14 @@ __reg_process_hint_user(struct regulatory_request *user_request) */ if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE || lr->initiator == NL80211_REGDOM_SET_BY_DRIVER || - lr->initiator == NL80211_REGDOM_SET_BY_USER) && - regdom_changes(lr->alpha2)) - return REG_REQ_IGNORE; + lr->initiator == NL80211_REGDOM_SET_BY_USER)) { + if (lr->intersect) { + if (!is_cfg80211_regdom_intersected()) + return REG_REQ_IGNORE; + } else if (regdom_changes(lr->alpha2)) { + return REG_REQ_IGNORE; + } + } if (!regdom_changes(user_request->alpha2)) return REG_REQ_ALREADY_SET; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 1289cc14e9f922dd0de1ac27ff86745adc468117..d8e74445d6330a6bd36ddf77fe3caedc926607c2 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -981,6 +981,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, found->ts = tmp->ts; found->ts_boottime = tmp->ts_boottime; found->parent_tsf = tmp->parent_tsf; + found->pub.chains = tmp->pub.chains; + memcpy(found->pub.chain_signal, tmp->pub.chain_signal, + IEEE80211_MAX_CHAINS); ether_addr_copy(found->parent_bssid, tmp->parent_bssid); } else { struct cfg80211_internal_bss *new; @@ -1233,6 +1236,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); tmp.ts_boottime = data->boottime_ns; tmp.parent_tsf = data->parent_tsf; + tmp.pub.chains = 
data->chains; + memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(tmp.parent_bssid, data->parent_bssid); signal_valid = abs(data->chan->center_freq - channel->center_freq) <= diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d014aea07160c2302a97b030f822d781240a1ab5..c7cd051e986e0031f60fd5f80b94b41a110f52fd 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -51,6 +51,29 @@ struct cfg80211_conn { bool auto_auth, prev_bssid_valid; }; +static bool cfg80211_is_all_countryie_ignore(void) +{ + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + bool is_all_countryie_ignore = true; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + wdev_lock(wdev); + if (!(wdev->wiphy->regulatory_flags & + REGULATORY_COUNTRY_IE_IGNORE)) { + is_all_countryie_ignore = false; + wdev_unlock(wdev); + goto out; + } + wdev_unlock(wdev); + } + } + +out: + return is_all_countryie_ignore; +} + static void cfg80211_sme_free(struct wireless_dev *wdev) { if (!wdev->conn) @@ -658,7 +681,8 @@ static bool cfg80211_is_all_idle(void) static void disconnect_work(struct work_struct *work) { rtnl_lock(); - if (cfg80211_is_all_idle()) + if (cfg80211_is_all_idle() && + !cfg80211_is_all_countryie_ignore()) regulatory_hint_disconnect(); rtnl_unlock(); } diff --git a/net/wireless/trace.h b/net/wireless/trace.h index f3353fe5b35b89fb1b0572a2890f47133ed146c9..c8c463bab79d4ba2a81d2ad71ca9807d26aa1734 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2319,6 +2319,29 @@ TRACE_EVENT(rdev_del_pmk, WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(aa)) ); +TRACE_EVENT(rdev_external_auth, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_external_auth_params *params), + TP_ARGS(wiphy, netdev, params), + TP_STRUCT__entry(WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(bssid) + __array(u8, ssid, IEEE80211_MAX_SSID_LEN + 1) + __field(u16, status) + ), + TP_fast_assign(WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(bssid, params->bssid); + memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); + memcpy(__entry->ssid, params->ssid.ssid, + params->ssid.ssid_len); + __entry->status = params->status; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT + ", ssid: %s, status: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, + __entry->bssid, __entry->ssid, __entry->status) +); + /************************************************************* * cfg80211 exported functions traces * *************************************************************/ diff --git a/scripts/build-all.py b/scripts/build-all.py index 2f4ad4472762aedd6e866b8651efc030fe820d78..ec2f5e64e2e1f8dcf8383752a587d04e44fc8dce 100755 --- a/scripts/build-all.py +++ b/scripts/build-all.py @@ -60,12 +60,8 @@ if not os.environ.get('CROSS_COMPILE'): def check_kernel(): """Ensure that PWD is a kernel directory""" - have_defconfig = any([ - os.path.isfile('arch/arm64/configs/msm_defconfig'), - os.path.isfile('arch/arm64/configs/sm8150_defconfig')]) - - if not all([os.path.isfile('MAINTAINERS'), have_defconfig]): - fail("This doesn't seem to be an MSM kernel dir") + if not os.path.isfile('MAINTAINERS'): + fail("This doesn't seem to be a kernel dir") def check_build(): """Ensure that the build directory is present.""" @@ -234,7 +230,7 @@ class Builder(): self.name = name self.defconfig = defconfig - self.confname = self.defconfig.split('/')[-1] + self.confname = re.sub('arch/arm[64]*/configs/', '', self.defconfig) # Determine if this is a 
64-bit target based on the location # of the defconfig. @@ -270,62 +266,34 @@ class Builder(): steps.append(ExecStep(['make', 'O=%s' % dest_dir, self.confname], env=self.make_env)) - if not all_options.updateconfigs: - # Build targets can be dependent upon the completion of - # previous build targets, so build them one at a time. - cmd_line = ['make', - 'INSTALL_HDR_PATH=%s' % hdri_dir, - 'INSTALL_MOD_PATH=%s' % modi_dir, - 'O=%s' % dest_dir, - 'REAL_CC=%s' % clang_bin] - build_targets = [] - for c in make_command: - if re.match(r'^-{1,2}\w', c): - cmd_line.append(c) - else: - build_targets.append(c) - for t in build_targets: - steps.append(ExecStep(cmd_line + [t], env=self.make_env)) - - # Copy the defconfig back. - if all_options.configs or all_options.updateconfigs: - steps.append(ExecStep(['make', 'O=%s' % dest_dir, - 'savedefconfig'], env=self.make_env)) - steps.append(CopyfileStep(savedefconfig, defconfig)) + # Build targets can be dependent upon the completion of + # previous build targets, so build them one at a time. + cmd_line = ['make', + 'INSTALL_HDR_PATH=%s' % hdri_dir, + 'INSTALL_MOD_PATH=%s' % modi_dir, + 'O=%s' % dest_dir, + 'REAL_CC=%s' % clang_bin] + build_targets = [] + for c in make_command: + if re.match(r'^-{1,2}\w', c): + cmd_line.append(c) + else: + build_targets.append(c) + for t in build_targets: + steps.append(ExecStep(cmd_line + [t], env=self.make_env)) return steps -def update_config(file, str): - print 'Updating %s with \'%s\'\n' % (file, str) - with open(file, 'a') as defconfig: - defconfig.write(str + '\n') - def scan_configs(): """Get the full list of defconfigs appropriate for this tree.""" names = [] - arch_pats = ( - r'[fm]sm[0-9]*_defconfig', - r'apq*_defconfig', - r'qsd*_defconfig', - r'mpq*_defconfig', - r'sdm*_defconfig', - r'sdx*_defconfig', - ) - arch64_pats = ( - r'msm*_defconfig', - r'sm*_defconfig', - r'sdm*_defconfig', - r'sdx*_defconfig', - ) - for p in arch_pats: - for n in glob.glob('arch/arm/configs/' + p): - name = os.path.basename(n)[:-10] + "-llvm" - names.append(Builder(name, n)) - if 'CROSS_COMPILE64' in os.environ: - for p in arch64_pats: - for n in glob.glob('arch/arm64/configs/' + p): - name = os.path.basename(n)[:-10] + "-llvm" + "-64" - names.append(Builder(name, n)) + for defconfig in glob.glob('arch/arm*/configs/vendor/*_defconfig'): + target = os.path.basename(defconfig)[:-10] + name = target + "-llvm" + if 'arch/arm64' in defconfig: + name = name + "-64" + names.append(Builder(name, defconfig)) + return names def build_many(targets): @@ -341,8 +309,6 @@ def build_many(targets): tracker = BuildTracker(parallel) for target in targets: - if all_options.updateconfigs: - update_config(target.defconfig, all_options.updateconfigs) steps = target.build() tracker.add_sequence(target.log_name, target.name, steps) tracker.run() @@ -358,25 +324,14 @@ def main(): usage = (""" %prog [options] all -- Build all targets %prog [options] target target ... 
-- List specific targets - %prog [options] perf -- Build all perf targets - %prog [options] noperf -- Build all non-perf targets""") + """) parser = OptionParser(usage=usage, version=version) - parser.add_option('--configs', action='store_true', - dest='configs', - help="Copy configs back into tree") parser.add_option('--list', action='store_true', dest='list', help='List available targets') parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='Output to stdout in addition to log file') - parser.add_option('--oldconfig', action='store_true', - dest='oldconfig', - help='Only process "make oldconfig"') - parser.add_option('--updateconfigs', - dest='updateconfigs', - help="Update defconfigs with provided option setting, " - "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'") parser.add_option('-j', '--jobs', type='int', dest="jobs", help="Number of simultaneous jobs") parser.add_option('-l', '--load-average', type='int', @@ -399,25 +354,11 @@ def main(): print " %s" % target.name sys.exit(0) - if options.oldconfig: - make_command = ["oldconfig"] - elif options.make_target: + if options.make_target: make_command = options.make_target if args == ['all']: build_many(configs) - elif args == ['perf']: - targets = [] - for t in configs: - if "perf" in t.name: - targets.append(t) - build_many(targets) - elif args == ['noperf']: - targets = [] - for t in configs: - if "perf" not in t.name: - targets.append(t) - build_many(targets) elif len(args) > 0: all_configs = {} for t in configs: diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 297b079ae4d9f0decbabc76d0f7e833e20aadadb..27aac273205bacf5456680a914a024bbf36960e0 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -745,7 +745,7 @@ int conf_write(const char *name) struct menu *menu; const char *basename; const char *str; - char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; + char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8]; char *env; dirname[0] = 0; diff --git a/security/Kconfig b/security/Kconfig index 65d29c37d1fcd71a6681da297ae527312bec807d..87d8bb2df7c8fa394e5f48efaff38a6028f5f53f 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -6,6 +6,11 @@ menu "Security options" source security/keys/Kconfig +if ARCH_QCOM +source security/pfe/Kconfig +endif + + config SECURITY_DMESG_RESTRICT bool "Restrict unprivileged access to the kernel syslog" default n diff --git a/security/Makefile b/security/Makefile index 4d2d3782ddefd3fbdd6d2984a1faa8a2bd6561ae..f15945d3800b8d397c7d7d5b5737d7b39a246599 100644 --- a/security/Makefile +++ b/security/Makefile @@ -10,6 +10,7 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor subdir-$(CONFIG_SECURITY_YAMA) += yama subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin +subdir-$(CONFIG_ARCH_QCOM) += pfe # always enable default capabilities obj-y += commoncap.o @@ -25,6 +26,7 @@ obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/ obj-$(CONFIG_SECURITY_YAMA) += yama/ obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/ +obj-$(CONFIG_ARCH_QCOM) += pfe/ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o # Object integrity file lists diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..0cd9e81a49528dd7e63177c1b983106c767b7c88 --- /dev/null +++ b/security/pfe/Kconfig @@ -0,0 +1,28 @@ +menu "Qualcomm Technologies, Inc Per File Encryption security device drivers" + depends on ARCH_QCOM + +config 
PFT + bool "Per-File-Tagger driver" + depends on SECURITY + default n + help + This driver is used for tagging enterprise files. + It is part of the Per-File-Encryption (PFE) feature. + The driver is tagging files when created by + registered application. + Tagged files are encrypted using the dm-req-crypt driver. + +config PFK + bool "Per-File-Key driver" + depends on SECURITY + depends on SECURITY_SELINUX + default n + help + This driver is used for storing eCryptfs information + in file node. + This is part of eCryptfs hardware enhanced solution + provided by Qualcomm Technologies, Inc. + Information is used when file is encrypted later using + ICE or dm crypto engine + +endmenu diff --git a/security/pfe/Makefile b/security/pfe/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..242a2165fccba01327093f1a650c9f4de2a483ee --- /dev/null +++ b/security/pfe/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the MSM specific security device drivers. +# + +ccflags-y += -Isecurity/selinux -Isecurity/selinux/include +ccflags-y += -Ifs/ext4 +ccflags-y += -Ifs/crypto + +obj-$(CONFIG_PFT) += pft.o +obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c new file mode 100644 index 0000000000000000000000000000000000000000..b38cd5c4b05dee4cef9f87dbde01ea5da7b6fb32 --- /dev/null +++ b/security/pfe/pfk.c @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Per-File-Key (PFK). + * + * This driver is responsible for overall management of various + * Per File Encryption variants that work on top of or as part of different + * file systems. 
+ * + * The driver has the following purpose : + * 1) Define priorities between PFE's if more than one is enabled + * 2) Extract key information from inode + * 3) Load and manage various keys in ICE HW engine + * 4) It should be invoked from various layers in FS/BLOCK/STORAGE DRIVER + * that need to take decision on HW encryption management of the data + * Some examples: + * BLOCK LAYER: when it takes decision on whether 2 chunks can be united + * to one encryption / decryption request sent to the HW + * + * UFS DRIVER: when it need to configure ICE HW with a particular key slot + * to be used for encryption / decryption + * + * PFE variants can differ on particular way of storing the cryptographic info + * inside inode, actions to be taken upon file operations, etc., but the common + * properties are described above + * + */ + +#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ext4.h" +#include "objsec.h" +#include "pfk_kc.h" +#include "pfk_ice.h" +#include "pfk_ext4.h" +#include "pfk_internal.h" + +static bool pfk_ready; + +/* might be replaced by a table when more than one cipher is supported */ +#define PFK_SUPPORTED_KEY_SIZE 32 +#define PFK_SUPPORTED_SALT_SIZE 32 + +/* Various PFE types and function tables to support each one of them */ +enum pfe_type {EXT4_CRYPT_PFE, INVALID_PFE}; + +typedef int (*pfk_parse_inode_type)(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe); + +typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2); + +static const pfk_parse_inode_type pfk_parse_inode_ftable[] = { + /* EXT4_CRYPT_PFE */ &pfk_ext4_parse_inode, +}; + +static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = { + /* EXT4_CRYPT_PFE */ &pfk_ext4_allow_merge_bio, +}; + +static void __exit pfk_exit(void) +{ + pfk_ready = false; + pfk_ext4_deinit(); + pfk_kc_deinit(); +} + +static int __init pfk_init(void) +{ + int ret = 0; + + ret = pfk_ext4_init(); + if (ret != 0) + goto fail; + + ret = pfk_kc_init(); + if (ret != 0) { + pr_err("could init pfk key cache, error %d\n", ret); + pfk_ext4_deinit(); + goto fail; + } + + pfk_ready = true; + pr_info("Driver initialized successfully\n"); + + return 0; + +fail: + pr_err("Failed to init driver\n"); + return -ENODEV; +} + +/* + * If more than one type is supported simultaneously, this function will also + * set the priority between them + */ +static enum pfe_type pfk_get_pfe_type(const struct inode *inode) +{ + if (!inode) + return INVALID_PFE; + + if (pfk_is_ext4_type(inode)) + return EXT4_CRYPT_PFE; + + return INVALID_PFE; +} + +/** + * inode_to_filename() - get the filename from inode pointer. + * @inode: inode pointer + * + * it is used for debug prints. + * + * Return: filename string or "unknown". + */ +char *inode_to_filename(const struct inode *inode) +{ + struct dentry *dentry = NULL; + char *filename = NULL; + + if (hlist_empty(&inode->i_dentry)) + return "unknown"; + + dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); + filename = dentry->d_iname; + + return filename; +} + +/** + * pfk_is_ready() - driver is initialized and ready. + * + * Return: true if the driver is ready. + */ +static inline bool pfk_is_ready(void) +{ + return pfk_ready; +} + +/** + * pfk_bio_get_inode() - get the inode from a bio. + * @bio: Pointer to BIO structure. 
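pfk_get_pfe_type() together with the pfk_parse_inode_ftable and pfk_allow_merge_bio_ftable arrays above form a small dispatch table indexed by PFE type, with INVALID_PFE as the "no handler" sentinel that callers must check before indexing. A self-contained sketch of that pattern (all names below are illustrative, not the driver's):

#include <stdio.h>

enum pfe_kind { KIND_EXT4, KIND_INVALID };

typedef int (*parse_fn)(int inode_id);

static int parse_ext4(int inode_id) { return inode_id; }

/* One slot per valid kind; array order must match the enum order. */
static const parse_fn parse_table[] = {
	/* KIND_EXT4 */ parse_ext4,
};

static enum pfe_kind classify(int inode_id)
{
	return inode_id > 0 ? KIND_EXT4 : KIND_INVALID;
}

int main(void)
{
	enum pfe_kind kind = classify(42);

	if (kind == KIND_INVALID)	/* sentinel: never index the table */
		return 1;
	return parse_table[kind](42) == 42 ? 0 : 1;
}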
+ * + * Walk the bio struct links to get the inode. + * Please note, that in general bio may consist of several pages from + * several files, but in our case we always assume that all pages come + * from the same file, since our logic ensures it. That is why we only + * walk through the first page to look for inode. + * + * Return: pointer to the inode struct if successful, or NULL otherwise. + * + */ +static struct inode *pfk_bio_get_inode(const struct bio *bio) +{ + struct address_space *mapping; + + if (!bio) + return NULL; + if (!bio->bi_io_vec) + return NULL; + if (!bio->bi_io_vec->bv_page) + return NULL; + if (!bio_has_data((struct bio *)bio)) + return NULL; + + if (PageAnon(bio->bi_io_vec->bv_page)) { + struct inode *inode; + + inode = dio_bio_get_inode((struct bio *)bio); + pr_debug("inode on direct-io, inode = 0x%pK.\n", inode); + return inode; + } + + mapping = page_mapping(bio->bi_io_vec->bv_page); + if (!mapping) + return NULL; + + if (!mapping->host) + return NULL; + + return bio->bi_io_vec->bv_page->mapping->host; +} + +/** + * pfk_key_size_to_key_type() - translate key size to key size enum + * @key_size: key size in bytes + * @key_size_type: pointer to store the output enum (can be null) + * + * return 0 in case of success, error otherwise (i.e not supported key size) + */ +int pfk_key_size_to_key_type(size_t key_size, + enum ice_crpto_key_size *key_size_type) +{ + /* + * currently only 32 bit key size is supported + * in the future, table with supported key sizes might + * be introduced + */ + + if (key_size != PFK_SUPPORTED_KEY_SIZE) { + pr_err("not supported key size %zu\n", key_size); + return -EINVAL; + } + + if (key_size_type) + *key_size_type = ICE_CRYPTO_KEY_SIZE_256; + + return 0; +} + +/* + * Retrieves filesystem type from inode's superblock + */ +bool pfe_is_inode_filesystem_type(const struct inode *inode, + const char *fs_type) +{ + if (!inode || !fs_type) + return false; + + if (!inode->i_sb) + return false; + + if (!inode->i_sb->s_type) + return false; + + return (strcmp(inode->i_sb->s_type->name, fs_type) == 0); +} + + +/** + * pfk_load_key_start() - loads PFE encryption key to the ICE + * Can also be invoked from non + * PFE context, in this case it + * is not relevant and is_pfe + * flag is set to false + * + * @bio: Pointer to the BIO structure + * @ice_setting: Pointer to ice setting structure that will be filled with + * ice configuration values, including the index to which the key was loaded + * @is_pfe: will be false if inode is not relevant to PFE, in such a case + * it should be treated as non PFE by the block layer + * + * Returns the index where the key is stored in encryption hw and additional + * information that will be used later for configuration of the encryption hw. 
+ * + * Must be followed by pfk_load_key_end when key is no longer used by ice + * + */ +int pfk_load_key_start(const struct bio *bio, + struct ice_crypto_setting *ice_setting, bool *is_pfe, + bool async) +{ + int ret = 0; + struct pfk_key_info key_info = {NULL, NULL, 0, 0}; + enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; + enum ice_crpto_key_size key_size_type = 0; + u32 key_index = 0; + struct inode *inode = NULL; + enum pfe_type which_pfe = INVALID_PFE; + + if (!is_pfe) { + pr_err("is_pfe is NULL\n"); + return -EINVAL; + } + + /* + * only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_is_ready()) + return -ENODEV; + + if (!ice_setting) { + pr_err("ice setting is NULL\n"); + return -EINVAL; + } + inode = pfk_bio_get_inode(bio); + if (!inode) { + *is_pfe = false; + return -EINVAL; + } + which_pfe = pfk_get_pfe_type(inode); + if (which_pfe == INVALID_PFE) { + *is_pfe = false; + return -EPERM; + } + + pr_debug("parsing file %s with PFE %d\n", + inode_to_filename(inode), which_pfe); + ret = (*(pfk_parse_inode_ftable[which_pfe])) + (bio, inode, &key_info, &algo_mode, is_pfe); + if (ret != 0) + return ret; + ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type); + if (ret != 0) + return ret; + ret = pfk_kc_load_key_start(key_info.key, key_info.key_size, + key_info.salt, key_info.salt_size, &key_index, async); + if (ret) { + if (ret != -EBUSY && ret != -EAGAIN) + pr_err("start: could not load key into pfk key cache, error %d\n", + ret); + + return ret; + } + + ice_setting->key_size = key_size_type; + ice_setting->algo_mode = algo_mode; + /* hardcoded for now */ + ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY; + ice_setting->key_index = key_index; + + pr_debug("loaded key for file %s key_index %d\n", + inode_to_filename(inode), key_index); + + return 0; +} + +/** + * pfk_load_key_end() - marks the PFE key as no longer used by ICE + * Can also be invoked from non + * PFE context, in this case it is not + * relevant and is_pfe flag is + * set to false + * + * @bio: Pointer to the BIO structure + * @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked + * from PFE context + */ +int pfk_load_key_end(const struct bio *bio, bool *is_pfe) +{ + int ret = 0; + struct pfk_key_info key_info = {0}; + enum pfe_type which_pfe = INVALID_PFE; + struct inode *inode = NULL; + + if (!is_pfe) { + pr_err("is_pfe is NULL\n"); + return -EINVAL; + } + + /* only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_is_ready()) + return -ENODEV; + + inode = pfk_bio_get_inode(bio); + if (!inode) { + *is_pfe = false; + return -EINVAL; + } + + which_pfe = pfk_get_pfe_type(inode); + if (which_pfe == INVALID_PFE) { + *is_pfe = false; + return -EPERM; + } + + ret = (*(pfk_parse_inode_ftable[which_pfe])) + (bio, inode, &key_info, NULL, is_pfe); + if (ret != 0) + return ret; + + pfk_kc_load_key_end(key_info.key, key_info.key_size, + key_info.salt, key_info.salt_size); + + pr_debug("finished using key for file %s\n", + inode_to_filename(inode)); + + return 0; +} + +/** + * pfk_allow_merge_bio() - Check if 2 BIOs can be merged. + * @bio1: Pointer to first BIO structure. + * @bio2: Pointer to second BIO structure. + * + * Prevent merging of BIOs from encrypted and non-encrypted + * files, or files encrypted with different key. 
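The merge rule described above reduces to a small decision function: bios of different PFE kinds never merge, two unencrypted bios always may, and two bios of the same encrypted kind defer to a per-filesystem key comparison. A standalone model of that rule (the real pfk_allow_merge_bio() follows in the patch; keys_equal() is a stub standing in for the per-filesystem check):

/* Model of the merge rule: 0 = do not merge, 1 = merging is allowed. */
enum merge_kind { MERGE_PLAIN, MERGE_EXT4 };

static int keys_equal(int key1, int key2)
{
	return key1 == key2;	/* stand-in for the per-filesystem key compare */
}

static int may_merge(enum merge_kind k1, enum merge_kind k2,
		     int key1, int key2)
{
	if (k1 != k2)
		return 0;		/* encrypted data never merges with another kind */
	if (k1 == MERGE_PLAIN)
		return 1;		/* neither bio carries an encrypted file */
	return keys_equal(key1, key2);	/* same kind: the keys must match */
}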
+ * Also prevent non encrypted and encrypted data from the same file + * to be merged (ecryptfs header if stored inside file should be non + * encrypted) + * This API is called by the file system block layer. + * + * Return: true if the BIOs allowed to be merged, false + * otherwise. + */ +bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2) +{ + struct inode *inode1 = NULL; + struct inode *inode2 = NULL; + enum pfe_type which_pfe1 = INVALID_PFE; + enum pfe_type which_pfe2 = INVALID_PFE; + + if (!pfk_is_ready()) + return false; + + if (!bio1 || !bio2) + return false; + + if (bio1 == bio2) + return true; + + inode1 = pfk_bio_get_inode(bio1); + inode2 = pfk_bio_get_inode(bio2); + + + which_pfe1 = pfk_get_pfe_type(inode1); + which_pfe2 = pfk_get_pfe_type(inode2); + + /* nodes with different encryption, do not merge */ + if (which_pfe1 != which_pfe2) + return false; + + /* both nodes do not have encryption, allow merge */ + if (which_pfe1 == INVALID_PFE) + return true; + + return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2, + inode1, inode2); +} +/** + * Flush key table on storage core reset. During core reset key configuration + * is lost in ICE. We need to flash the cache, so that the keys will be + * reconfigured again for every subsequent transaction + */ +void pfk_clear_on_reset(void) +{ + if (!pfk_is_ready()) + return; + + pfk_kc_clear_on_reset(); +} + +module_init(pfk_init); +module_exit(pfk_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Per-File-Key driver"); diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c new file mode 100644 index 0000000000000000000000000000000000000000..05a8628e34b8549deaa0f46a2de268c6d989bd0e --- /dev/null +++ b/security/pfe/pfk_ext4.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * Per-File-Key (PFK) - EXT4 + * + * This driver is used for working with EXT4 crypt extension + * + * The key information is stored in node by EXT4 when file is first opened + * and will be later accessed by Block Device Driver to actually load the key + * to encryption hw. + * + * PFK exposes API's for loading and removing keys from encryption hw + * and also API to determine whether 2 adjacent blocks can be agregated by + * Block Layer in one request to encryption hw. + * + */ + +#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__ + +#include +#include +#include +#include + +#include "ext4_ice.h" +#include "pfk_ext4.h" + +static bool pfk_ext4_ready; + +/* + * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer + */ +void pfk_ext4_deinit(void) +{ + pfk_ext4_ready = false; +} + +/* + * pfk_ecryptfs_init() - Init function, should be invoked by upper PFK layer + */ +int __init pfk_ext4_init(void) +{ + pfk_ext4_ready = true; + pr_info("PFK EXT4 inited successfully\n"); + + return 0; +} + +/** + * pfk_ecryptfs_is_ready() - driver is initialized and ready. + * + * Return: true if the driver is ready. 
+ */ +static inline bool pfk_ext4_is_ready(void) +{ + return pfk_ext4_ready; +} + +/** + * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE + * @inode: inode pointer + */ +bool pfk_is_ext4_type(const struct inode *inode) +{ + if (!pfe_is_inode_filesystem_type(inode, "ext4")) + return false; + + return ext4_should_be_processed_by_ice(inode); +} + +/** + * pfk_ext4_parse_cipher() - parse cipher from inode to enum + * @inode: inode + * @algo: pointer to store the output enum (can be null) + * + * return 0 in case of success, error otherwise (i.e not supported cipher) + */ +static int pfk_ext4_parse_cipher(const struct inode *inode, + enum ice_cryto_algo_mode *algo) +{ + /* + * currently only AES XTS algo is supported + * in the future, table with supported ciphers might + * be introduced + */ + + if (!inode) + return -EINVAL; + + if (!ext4_is_aes_xts_cipher(inode)) { + pr_err("ext4 alghoritm is not supported by pfk\n"); + return -EINVAL; + } + + if (algo) + *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS; + + return 0; +} + +int pfk_ext4_parse_inode(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe) +{ + int ret = 0; + + if (!is_pfe) + return -EINVAL; + + /* + * only a few errors below can indicate that + * this function was not invoked within PFE context, + * otherwise we will consider it PFE + */ + *is_pfe = true; + + if (!pfk_ext4_is_ready()) + return -ENODEV; + + if (!inode) + return -EINVAL; + + if (!key_info) + return -EINVAL; + + key_info->key = ext4_get_ice_encryption_key(inode); + if (!key_info->key) { + pr_err("could not parse key from ext4\n"); + return -EINVAL; + } + + key_info->key_size = ext4_get_ice_encryption_key_size(inode); + if (!key_info->key_size) { + pr_err("could not parse key size from ext4\n"); + return -EINVAL; + } + + key_info->salt = ext4_get_ice_encryption_salt(inode); + if (!key_info->salt) { + pr_err("could not parse salt from ext4\n"); + return -EINVAL; + } + + key_info->salt_size = ext4_get_ice_encryption_salt_size(inode); + if (!key_info->salt_size) { + pr_err("could not parse salt size from ext4\n"); + return -EINVAL; + } + + ret = pfk_ext4_parse_cipher(inode, algo); + if (ret != 0) { + pr_err("not supported cipher\n"); + return ret; + } + + return 0; +} + +bool pfk_ext4_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2) +{ + /* if there is no ext4 pfk, don't disallow merging blocks */ + if (!pfk_ext4_is_ready()) + return true; + + if (!inode1 || !inode2) + return false; + + return ext4_is_ice_encryption_info_equal(inode1, inode2); +} diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h new file mode 100644 index 0000000000000000000000000000000000000000..c33232f35a1474e6dae4a8f39a884b638c369b46 --- /dev/null +++ b/security/pfe/pfk_ext4.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _PFK_EXT4_H_ +#define _PFK_EXT4_H_ + +#include +#include +#include +#include "pfk_internal.h" + +bool pfk_is_ext4_type(const struct inode *inode); + +int pfk_ext4_parse_inode(const struct bio *bio, + const struct inode *inode, + struct pfk_key_info *key_info, + enum ice_cryto_algo_mode *algo, + bool *is_pfe); + +bool pfk_ext4_allow_merge_bio(const struct bio *bio1, + const struct bio *bio2, const struct inode *inode1, + const struct inode *inode2); + +int __init pfk_ext4_init(void); + +void pfk_ext4_deinit(void); + +#endif /* _PFK_EXT4_H_ */ diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c new file mode 100644 index 0000000000000000000000000000000000000000..bf60dd18dd76163b4d3239148109cbcc186fbb8d --- /dev/null +++ b/security/pfe/pfk_ice.c @@ -0,0 +1,181 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pfk_ice.h" + + +/**********************************/ +/** global definitions **/ +/**********************************/ + +#define TZ_ES_SET_ICE_KEY 0x2 +#define TZ_ES_INVALIDATE_ICE_KEY 0x3 + +/* index 0 and 1 is reserved for FDE */ +#define MIN_ICE_KEY_INDEX 2 + +#define MAX_ICE_KEY_INDEX 31 + +#define TZ_ES_SET_ICE_KEY_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY) + +#define TZ_ES_INVALIDATE_ICE_KEY_ID \ + TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \ + TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY) + +#define TZ_ES_SET_ICE_KEY_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_5( \ + TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \ + TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL) + +#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \ + TZ_SYSCALL_CREATE_PARAM_ID_1( \ + TZ_SYSCALL_PARAM_TYPE_VAL) + +#define ICE_KEY_SIZE 32 +#define ICE_SALT_SIZE 32 + +static uint8_t ice_key[ICE_KEY_SIZE]; +static uint8_t ice_salt[ICE_KEY_SIZE]; + +int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, + char *storage_type) +{ + struct scm_desc desc = {0}; + int ret, ret1; + char *tzbuf_key = (char *)ice_key; + char *tzbuf_salt = (char *)ice_salt; + char *s_type = storage_type; + uint32_t smc_id = 0; + u32 tzbuflen_key = sizeof(ice_key); + u32 tzbuflen_salt = sizeof(ice_salt); + + if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) { + pr_err("%s Invalid index %d\n", __func__, index); + return -EINVAL; + } + if (!key || !salt) { + pr_err("%s Invalid key/salt\n", __func__); + return -EINVAL; + } + + if (!tzbuf_key || !tzbuf_salt) { + pr_err("%s No Memory\n", __func__); + return -ENOMEM; + } + + if (s_type == NULL) { + pr_err("%s Invalid Storage type\n", __func__); + return -EINVAL; + } + + memset(tzbuf_key, 0, tzbuflen_key); + memset(tzbuf_salt, 0, tzbuflen_salt); + + memcpy(ice_key, key, tzbuflen_key); + memcpy(ice_salt, salt, tzbuflen_salt); + + dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key); + dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt); + + smc_id = 
TZ_ES_SET_ICE_KEY_ID; + + desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID; + desc.args[0] = index; + desc.args[1] = virt_to_phys(tzbuf_key); + desc.args[2] = tzbuflen_key; + desc.args[3] = virt_to_phys(tzbuf_salt); + desc.args[4] = tzbuflen_salt; + + ret = qcom_ice_setup_ice_hw((const char *)s_type, true); + if (ret) { + pr_err("%s: could not enable clocks: %d\n", __func__, ret); + goto out; + } + + ret = scm_call2(smc_id, &desc); + if (ret) { + pr_err("%s: Set Key Error: %d\n", __func__, ret); + if (ret == -EBUSY) { + if (qcom_ice_setup_ice_hw((const char *)s_type, false)) + pr_err("%s: clock disable failed\n", __func__); + goto out; + } + /* Try to invalidate the key to keep ICE in proper state */ + smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID; + desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID; + desc.args[0] = index; + ret1 = scm_call2(smc_id, &desc); + if (ret1) + pr_err("%s: Invalidate Key Error: %d\n", __func__, + ret1); + } + + ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false); + if (ret1) + pr_err("%s: Error %d disabling clocks\n", __func__, ret1); + +out: + return ret; +} + +int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type) +{ + struct scm_desc desc = {0}; + int ret = 0; + + uint32_t smc_id = 0; + + if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) { + pr_err("%s Invalid index %d\n", __func__, index); + return -EINVAL; + } + + if (storage_type == NULL) { + pr_err("%s Invalid Storage type\n", __func__); + return -EINVAL; + } + + smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID; + + desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID; + desc.args[0] = index; + + ret = qcom_ice_setup_ice_hw((const char *)storage_type, true); + if (ret) { + pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret); + return ret; + } + + ret = scm_call2(smc_id, &desc); + if (ret) + pr_err("%s: Error: 0x%x\n", __func__, ret); + + if (qcom_ice_setup_ice_hw((const char *)storage_type, false)) + pr_err("%s: could not disable clocks\n", __func__); + + return ret; +} diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h new file mode 100644 index 0000000000000000000000000000000000000000..a0019391911601f76f737e477e2f132fb0f52457 --- /dev/null +++ b/security/pfe/pfk_ice.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef PFK_ICE_H_ +#define PFK_ICE_H_ + +/* + * PFK ICE + * + * ICE keys configuration through scm calls. + * + */ + +#include + +int pfk_ice_init(void); +int pfk_ice_deinit(void); + +int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt, + char *storage_type); +int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type); + +#endif /* PFK_ICE_H_ */ diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..3214327b8bcd14c425ff3030728b54841ac77d6d --- /dev/null +++ b/security/pfe/pfk_internal.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _PFK_INTERNAL_H_ +#define _PFK_INTERNAL_H_ + +#include +#include + +struct pfk_key_info { + const unsigned char *key; + const unsigned char *salt; + size_t key_size; + size_t salt_size; +}; + +int pfk_key_size_to_key_type(size_t key_size, + enum ice_crpto_key_size *key_size_type); + +bool pfe_is_inode_filesystem_type(const struct inode *inode, + const char *fs_type); + +char *inode_to_filename(const struct inode *inode); + +#endif /* _PFK_INTERNAL_H_ */ diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c new file mode 100644 index 0000000000000000000000000000000000000000..6ccfbd1a54840b63e4bd75ca0e4f45d613998bd9 --- /dev/null +++ b/security/pfe/pfk_kc.c @@ -0,0 +1,905 @@ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * PFK Key Cache + * + * Key Cache used internally in PFK. + * The purpose of the cache is to save access time to QSEE when loading keys. + * Currently the cache is the same size as the total number of keys that can + * be loaded to ICE. Since this number is relatively small, the algorithms for + * cache eviction are simple, linear and based on last usage timestamp, i.e + * the node that will be evicted is the one with the oldest timestamp. + * Empty entries always have the oldest timestamp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pfk_kc.h" +#include "pfk_ice.h" + + +/** the first available index in ice engine */ +#define PFK_KC_STARTING_INDEX 2 + +/** currently the only supported key and salt sizes */ +#define PFK_KC_KEY_SIZE 32 +#define PFK_KC_SALT_SIZE 32 + +/** Table size */ +/* TODO replace by some constant from ice.h */ +#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX)) + +/** The maximum key and salt size */ +#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE +#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE +#define PFK_UFS "ufs" + +static DEFINE_SPINLOCK(kc_lock); +static unsigned long flags; +static bool kc_ready; +static char *s_type = "sdcc"; + +/** + * enum pfk_kc_entry_state - state of the entry inside kc table + * + * @FREE: entry is free + * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine + and cannot be used by others. SCM call + to load key to ICE is pending to be performed + * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and + cannot be used by others. 
SCM call to load the + key to ICE was successfully executed and key is + now loaded + * @INACTIVE_INVALIDATING: entry is being invalidated during file close + and cannot be used by others until invalidation + is complete + * @INACTIVE: entry's key is already loaded, but is not + currently being used. It can be re-used for + optimization and to avoid SCM call cost or + it can be taken by another key if there are + no FREE entries + * @SCM_ERROR: error occurred while scm call was performed to + load the key to ICE + */ +enum pfk_kc_entry_state { + FREE, + ACTIVE_ICE_PRELOAD, + ACTIVE_ICE_LOADED, + INACTIVE_INVALIDATING, + INACTIVE, + SCM_ERROR +}; + +struct kc_entry { + unsigned char key[PFK_MAX_KEY_SIZE]; + size_t key_size; + + unsigned char salt[PFK_MAX_SALT_SIZE]; + size_t salt_size; + + u64 time_stamp; + u32 key_index; + + struct task_struct *thread_pending; + + enum pfk_kc_entry_state state; + + /* ref count for the number of requests in the HW queue for this key */ + int loaded_ref_cnt; + int scm_error; +}; + +static struct kc_entry kc_table[PFK_KC_TABLE_SIZE]; + +/** + * kc_is_ready() - driver is initialized and ready. + * + * Return: true if the key cache is ready. + */ +static inline bool kc_is_ready(void) +{ + return kc_ready; +} + +static inline void kc_spin_lock(void) +{ + spin_lock_irqsave(&kc_lock, flags); +} + +static inline void kc_spin_unlock(void) +{ + spin_unlock_irqrestore(&kc_lock, flags); +} + +/** + * kc_entry_is_available() - checks whether the entry is available + * + * Return true if it is , false otherwise or if invalid + * Should be invoked under spinlock + */ +static bool kc_entry_is_available(const struct kc_entry *entry) +{ + if (!entry) + return false; + + return (entry->state == FREE || entry->state == INACTIVE); +} + +/** + * kc_entry_wait_till_available() - waits till entry is available + * + * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted + * by signal + * + * Should be invoked under spinlock + */ +static int kc_entry_wait_till_available(struct kc_entry *entry) +{ + int res = 0; + + while (!kc_entry_is_available(entry)) { + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { + res = -ERESTARTSYS; + break; + } + /* assuming only one thread can try to invalidate + * the same entry + */ + entry->thread_pending = current; + kc_spin_unlock(); + schedule(); + kc_spin_lock(); + } + set_current_state(TASK_RUNNING); + + return res; +} + +/** + * kc_entry_start_invalidating() - moves entry to state + * INACTIVE_INVALIDATING + * If entry is in use, waits till + * it gets available + * @entry: pointer to entry + * + * Return 0 in case of success, otherwise error + * Should be invoked under spinlock + */ +static int kc_entry_start_invalidating(struct kc_entry *entry) +{ + int res; + + res = kc_entry_wait_till_available(entry); + if (res) + return res; + + entry->state = INACTIVE_INVALIDATING; + + return 0; +} + +/** + * kc_entry_finish_invalidating() - moves entry to state FREE + * wakes up all the tasks waiting + * on it + * + * @entry: pointer to entry + * + * Return 0 in case of success, otherwise error + * Should be invoked under spinlock + */ +static void kc_entry_finish_invalidating(struct kc_entry *entry) +{ + if (!entry) + return; + + if (entry->state != INACTIVE_INVALIDATING) + return; + + entry->state = FREE; +} + +/** + * kc_min_entry() - compare two entries to find one with minimal time + * @a: ptr to the first entry. 
If NULL the other entry will be returned + * @b: pointer to the second entry + * + * Return the entry which timestamp is the minimal, or b if a is NULL + */ +static inline struct kc_entry *kc_min_entry(struct kc_entry *a, + struct kc_entry *b) +{ + if (!a) + return b; + + if (time_before64(b->time_stamp, a->time_stamp)) + return b; + + return a; +} + +/** + * kc_entry_at_index() - return entry at specific index + * @index: index of entry to be accessed + * + * Return entry + * Should be invoked under spinlock + */ +static struct kc_entry *kc_entry_at_index(int index) +{ + return &(kc_table[index]); +} + +/** + * kc_find_key_at_index() - find kc entry starting at specific index + * @key: key to look for + * @key_size: the key size + * @salt: salt to look for + * @salt_size: the salt size + * @sarting_index: index to start search with, if entry found, updated with + * index of that entry + * + * Return entry or NULL in case of error + * Should be invoked under spinlock + */ +static struct kc_entry *kc_find_key_at_index(const unsigned char *key, + size_t key_size, const unsigned char *salt, size_t salt_size, + int *starting_index) +{ + struct kc_entry *entry = NULL; + int i = 0; + + for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) { + entry = kc_entry_at_index(i); + + if (salt != NULL) { + if (entry->salt_size != salt_size) + continue; + + if (memcmp(entry->salt, salt, salt_size) != 0) + continue; + } + + if (entry->key_size != key_size) + continue; + + if (memcmp(entry->key, key, key_size) == 0) { + *starting_index = i; + return entry; + } + } + + return NULL; +} + +/** + * kc_find_key() - find kc entry + * @key: key to look for + * @key_size: the key size + * @salt: salt to look for + * @salt_size: the salt size + * + * Return entry or NULL in case of error + * Should be invoked under spinlock + */ +static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size) +{ + int index = 0; + + return kc_find_key_at_index(key, key_size, salt, salt_size, &index); +} + +/** + * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp + * that is not locked + * + * Returns entry with minimal timestamp. Empty entries have timestamp + * of 0, therefore they are returned first. 
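That property follows from two details of the code above and below: kc_clear_entry() (further below) resets time_stamp to 0, and kc_min_entry() compares with time_before64(), so a cleared slot always compares as older than any slot stamped with get_jiffies_64(). A small illustration of that comparison; demo_pick_older() is hypothetical and for illustration only:

#include <linux/jiffies.h>

/*
 * Sketch: a cleared slot (time_stamp == 0) compares as older than a
 * freshly stamped one, so the eviction scan prefers it.
 */
static struct kc_entry *demo_pick_older(struct kc_entry *cleared,
					struct kc_entry *recent)
{
	cleared->time_stamp = 0;		/* as kc_clear_entry() does */
	recent->time_stamp = get_jiffies_64();	/* as kc_update_timestamp() does */

	return kc_min_entry(cleared, recent);	/* returns @cleared in practice */
}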
+ * If all the entries are locked, will return NULL + * Should be invoked under spin lock + */ +static struct kc_entry *kc_find_oldest_entry_non_locked(void) +{ + struct kc_entry *curr_min_entry = NULL; + struct kc_entry *entry = NULL; + int i = 0; + + for (i = 0; i < PFK_KC_TABLE_SIZE; i++) { + entry = kc_entry_at_index(i); + + if (entry->state == FREE) + return entry; + + if (entry->state == INACTIVE) + curr_min_entry = kc_min_entry(curr_min_entry, entry); + } + + return curr_min_entry; +} + +/** + * kc_update_timestamp() - updates timestamp of entry to current + * + * @entry: entry to update + * + */ +static void kc_update_timestamp(struct kc_entry *entry) +{ + if (!entry) + return; + + entry->time_stamp = get_jiffies_64(); +} + +/** + * kc_clear_entry() - clear the key from entry and mark entry not in use + * + * @entry: pointer to entry + * + * Should be invoked under spinlock + */ +static void kc_clear_entry(struct kc_entry *entry) +{ + if (!entry) + return; + + memset(entry->key, 0, entry->key_size); + memset(entry->salt, 0, entry->salt_size); + + entry->key_size = 0; + entry->salt_size = 0; + + entry->time_stamp = 0; + entry->scm_error = 0; + + entry->state = FREE; + + entry->loaded_ref_cnt = 0; + entry->thread_pending = NULL; +} + +/** + * kc_update_entry() - replaces the key in given entry and + * loads the new key to ICE + * + * @entry: entry to replace key in + * @key: key + * @key_size: key_size + * @salt: salt + * @salt_size: salt_size + * + * The previous key is securely released and wiped, the new one is loaded + * to ICE. + * Should be invoked under spinlock + */ +static int kc_update_entry(struct kc_entry *entry, const unsigned char *key, + size_t key_size, const unsigned char *salt, size_t salt_size) +{ + int ret; + + kc_clear_entry(entry); + + memcpy(entry->key, key, key_size); + entry->key_size = key_size; + + memcpy(entry->salt, salt, salt_size); + entry->salt_size = salt_size; + + /* Mark entry as no longer free before releasing the lock */ + entry->state = ACTIVE_ICE_PRELOAD; + kc_spin_unlock(); + + ret = qti_pfk_ice_set_key(entry->key_index, entry->key, + entry->salt, s_type); + + kc_spin_lock(); + return ret; +} + +/** + * pfk_kc_init() - init function + * + * Return 0 in case of success, error otherwise + */ +int pfk_kc_init(void) +{ + int i = 0; + struct kc_entry *entry = NULL; + + kc_spin_lock(); + for (i = 0; i < PFK_KC_TABLE_SIZE; i++) { + entry = kc_entry_at_index(i); + entry->key_index = PFK_KC_STARTING_INDEX + i; + } + kc_ready = true; + kc_spin_unlock(); + return 0; +} + +/** + * pfk_kc_denit() - deinit function + * + * Return 0 in case of success, error otherwise + */ +int pfk_kc_deinit(void) +{ + int res = pfk_kc_clear(); + + kc_ready = false; + return res; +} + +/** + * pfk_kc_load_key_start() - retrieve the key from cache or add it if + * it's not there and return the ICE hw key index in @key_index. + * @key: pointer to the key + * @key_size: the size of the key + * @salt: pointer to the salt + * @salt_size: the size of the salt + * @key_index: the pointer to key_index where the output will be stored + * @async: whether scm calls are allowed in the caller context + * + * If key is present in cache, than the key_index will be retrieved from cache. + * If it is not present, the oldest entry from kc table will be evicted, + * the key will be loaded to ICE via QSEE to the index that is the evicted + * entry number and stored in cache. 
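Note that kc_update_entry() above releases kc_lock around qti_pfk_ice_set_key(): the SCM/QSEE call may sleep, which is not allowed under a spinlock, and marking the slot ACTIVE_ICE_PRELOAD before unlocking is what keeps another thread from picking the same slot in that window. A minimal sketch of the mark-busy/drop-lock/retake pattern under the same assumption; demo_slow_load() and demo_load_slot() are hypothetical stand-ins, not functions of this driver:

/* Hypothetical stand-in for the QSEE key-load call; may sleep. */
static int demo_slow_load(u32 key_index)
{
	return 0;
}

/*
 * Sketch: mark the slot busy, drop the lock for the sleeping call,
 * retake the lock before publishing the outcome.
 */
static int demo_load_slot(struct kc_entry *entry)
{
	int ret;

	/* caller holds kc_lock at this point */
	entry->state = ACTIVE_ICE_PRELOAD;	/* slot can no longer be picked */
	kc_spin_unlock();

	ret = demo_slow_load(entry->key_index);	/* sleeping call, lock dropped */

	kc_spin_lock();
	entry->state = ret ? SCM_ERROR : ACTIVE_ICE_LOADED;
	return ret;
}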
+ * Entry that is going to be used is marked as being used, it will mark + * as not being used when ICE finishes using it and pfk_kc_load_key_end + * will be invoked. + * As QSEE calls can only be done from a non-atomic context, when @async flag + * is set to 'false', it specifies that it is ok to make the calls in the + * current context. Otherwise, when @async is set, the caller should retry the + * call again from a different context, and -EAGAIN error will be returned. + * + * Return 0 in case of success, error otherwise + */ +int pfk_kc_load_key_start(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size, u32 *key_index, + bool async) +{ + int ret = 0; + struct kc_entry *entry = NULL; + bool entry_exists = false; + + if (!kc_is_ready()) + return -ENODEV; + + if (!key || !salt || !key_index) { + pr_err("%s key/salt/key_index NULL\n", __func__); + return -EINVAL; + } + + if (key_size != PFK_KC_KEY_SIZE) { + pr_err("unsupported key size %zu\n", key_size); + return -EINVAL; + } + + if (salt_size != PFK_KC_SALT_SIZE) { + pr_err("unsupported salt size %zu\n", salt_size); + return -EINVAL; + } + + kc_spin_lock(); + + entry = kc_find_key(key, key_size, salt, salt_size); + if (!entry) { + if (async) { + pr_debug("%s task will populate entry\n", __func__); + kc_spin_unlock(); + return -EAGAIN; + } + + entry = kc_find_oldest_entry_non_locked(); + if (!entry) { + /* could not find a single non locked entry, + * return EBUSY to upper layers so that the + * request will be rescheduled + */ + kc_spin_unlock(); + return -EBUSY; + } + } else { + entry_exists = true; + } + + pr_debug("entry with index %d is in state %d\n", + entry->key_index, entry->state); + + switch (entry->state) { + case (INACTIVE): + if (entry_exists) { + kc_update_timestamp(entry); + entry->state = ACTIVE_ICE_LOADED; + + if (!strcmp(s_type, (char *)PFK_UFS)) { + if (async) + entry->loaded_ref_cnt++; + } else { + entry->loaded_ref_cnt++; + } + break; + } + case (FREE): + ret = kc_update_entry(entry, key, key_size, salt, salt_size); + if (ret) { + entry->state = SCM_ERROR; + entry->scm_error = ret; + pr_err("%s: key load error (%d)\n", __func__, ret); + } else { + kc_update_timestamp(entry); + entry->state = ACTIVE_ICE_LOADED; + + /* + * In case of UFS only increase ref cnt for async calls, + * sync calls from within work thread do not pass + * requests further to HW + */ + if (!strcmp(s_type, (char *)PFK_UFS)) { + if (async) + entry->loaded_ref_cnt++; + } else { + entry->loaded_ref_cnt++; + } + } + break; + case (ACTIVE_ICE_PRELOAD): + case (INACTIVE_INVALIDATING): + ret = -EAGAIN; + break; + case (ACTIVE_ICE_LOADED): + kc_update_timestamp(entry); + + if (!strcmp(s_type, (char *)PFK_UFS)) { + if (async) + entry->loaded_ref_cnt++; + } else { + entry->loaded_ref_cnt++; + } + break; + case(SCM_ERROR): + ret = entry->scm_error; + kc_clear_entry(entry); + entry->state = FREE; + break; + default: + pr_err("invalid state %d for entry with key index %d\n", + entry->state, entry->key_index); + ret = -EINVAL; + } + + *key_index = entry->key_index; + kc_spin_unlock(); + + return ret; +} + +/** + * pfk_kc_load_key_end() - finish the process of key loading that was started + * by pfk_kc_load_key_start + * by marking the entry as not + * being in use + * @key: pointer to the key + * @key_size: the size of the key + * @salt: pointer to the salt + * @salt_size: the size of the salt + * + */ +void pfk_kc_load_key_end(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size) +{ + 
struct kc_entry *entry = NULL; + struct task_struct *tmp_pending = NULL; + int ref_cnt = 0; + + if (!kc_is_ready()) + return; + + if (!key || !salt) + return; + + if (key_size != PFK_KC_KEY_SIZE) + return; + + if (salt_size != PFK_KC_SALT_SIZE) + return; + + kc_spin_lock(); + + entry = kc_find_key(key, key_size, salt, salt_size); + if (!entry) { + kc_spin_unlock(); + pr_err("internal error, there should an entry to unlock\n"); + + return; + } + ref_cnt = --entry->loaded_ref_cnt; + + if (ref_cnt < 0) + pr_err("internal error, ref count should never be negative\n"); + + if (!ref_cnt) { + entry->state = INACTIVE; + /* + * wake-up invalidation if it's waiting + * for the entry to be released + */ + if (entry->thread_pending) { + tmp_pending = entry->thread_pending; + entry->thread_pending = NULL; + + kc_spin_unlock(); + wake_up_process(tmp_pending); + return; + } + } + + kc_spin_unlock(); +} + +/** + * pfk_kc_remove_key() - remove the key from cache and from ICE engine + * @key: pointer to the key + * @key_size: the size of the key + * @salt: pointer to the key + * @salt_size: the size of the key + * + * Return 0 in case of success, error otherwise (also in case of non + * (existing key) + */ +int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size) +{ + struct kc_entry *entry = NULL; + int res = 0; + + if (!kc_is_ready()) + return -ENODEV; + + if (!key) + return -EINVAL; + + if (!salt) + return -EINVAL; + + if (key_size != PFK_KC_KEY_SIZE) + return -EINVAL; + + if (salt_size != PFK_KC_SALT_SIZE) + return -EINVAL; + + kc_spin_lock(); + + entry = kc_find_key(key, key_size, salt, salt_size); + if (!entry) { + pr_debug("%s: key does not exist\n", __func__); + kc_spin_unlock(); + return -EINVAL; + } + + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + return res; + } + kc_clear_entry(entry); + + kc_spin_unlock(); + + qti_pfk_ice_invalidate_key(entry->key_index, s_type); + + kc_spin_lock(); + kc_entry_finish_invalidating(entry); + kc_spin_unlock(); + + return 0; +} + +/** + * pfk_kc_remove_key() - remove the key from cache and from ICE engine + * when no salt is available. 
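Taken together, pfk_kc_load_key_start() and pfk_kc_load_key_end() above define the calling convention a storage path is expected to follow: obtain an ICE slot (retrying from a sleepable context on -EAGAIN and rescheduling on -EBUSY), program the returned index into the request, and drop the reference once the hardware is done with it. A minimal caller sketch under those assumptions; demo_submit() and demo_program_ice() are hypothetical and for illustration only:

/* Hypothetical: hand the ICE key slot to the block request. */
static void demo_program_ice(u32 ice_index)
{
}

static int demo_submit(const unsigned char *key, const unsigned char *salt,
		       bool in_atomic)
{
	u32 ice_index;
	int ret;

	/* async == true tells the cache we cannot make SCM calls here */
	ret = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
				    PFK_KC_SALT_SIZE, &ice_index, in_atomic);
	if (ret == -EAGAIN)
		return ret;	/* retry later from a context that may sleep */
	if (ret)
		return ret;	/* e.g. -EBUSY: every slot is in flight, reschedule */

	demo_program_ice(ice_index);

	/* ... once the request completes ... */
	pfk_kc_load_key_end(key, PFK_KC_KEY_SIZE, salt, PFK_KC_SALT_SIZE);

	return 0;
}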
Will only search key part, if there are several, + * all will be removed + * + * @key: pointer to the key + * @key_size: the size of the key + * + * Return 0 in case of success, error otherwise (also for non-existing key) + */ +int pfk_kc_remove_key(const unsigned char *key, size_t key_size) +{ + struct kc_entry *entry = NULL; + int index = 0; + int temp_indexes[PFK_KC_TABLE_SIZE] = {0}; + int temp_indexes_size = 0; + int i = 0; + int res = 0; + + if (!kc_is_ready()) + return -ENODEV; + + if (!key) + return -EINVAL; + + if (key_size != PFK_KC_KEY_SIZE) + return -EINVAL; + + memset(temp_indexes, -1, sizeof(temp_indexes)); + + kc_spin_lock(); + + entry = kc_find_key_at_index(key, key_size, NULL, 0, &index); + if (!entry) { + pr_err("%s: key does not exist\n", __func__); + kc_spin_unlock(); + return -EINVAL; + } + + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + return res; + } + + temp_indexes[temp_indexes_size++] = index; + kc_clear_entry(entry); + + /* let's clean additional entries with the same key if there are any */ + do { + index++; + entry = kc_find_key_at_index(key, key_size, NULL, 0, &index); + if (!entry) + break; + + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + goto out; + } + + temp_indexes[temp_indexes_size++] = index; + + kc_clear_entry(entry); + + + } while (true); + + kc_spin_unlock(); + + temp_indexes_size--; + for (i = temp_indexes_size; i >= 0 ; i--) + qti_pfk_ice_invalidate_key( + kc_entry_at_index(temp_indexes[i])->key_index, + s_type); + + /* fall through */ + res = 0; + +out: + kc_spin_lock(); + for (i = temp_indexes_size; i >= 0 ; i--) + kc_entry_finish_invalidating( + kc_entry_at_index(temp_indexes[i])); + kc_spin_unlock(); + + return res; +} + +/** + * pfk_kc_clear() - clear the table and remove all keys from ICE + * + * Return 0 on success, error otherwise + * + */ +int pfk_kc_clear(void) +{ + struct kc_entry *entry = NULL; + int i = 0; + int res = 0; + + if (!kc_is_ready()) + return -ENODEV; + + kc_spin_lock(); + for (i = 0; i < PFK_KC_TABLE_SIZE; i++) { + entry = kc_entry_at_index(i); + res = kc_entry_start_invalidating(entry); + if (res != 0) { + kc_spin_unlock(); + goto out; + } + kc_clear_entry(entry); + } + kc_spin_unlock(); + + for (i = 0; i < PFK_KC_TABLE_SIZE; i++) + qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index, + s_type); + + /* fall through */ + res = 0; +out: + kc_spin_lock(); + for (i = 0; i < PFK_KC_TABLE_SIZE; i++) + kc_entry_finish_invalidating(kc_entry_at_index(i)); + kc_spin_unlock(); + + return res; +} + +/** + * pfk_kc_clear_on_reset() - clear the table and remove all keys from ICE + * The assumption is that at this point we don't have any pending transactions + * Also, there is no need to clear keys from ICE + * + * Return 0 on success, error otherwise + * + */ +void pfk_kc_clear_on_reset(void) +{ + struct kc_entry *entry = NULL; + int i = 0; + + if (!kc_is_ready()) + return; + + kc_spin_lock(); + for (i = 0; i < PFK_KC_TABLE_SIZE; i++) { + entry = kc_entry_at_index(i); + kc_clear_entry(entry); + } + kc_spin_unlock(); +} + +static int pfk_kc_find_storage_type(char **device) +{ + char boot[20] = {'\0'}; + char *match = (char *)strnstr(saved_command_line, + "androidboot.bootdevice=", + strlen(saved_command_line)); + if (match) { + memcpy(boot, (match + strlen("androidboot.bootdevice=")), + sizeof(boot) - 1); + if (strnstr(boot, PFK_UFS, strlen(boot))) + *device = PFK_UFS; + + return 0; + } + return -EINVAL; +} + +static int __init pfk_kc_pre_init(void) +{ + 
return pfk_kc_find_storage_type(&s_type); +} + +static void __exit pfk_kc_exit(void) +{ + s_type = NULL; +} + +module_init(pfk_kc_pre_init); +module_exit(pfk_kc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Per-File-Key-KC driver"); diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h new file mode 100644 index 0000000000000000000000000000000000000000..6adeee2259cdb63088f6247c09dec7beff69827a --- /dev/null +++ b/security/pfe/pfk_kc.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef PFK_KC_H_ +#define PFK_KC_H_ + +#include + +int pfk_kc_init(void); +int pfk_kc_deinit(void); +int pfk_kc_load_key_start(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size, u32 *key_index, + bool async); +void pfk_kc_load_key_end(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size); +int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size, + const unsigned char *salt, size_t salt_size); +int pfk_kc_remove_key(const unsigned char *key, size_t key_size); +int pfk_kc_clear(void); +void pfk_kc_clear_on_reset(void); +extern char *saved_command_line; + + +#endif /* PFK_KC_H_ */ diff --git a/security/security.c b/security/security.c index 264a5e5a0595eda46e846ce1430b79f43eec008f..d0d99921b75138e8579f76edcf53859743dc5639 100644 --- a/security/security.c +++ b/security/security.c @@ -612,6 +612,14 @@ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode } EXPORT_SYMBOL_GPL(security_inode_create); +int security_inode_post_create(struct inode *dir, struct dentry *dentry, + umode_t mode) +{ + if (unlikely(IS_PRIVATE(dir))) + return 0; + return call_int_hook(inode_post_create, 0, dir, dentry, mode); +} + int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2e3a627fc0b1f034a68469ccab6232cae6e41f5a..dd9ca3b13547679567163c309f9c3788f270b196 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -102,7 +102,7 @@ static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); #ifdef CONFIG_SECURITY_SELINUX_DEVELOP -int selinux_enforcing; +int selinux_enforcing __rticdata; static int __init enforcing_setup(char *str) { diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index 3d54468ce3342851169396813c48fc3a0428a813..af8582501f93c21c6f505fd57b3b83eeecede767 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -26,8 +26,9 @@ #include #include #include -#include "flask.h" -#include "avc.h" +//#include "flask.h" +//#include "avc.h" +#include "security.h" struct task_security_struct { u32 osid; /* SID prior to last execve */ @@ -64,6 +65,8 @@ struct inode_security_struct { u32 sid; /* SID of this object */ u16 sclass; /* security class of this object */ unsigned char initialized; /* initialization flag */ + u32 tag; /* Per-File-Encryption tag */ + void *pfk_data; /* Per-File-Key data from 
ecryptfs */ spinlock_t lock; }; diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 02f0412d42f24d485ed98bc7ff0c70fd5c7b8f47..de4c7d32b955e26aa2badacd8617809ec6a5eed6 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -13,7 +13,6 @@ #include #include #include -#include "flask.h" #define SECSID_NULL 0x00000000 /* unspecified SID */ #define SECSID_WILD 0xffffffff /* wildcard SID */ diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index b275743e23cc1d919d1049f873d8d92319f9d2a2..56766f038fe960d2f1d7864c06647c718cfd80d0 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -91,7 +91,7 @@ static DEFINE_RWLOCK(policy_rwlock); static struct sidtab sidtab; struct policydb policydb; -int ss_initialized; +int ss_initialized __rticdata; /* * The largest sequence number that has been used when diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index f055ca10bbc1d33c9c1cee1fd913b7c930984ac1..1583dce5486592c9cba780086f07b75140deac82 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -115,6 +115,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream) return -ENOMEM; runtime->substream = substream; spin_lock_init(&runtime->lock); + mutex_init(&runtime->realloc_mutex); init_waitqueue_head(&runtime->sleep); INIT_WORK(&runtime->event_work, snd_rawmidi_input_event_work); runtime->event = NULL; @@ -636,8 +637,10 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, struct snd_rawmidi_params * params) { char *newbuf; + char *oldbuf; struct snd_rawmidi_runtime *runtime = substream->runtime; - + unsigned long flags; + if (substream->append && substream->use_count > 1) return -EBUSY; snd_rawmidi_drain_output(substream); @@ -648,13 +651,22 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream, return -EINVAL; } if (params->buffer_size != runtime->buffer_size) { - newbuf = krealloc(runtime->buffer, params->buffer_size, + mutex_lock(&runtime->realloc_mutex); + newbuf = __krealloc(runtime->buffer, params->buffer_size, GFP_KERNEL); - if (!newbuf) + if (!newbuf) { + mutex_unlock(&runtime->realloc_mutex); return -ENOMEM; + } + spin_lock_irqsave(&runtime->lock, flags); + oldbuf = runtime->buffer; runtime->buffer = newbuf; runtime->buffer_size = params->buffer_size; runtime->avail = runtime->buffer_size; + spin_unlock_irqrestore(&runtime->lock, flags); + if (oldbuf != newbuf) + kfree(oldbuf); + mutex_unlock(&runtime->realloc_mutex); } runtime->avail_min = params->avail_min; substream->active_sensing = !params->no_active_sensing; @@ -666,7 +678,9 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, struct snd_rawmidi_params * params) { char *newbuf; + char *oldbuf; struct snd_rawmidi_runtime *runtime = substream->runtime; + unsigned long flags; snd_rawmidi_drain_input(substream); if (params->buffer_size < 32 || params->buffer_size > 1024L * 1024L) { @@ -676,12 +690,21 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream, return -EINVAL; } if (params->buffer_size != runtime->buffer_size) { - newbuf = krealloc(runtime->buffer, params->buffer_size, + mutex_lock(&runtime->realloc_mutex); + newbuf = __krealloc(runtime->buffer, params->buffer_size, GFP_KERNEL); - if (!newbuf) + if (!newbuf) { + mutex_unlock(&runtime->realloc_mutex); return -ENOMEM; + } + spin_lock_irqsave(&runtime->lock, flags); + oldbuf = runtime->buffer; runtime->buffer = newbuf; runtime->buffer_size 
= params->buffer_size; + spin_unlock_irqrestore(&runtime->lock, flags); + if (oldbuf != newbuf) + kfree(oldbuf); + mutex_unlock(&runtime->realloc_mutex); } runtime->avail_min = params->avail_min; return 0; @@ -953,6 +976,8 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, struct snd_rawmidi_runtime *runtime = substream->runtime; unsigned long appl_ptr; + if (userbuf) + mutex_lock(&runtime->realloc_mutex); spin_lock_irqsave(&runtime->lock, flags); while (count > 0 && runtime->avail) { count1 = runtime->buffer_size - runtime->appl_ptr; @@ -973,6 +998,7 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, spin_unlock_irqrestore(&runtime->lock, flags); if (copy_to_user(userbuf + result, runtime->buffer + appl_ptr, count1)) { + mutex_unlock(&runtime->realloc_mutex); return result > 0 ? result : -EFAULT; } spin_lock_irqsave(&runtime->lock, flags); @@ -981,6 +1007,8 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, count -= count1; } spin_unlock_irqrestore(&runtime->lock, flags); + if (userbuf) + mutex_unlock(&runtime->realloc_mutex); return result; } @@ -1245,10 +1273,14 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, return -EINVAL; result = 0; + if (userbuf) + mutex_lock(&runtime->realloc_mutex); spin_lock_irqsave(&runtime->lock, flags); if (substream->append) { if ((long)runtime->avail < count) { spin_unlock_irqrestore(&runtime->lock, flags); + if (userbuf) + mutex_unlock(&runtime->realloc_mutex); return -EAGAIN; } } @@ -1284,6 +1316,8 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, __end: count1 = runtime->avail < runtime->buffer_size; spin_unlock_irqrestore(&runtime->lock, flags); + if (userbuf) + mutex_unlock(&runtime->realloc_mutex); if (count1) snd_rawmidi_output_trigger(substream, 1); return result; diff --git a/sound/core/timer.c b/sound/core/timer.c index 626f47b322cc30af81fce8db4c655ee0b76194a3..1c586c7e928aea3a359e3519d3c42fe211c41419 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -1352,6 +1352,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && tu->last_resolution != resolution) { + memset(&r1, 0, sizeof(r1)); r1.event = SNDRV_TIMER_EVENT_RESOLUTION; r1.tstamp = tstamp; r1.val = resolution; @@ -1518,7 +1519,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) } else { if (id.subdevice < 0) id.subdevice = 0; - else + else if (id.subdevice < INT_MAX) id.subdevice++; } } diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index d1eb14842340e63dc20d0ad8a187cc131c3570a1..a12e594d4e3b3a23d78cc0b75531b845c7b6e331 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec, return err; strlcpy(pcm->name, cpcm->name, sizeof(pcm->name)); apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); - if (apcm == NULL) + if (apcm == NULL) { + snd_device_free(chip->card, pcm); return -ENOMEM; + } apcm->chip = chip; apcm->pcm = pcm; apcm->codec = codec; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 5b4dbcec6de8dab957f045786d4808b4edfd573f..ba9a7e552183392b1d07d535a91bdd9206cea65d 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 
0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6ae061183efffc105af43a6dcb7dd4a84486960f..02157e3d82bb7c1de78801d4cfc667a6aa5f5cd8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2518,6 +2518,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110), SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ), SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN), + SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270), SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000), SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ), @@ -4844,7 +4845,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec, struct alc_spec *spec = codec->spec; if (action == HDA_FIXUP_ACT_PRE_PROBE) { - spec->shutup = alc_no_shutup; /* reduce click noise */ spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; codec->power_save_node = 0; /* avoid click noises */ @@ -5243,6 +5243,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" +static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */ + hda_fixup_thinkpad_acpi(codec, fix, action); +} + /* for dell wmi mic mute led */ #include "dell_wmi_helper.c" @@ -5786,7 +5793,7 @@ static const struct hda_fixup alc269_fixups[] = { }, [ALC269_FIXUP_THINKPAD_ACPI] = { .type = HDA_FIXUP_FUNC, - .v.func = hda_fixup_thinkpad_acpi, + .v.func = alc_fixup_thinkpad_acpi, .chained = true, .chain_id = ALC269_FIXUP_SKU_IGNORE, }, @@ -6436,10 +6443,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), - SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre 
Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), - SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), @@ -6610,6 +6617,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60140}, {0x14, 0x90170110}, {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, + {0x12, 0x90a60140}, + {0x14, 0x90170110}, + {0x19, 0x02a11030}, + {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION, + {0x14, 0x90170110}, + {0x19, 0x02a11030}, + {0x1a, 0x02a11040}, + {0x1b, 0x01014020}, + {0x21, 0x0221101f}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60140}, {0x14, 0x90170150}, diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c index c53bd6f2c2d7f6179420c0f6db37b8d1b41c1838..3d011abaa2660a7cc92b4690006c107352862007 100644 --- a/sound/soc/cirrus/edb93xx.c +++ b/sound/soc/cirrus/edb93xx.c @@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = { .cpu_dai_name = "ep93xx-i2s", .codec_name = "spi0.0", .codec_dai_name = "cs4271-hifi", - .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF | + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &edb93xx_ops, }; diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c index 934f8aefdd90b8f94beda83dc82393d4091e4aec..0dc3852c46219f1aca029a08b7a46681d0b0a389 100644 --- a/sound/soc/cirrus/ep93xx-i2s.c +++ b/sound/soc/cirrus/ep93xx-i2s.c @@ -51,7 +51,9 @@ #define EP93XX_I2S_WRDLEN_24 (1 << 0) #define EP93XX_I2S_WRDLEN_32 (2 << 0) -#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */ +#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */ + +#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */ #define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */ #define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */ @@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai); - unsigned int clk_cfg, lin_ctrl; + unsigned int clk_cfg; + unsigned int txlin_ctrl = 0; + unsigned int rxlin_ctrl = 0; clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG); - lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA); switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: clk_cfg |= EP93XX_I2S_CLKCFG_REL; - lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST; break; case SND_SOC_DAIFMT_LEFT_J: clk_cfg &= ~EP93XX_I2S_CLKCFG_REL; - lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST; break; case SND_SOC_DAIFMT_RIGHT_J: clk_cfg &= ~EP93XX_I2S_CLKCFG_REL; - lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST; + rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST; + txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST; break; default: @@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* Negative bit clock, lrclk low on left word */ - clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | 
EP93XX_I2S_CLKCFG_REL); + clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS); break; case SND_SOC_DAIFMT_NB_IF: /* Negative bit clock, lrclk low on right word */ clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP; - clk_cfg |= EP93XX_I2S_CLKCFG_REL; + clk_cfg |= EP93XX_I2S_CLKCFG_LRS; break; case SND_SOC_DAIFMT_IB_NF: /* Positive bit clock, lrclk low on left word */ clk_cfg |= EP93XX_I2S_CLKCFG_CKP; - clk_cfg &= ~EP93XX_I2S_CLKCFG_REL; + clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS; break; case SND_SOC_DAIFMT_IB_IF: /* Positive bit clock, lrclk low on right word */ - clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL; + clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS; break; } /* Write new register values */ ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg); ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg); - ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl); - ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl); + ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl); + ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl); return 0; } diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c index 2334ec19e7ebb74d4e9b60db5fe37bb4abf6a10f..11ff7b2672b2250318ea17652054619bb7fb897f 100644 --- a/sound/soc/cirrus/snappercl15.c +++ b/sound/soc/cirrus/snappercl15.c @@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = { .codec_dai_name = "tlv320aic23-hifi", .codec_name = "tlv320aic23-codec.0-001a", .platform_name = "ep93xx-i2s", - .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF | + .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &snappercl15_ops, }; diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c index 129978d1243ebcdd3a1558c1c0f1f0dd2056bdd7..51ce53e23599a88b2d24a4732371a01139add2eb 100644 --- a/sound/soc/codecs/cs35l35.c +++ b/sound/soc/codecs/cs35l35.c @@ -1106,6 +1106,7 @@ static struct regmap_config cs35l35_regmap = { .readable_reg = cs35l35_readable_register, .precious_reg = cs35l35_precious_register, .cache_type = REGCACHE_RBTREE, + .use_single_rw = true, }; static irqreturn_t cs35l35_irq(int irq, void *data) diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 8c7063e1aa46a42b9ca3bfcc0d1e1893a15ed955..0b9b014b4bb6c4fb07ab6bbb2a4217b3e21d20f9 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -1184,7 +1184,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return irq; } - ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler, + ret = devm_request_threaded_irq(dev, irq, NULL, + pm8916_mbhc_switch_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mbhc switch irq", priv); @@ -1198,7 +1199,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return irq; } - ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler, + ret = devm_request_threaded_irq(dev, irq, NULL, + mbhc_btn_press_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mbhc btn press irq", priv); @@ -1211,7 +1213,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return irq; } - ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler, + ret = devm_request_threaded_irq(dev, irq, NULL, + mbhc_btn_release_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mbhc btn release irq", priv); diff --git a/sound/soc/codecs/rt5514.c 
b/sound/soc/codecs/rt5514.c index d7956ababd11775b0b9d04552faa68f2fadd2545..e52e68b562382cd83a2723dcc2277f526e51cbc1 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = { {RT5514_PLL3_CALIB_CTRL5, 0x40220012}, {RT5514_DELAY_BUF_CTRL1, 0x7fff006a}, {RT5514_DELAY_BUF_CTRL3, 0x00000000}, + {RT5514_ASRC_IN_CTRL1, 0x00000003}, {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, @@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg) case RT5514_PLL3_CALIB_CTRL5: case RT5514_DELAY_BUF_CTRL1: case RT5514_DELAY_BUF_CTRL3: + case RT5514_ASRC_IN_CTRL1: case RT5514_DOWNFILTER0_CTRL1: case RT5514_DOWNFILTER0_CTRL2: case RT5514_DOWNFILTER0_CTRL3: @@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev, case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5: case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1: case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3: + case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1: case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1: case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2: case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3: diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index e64f660e65876cf0ec81a30e688fb44b12bb5127..a552c7ea4c0e9c13cf455dd83519901ffe4eda57 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -432,6 +432,8 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, static void dapm_kcontrol_free(struct snd_kcontrol *kctl) { struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); + + list_del(&data->paths); kfree(data->wlist); kfree(data); } diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index e5049fbfc4f1770db65f01d2eb6495d55413557a..30cdad2eab7f2b19d43879a9219f647c79ac415a 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -510,7 +510,7 @@ static void remove_widget(struct snd_soc_component *comp, */ if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) { /* enumerated widget mixer */ - for (i = 0; i < w->num_kcontrols; i++) { + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { struct snd_kcontrol *kcontrol = w->kcontrols[i]; struct soc_enum *se = (struct soc_enum *)kcontrol->private_value; @@ -528,7 +528,7 @@ static void remove_widget(struct snd_soc_component *comp, kfree(w->kcontrol_news); } else { /* volume mixer or bytes controls */ - for (i = 0; i < w->num_kcontrols; i++) { + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { struct snd_kcontrol *kcontrol = w->kcontrols[i]; if (dobj->widget.kcontrol_type @@ -2571,7 +2571,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index) /* match index */ if (dobj->index != index && - dobj->index != SND_SOC_TPLG_INDEX_ALL) + index != SND_SOC_TPLG_INDEX_ALL) continue; switch (dobj->type) { diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c index 4f254bcc442347ffb529925fed70d7e299d50773..61b9aa5d641529666c2f7695fc6c1804e86899ce 100644 --- a/tools/net/bpf_dbg.c +++ b/tools/net/bpf_dbg.c @@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file) static int cmd_load(char *arg) { - char *subcmd, *cont, *tmp = strdup(arg); + char *subcmd, *cont = NULL, *tmp = strdup(arg); int ret = CMD_OK; subcmd = strtok_r(tmp, " ", &cont); @@ -1073,7 +1073,10 @@ static int cmd_load(char *arg) bpf_reset(); bpf_reset_breakpoints(); - ret = cmd_load_bpf(cont); + if (!cont) + ret = CMD_ERR; 
+ else + ret = cmd_load_bpf(cont); } else if (matches(subcmd, "pcap") == 0) { ret = cmd_load_pcap(cont); } else { diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h index b3e32b010ab194ed613034234c403c4067502776..c2c01f84df75f1f9b35a3c898686a82973026d88 100644 --- a/tools/objtool/arch/x86/include/asm/insn.h +++ b/tools/objtool/arch/x86/include/asm/insn.h @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +#define POP_SS_OPCODE 0x1f +#define MOV_SREG_OPCODE 0x8e + +/* + * Intel SDM Vol.3A 6.8.3 states; + * "Any single-step trap that would be delivered following the MOV to SS + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is + * suppressed." + * This function returns true if @insn is MOV SS or POP SS. On these + * instructions, single stepping is suppressed. + */ +static inline int insn_masking_exception(struct insn *insn) +{ + return insn->opcode.bytes[0] == POP_SS_OPCODE || + (insn->opcode.bytes[0] == MOV_SREG_OPCODE && + X86_MODRM_REG(insn->modrm.bytes[0]) == 2); +} + #endif /* _ASM_X86_INSN_H */ diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json new file mode 100644 index 0000000000000000000000000000000000000000..b4791b443a6678e2da59ad890023d1db40f143d6 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json @@ -0,0 +1,1453 @@ +[ + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.", + "EventCode": "0x2E", + "Counter": "0,1,2,3", + "UMask": "0x41", + "PEBScounters": "0,1,2,3", + "EventName": "LONGEST_LAT_CACHE.MISS", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "L2 cache request misses" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.", + "EventCode": "0x2E", + "Counter": "0,1,2,3", + "UMask": "0x4f", + "PEBScounters": "0,1,2,3", + "EventName": "LONGEST_LAT_CACHE.REFERENCE", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "L2 cache requests" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.", + "EventCode": "0x30", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "L2_REJECT_XQ.ALL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Requests rejected by the XQ" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. 
The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.", + "EventCode": "0x31", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "CORE_REJECT_L2Q.ALL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Requests rejected by the L2Q" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.", + "EventCode": "0x51", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "DL1.REPLACEMENT", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "L1 Cache evictions for dirty data" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.", + "EventCode": "0x86", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss." + }, + { + "CollectPEBSRecord": "1", + "EventCode": "0xB7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE", + "PDIR_COUNTER": "na", + "SampleAfterValue": "100007", + "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. 
See the SDM for a complete description of which memory load accesses are locks.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x21", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", + "SampleAfterValue": "200003", + "BriefDescription": "Locked load uops retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x41", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x42", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", + "SampleAfterValue": "200003", + "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x43", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.SPLIT", + "SampleAfterValue": "200003", + "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts the number of load uops retired.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x81", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts the number of store uops retired.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x82", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.ALL_STORES", + "SampleAfterValue": "200003", + "BriefDescription": "Store uops retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x83", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.ALL", + "SampleAfterValue": "200003", + "BriefDescription": "Memory uops retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts load uops retired that hit the L1 data cache.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts load uops retired that hit in the L2 cache.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired that hit L2 (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + 
"PublicDescription": "Counts load uops retired that miss the L1 data cache.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts load uops retired that miss in the L2 cache.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired that missed L2 (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x20", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.HITM", + "SampleAfterValue": "200003", + "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x40", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "Loads retired that hit WCB (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. 
A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.", + "EventCode": "0xD1", + "Counter": "0,1,2,3", + "UMask": "0x80", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "Loads retired that came from DRAM (Precise event capable)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010008", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040008", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000008", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000008", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000008", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010020", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040020", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000020", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000020", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000020", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010400", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040400", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000400", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000400", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000400", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000011000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000041000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200001000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000001000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000001000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000012000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000042000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200002000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000002000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000002000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000014800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000044800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200004800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000004800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000004800", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000018000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the uncore subsystem hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. 
Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000013010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000043010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200003010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. 
Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000003010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000003010", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000013091", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000043091", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200003091", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000003091", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000003091", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000010022", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0000040022", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x0200000022", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. 
Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x1000000022", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x4000000022", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x00000132b7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x00000432b7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x02000032b7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x10000032b7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", + "Offcore": "1" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", + "EventCode": "0xB7", + "MSRValue": "0x40000032b7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING", + "PDIR_COUNTER": "na", + "MSRIndex": "0x1a6", + "SampleAfterValue": "100007", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.", + "Offcore": "1" + } +] \ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json new file mode 100644 index 0000000000000000000000000000000000000000..a7878965ceab278a83eac138313ca72d8d1cd01a --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json @@ -0,0 +1,62 @@ +[ + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.", + "EventCode": "0x80", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "ICACHE.HIT", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "References per ICache line that are available in the ICache (hit). 
This event counts differently than Intel processors based on Silvermont microarchitecture" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.", + "EventCode": "0x80", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "ICACHE.MISSES", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.", + "EventCode": "0x80", + "Counter": "0,1,2,3", + "UMask": "0x3", + "PEBScounters": "0,1,2,3", + "EventName": "ICACHE.ACCESSES", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. 
The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.", + "EventCode": "0xE7", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "MS_DECODED.MS_ENTRY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "MS decode starts" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.", + "EventCode": "0xE9", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Decode restrictions due to predicting wrong instruction length" + } +] \ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json new file mode 100644 index 0000000000000000000000000000000000000000..91e0815f3ffbb15bb16e7e6b8915db3b186d4ec2 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json @@ -0,0 +1,38 @@ +[ + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.", + "EventCode": "0x13", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops that split a page (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.", + "EventCode": "0x13", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT", + "SampleAfterValue": "200003", + "BriefDescription": "Store uops that split a page (Precise event capable)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "Machine clears due to memory ordering issue" + } +] \ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json new file mode 100644 index 0000000000000000000000000000000000000000..b860374418abb1a423a30f36efdf0381e4bd862a --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json @@ -0,0 +1,98 @@ +[ + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.", + "EventCode": "0x86", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "FETCH_STALL.ALL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles code-fetch stalled due to any reason." 
+ }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.", + "EventCode": "0x86", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding." + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).", + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Unfilled issue slots per cycle" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.", + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.", + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Unfilled issue slots per cycle to recover" + }, + { + "CollectPEBSRecord": "2", + "PublicDescription": "Counts hardware interrupts received by the processor.", + "EventCode": "0xCB", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "HW_INTERRUPTS.RECEIVED", + "PDIR_COUNTER": "na", + "SampleAfterValue": "203", + "BriefDescription": "Hardware interrupts received" + }, + { + "CollectPEBSRecord": "2", + "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). 
Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.", + "EventCode": "0xCB", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "HW_INTERRUPTS.MASKED", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles hardware interrupts are masked" + }, + { + "CollectPEBSRecord": "2", + "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).", + "EventCode": "0xCB", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles pending interrupts are masked" + } +] \ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json new file mode 100644 index 0000000000000000000000000000000000000000..ccf1aed69197bd77459db10b8f7ce7aebc182f90 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json @@ -0,0 +1,544 @@ +[ + { + "PEBS": "2", + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.", + "EventCode": "0x00", + "Counter": "Fixed counter 0", + "UMask": "0x1", + "PEBScounters": "32", + "EventName": "INST_RETIRED.ANY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Instructions retired (Fixed event)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.", + "EventCode": "0x00", + "Counter": "Fixed counter 1", + "UMask": "0x2", + "PEBScounters": "33", + "EventName": "CPU_CLK_UNHALTED.CORE", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Core cycles when core is not halted (Fixed event)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.", + "EventCode": "0x00", + "Counter": "Fixed counter 2", + "UMask": "0x3", + "PEBScounters": "34", + "EventName": "CPU_CLK_UNHALTED.REF_TSC", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Reference cycles when core is not halted (Fixed event)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. 
The forward might occur subsequently when the data is available.", + "EventCode": "0x03", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "LD_BLOCKS.DATA_UNKNOWN", + "SampleAfterValue": "200003", + "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.", + "EventCode": "0x03", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "SampleAfterValue": "200003", + "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.", + "EventCode": "0x03", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "LD_BLOCKS.4K_ALIAS", + "SampleAfterValue": "200003", + "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).", + "EventCode": "0x03", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "LD_BLOCKS.UTLB_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts anytime a load that retires is blocked for any reason.", + "EventCode": "0x03", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "LD_BLOCKS.ALL_BLOCK", + "SampleAfterValue": "200003", + "BriefDescription": "Loads blocked (Precise event capable)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.", + "EventCode": "0x0E", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "UOPS_ISSUED.ANY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Uops issued to the back end per cycle" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.", + "EventCode": "0x3C", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "CPU_CLK_UNHALTED.CORE_P", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Core cycles when core is not halted" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Reference cycles when core is not halted. 
This event uses a (_P)rogrammable general purpose performance counter.", + "EventCode": "0x3C", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "CPU_CLK_UNHALTED.REF", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Reference cycles when core is not halted" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.", + "EventCode": "0x9C", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "UOPS_NOT_DELIVERED.ANY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Uops requested but not-delivered to the back-end per cycle" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. 
Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.", + "EventCode": "0xC0", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "INST_RETIRED.ANY_P", + "SampleAfterValue": "2000003", + "BriefDescription": "Instructions retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.", + "EventCode": "0xC0", + "Counter": "0,1,2,3", + "UMask": "0x0", + "EventName": "INST_RETIRED.PREC_DIST", + "SampleAfterValue": "2000003", + "BriefDescription": "Instructions retired - using Reduced Skid PEBS feature" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts uops which retired.", + "EventCode": "0xC2", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "UOPS_RETIRED.ANY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Uops retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.", + "EventCode": "0xC2", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "UOPS_RETIRED.MS", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "MS uops retired (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of floating point divide uops retired.", + "EventCode": "0xC2", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "UOPS_RETIRED.FPDIV", + "SampleAfterValue": "2000003", + "BriefDescription": "Floating point divide uops retired (Precise Event Capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of integer divide uops retired.", + "EventCode": "0xC2", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "UOPS_RETIRED.IDIV", + "SampleAfterValue": "2000003", + "BriefDescription": "Integer divide uops retired (Precise Event Capable)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts machine clears for any reason.", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "MACHINE_CLEARS.ALL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "All machine clears" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "MACHINE_CLEARS.SMC", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "Self-Modifying Code detected" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. 
For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "MACHINE_CLEARS.FP_ASSIST", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "Machine clears due to FP assists" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "MACHINE_CLEARS.DISAMBIGUATION", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "Machine clears due to memory disambiguation" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x20", + "PEBScounters": "0,1,2,3", + "EventName": "MACHINE_CLEARS.PAGE_FAULT", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "Machines clear due to a page fault" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.ALL_BRANCHES", + "SampleAfterValue": "200003", + "BriefDescription": "Retired branch instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0x7e", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.JCC", + "SampleAfterValue": "200003", + "BriefDescription": "Retired conditional branch instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts the number of taken branch instructions retired.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0x80", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES", + "SampleAfterValue": "200003", + "BriefDescription": "Retired taken branch instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts far branch instructions retired. 
This includes far jump, far call and return, and Interrupt call and return.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xbf", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "SampleAfterValue": "200003", + "BriefDescription": "Retired far branch instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xeb", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.NON_RETURN_IND", + "SampleAfterValue": "200003", + "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts near return branch instructions retired.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xf7", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.RETURN", + "SampleAfterValue": "200003", + "BriefDescription": "Retired near return instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts near CALL branch instructions retired.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xf9", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Retired near call instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts near indirect CALL branch instructions retired.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xfb", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.IND_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Retired near indirect call instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts near relative CALL branch instructions retired.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xfd", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.REL_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Retired near relative call instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0xfe", + "PEBScounters": "0,1,2,3", + "EventName": "BR_INST_RETIRED.TAKEN_JCC", + "SampleAfterValue": "200003", + "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", + "SampleAfterValue": "200003", + "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite 
condition).", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0x7e", + "PEBScounters": "0,1,2,3", + "EventName": "BR_MISP_RETIRED.JCC", + "SampleAfterValue": "200003", + "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0xeb", + "PEBScounters": "0,1,2,3", + "EventName": "BR_MISP_RETIRED.NON_RETURN_IND", + "SampleAfterValue": "200003", + "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0xf7", + "PEBScounters": "0,1,2,3", + "EventName": "BR_MISP_RETIRED.RETURN", + "SampleAfterValue": "200003", + "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0xfb", + "PEBScounters": "0,1,2,3", + "EventName": "BR_MISP_RETIRED.IND_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0xfe", + "PEBScounters": "0,1,2,3", + "EventName": "BR_MISP_RETIRED.TAKEN_JCC", + "SampleAfterValue": "200003", + "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts core cycles if either divide unit is busy.", + "EventCode": "0xCD", + "Counter": "0,1,2,3", + "UMask": "0x0", + "PEBScounters": "0,1,2,3", + "EventName": "CYCLES_DIV_BUSY.ALL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles a divider is busy" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts core cycles the integer divide unit is busy.", + "EventCode": "0xCD", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "CYCLES_DIV_BUSY.IDIV", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles the integer divide unit is busy" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts core cycles the floating point divide unit is busy.", + "EventCode": "0xCD", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "CYCLES_DIV_BUSY.FPDIV", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles the FP divide unit is busy" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of times a BACLEAR is signaled for any 
reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.", + "EventCode": "0xE6", + "Counter": "0,1,2,3", + "UMask": "0x1", + "PEBScounters": "0,1,2,3", + "EventName": "BACLEARS.ALL", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "BACLEARs asserted for any branch type" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts BACLEARS on return instructions.", + "EventCode": "0xE6", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "BACLEARS.RETURN", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "BACLEARs asserted for return branch" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.", + "EventCode": "0xE6", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "BACLEARS.COND", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "BACLEARs asserted for conditional branch" + } +] \ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json new file mode 100644 index 0000000000000000000000000000000000000000..0b53a3b0dfb87074aeab59bfd0a538e65dbf06da --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json @@ -0,0 +1,218 @@ +[ + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.", + "EventCode": "0x08", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walk completed due to a demand load to a 4K page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.", + "EventCode": "0x08", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.", + "EventCode": "0x08", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walk completed due to a demand load to a 1GB page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). 
Average cycles per walk can be calculated by dividing by the number of walks.", + "EventCode": "0x08", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_LOAD_MISSES.WALK_PENDING", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walks outstanding due to a demand load every cycle." + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.", + "EventCode": "0x49", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Page walk completed due to a demand data store to a 4K page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.", + "EventCode": "0x49", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.", + "EventCode": "0x49", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Page walk completed due to a demand data store to a 1GB page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.", + "EventCode": "0x49", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "DTLB_STORE_MISSES.WALK_PENDING", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walks outstanding due to a demand data store every cycle." + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.", + "EventCode": "0x4F", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "EPT.WALK_PENDING", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walks outstanding due to walking the EPT every cycle" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. 
The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.", + "EventCode": "0x81", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "ITLB.MISS", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "ITLB misses" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.", + "EventCode": "0x85", + "Counter": "0,1,2,3", + "UMask": "0x2", + "PEBScounters": "0,1,2,3", + "EventName": "ITLB_MISSES.WALK_COMPLETED_4K", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Page walk completed due to an instruction fetch in a 4K page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.", + "EventCode": "0x85", + "Counter": "0,1,2,3", + "UMask": "0x4", + "PEBScounters": "0,1,2,3", + "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.", + "EventCode": "0x85", + "Counter": "0,1,2,3", + "UMask": "0x8", + "PEBScounters": "0,1,2,3", + "EventName": "ITLB_MISSES.WALK_COMPLETED_1GB", + "PDIR_COUNTER": "na", + "SampleAfterValue": "2000003", + "BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page" + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.", + "EventCode": "0x85", + "Counter": "0,1,2,3", + "UMask": "0x10", + "PEBScounters": "0,1,2,3", + "EventName": "ITLB_MISSES.WALK_PENDING", + "PDIR_COUNTER": "na", + "SampleAfterValue": "200003", + "BriefDescription": "Page walks outstanding due to an instruction fetch every cycle." + }, + { + "CollectPEBSRecord": "1", + "PublicDescription": "Counts STLB flushes. 
The TLBs are flushed on instructions like INVLPG and MOV to CR3.", + "EventCode": "0xBD", + "Counter": "0,1,2,3", + "UMask": "0x20", + "PEBScounters": "0,1,2,3", + "EventName": "TLB_FLUSHES.STLB_ANY", + "PDIR_COUNTER": "na", + "SampleAfterValue": "20003", + "BriefDescription": "STLB flushes" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts load uops retired that caused a DTLB miss.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x11", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS", + "SampleAfterValue": "200003", + "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts store uops retired that caused a DTLB miss.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x12", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES", + "SampleAfterValue": "200003", + "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)" + }, + { + "PEBS": "2", + "CollectPEBSRecord": "2", + "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x13", + "PEBScounters": "0,1,2,3", + "EventName": "MEM_UOPS_RETIRED.DTLB_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)" + } +] \ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index 4ea068366c3e8e7a9d88a518b8a26d8a39141b08..fe1a2c47cabf22d9dcbb28cc1718911dd1fb88cb 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core GenuineIntel-6-36,v4,bonnell,core GenuineIntel-6-35,v4,bonnell,core GenuineIntel-6-5C,v8,goldmont,core +GenuineIntel-6-7A,v1,goldmontplus,core GenuineIntel-6-3C,v24,haswell,core GenuineIntel-6-45,v24,haswell,core GenuineIntel-6-46,v24,haswell,core diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 00c98c968cb15f5d41dbec59295d231d73207ac0..505c13bf7e30d210c700b3c686a23cd5311ed56b 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -352,6 +352,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || (strncmp(name, "[vdso]", 6) == 0) || + (strncmp(name, "[vdso32]", 8) == 0) || + (strncmp(name, "[vdsox32]", 9) == 0) || (strncmp(name, "[vsyscall]", 10) == 0)) { m->kmod = false; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index f9157aed12890f3f3dc417dba9c55aeea7ac537e..d404bed7003aacee1079af492ba3f20334edc06e 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -113,6 +113,7 @@ struct intel_pt_decoder { bool have_cyc; bool fixup_last_mtc; bool have_last_ip; + enum intel_pt_param_flags flags; uint64_t pos; uint64_t last_ip; uint64_t ip; @@ -226,6 +227,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) decoder->return_compression = params->return_compression; decoder->branch_enable = params->branch_enable; + 
decoder->flags = params->flags; + decoder->period = params->period; decoder->period_type = params->period_type; @@ -1097,6 +1100,15 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder) return ret; } +static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder, + struct intel_pt_insn *intel_pt_insn, + uint64_t ip, int err) +{ + return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err && + intel_pt_insn->branch == INTEL_PT_BR_INDIRECT && + ip == decoder->ip + intel_pt_insn->length; +} + static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) { struct intel_pt_insn intel_pt_insn; @@ -1109,10 +1121,11 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip); if (err == INTEL_PT_RETURN) return 0; - if (err == -EAGAIN) { + if (err == -EAGAIN || + intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) { if (intel_pt_fup_event(decoder)) return 0; - return err; + return -EAGAIN; } decoder->set_fup_tx_flags = false; if (err) @@ -1376,7 +1389,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder) { intel_pt_log("ERROR: Buffer overflow\n"); intel_pt_clear_tx_flags(decoder); - decoder->have_tma = false; decoder->cbr = 0; decoder->timestamp_insn_cnt = 0; decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; @@ -1604,7 +1616,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder) case INTEL_PT_PSB: case INTEL_PT_TSC: case INTEL_PT_TMA: - case INTEL_PT_CBR: case INTEL_PT_MODE_TSX: case INTEL_PT_BAD: case INTEL_PT_PSBEND: @@ -1620,6 +1631,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder) decoder->pkt_step = 0; return -ENOENT; + case INTEL_PT_CBR: + intel_pt_calc_cbr(decoder); + break; + case INTEL_PT_OVF: return intel_pt_overflow(decoder); diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h index fc1752d50019c680f0e29a7e2176e716015ff632..51c18d67f4ca855d02f19be640490ab7a9e74356 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h @@ -60,6 +60,14 @@ enum { INTEL_PT_ERR_MAX, }; +enum intel_pt_param_flags { + /* + * FUP packet can contain next linear instruction pointer instead of + * current linear instruction pointer. 
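+ *
+ * The tool sets this per-CPU (see the "GenuineIntel,6,92," cpuid check
+ * in intel_pt_alloc_queue()). When set, intel_pt_walk_fup() recognizes
+ * a FUP payload equal to decoder->ip plus the length of an indirect
+ * branch instruction (intel_pt_fup_with_nlip()) and handles it like
+ * the -EAGAIN case.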
+ */ + INTEL_PT_FUP_WITH_NLIP = 1 << 0, +}; + struct intel_pt_state { enum intel_pt_sample_type type; int err; @@ -106,6 +114,7 @@ struct intel_pt_params { unsigned int mtc_period; uint32_t tsc_ctc_ratio_n; uint32_t tsc_ctc_ratio_d; + enum intel_pt_param_flags flags; }; struct intel_pt_decoder; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index ba4c9dd186434a33c8c33a59ab8884fd7c679dd3..d426761a549d02d67756c541ea7ab0b2a0495e68 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c @@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf, if (len < offs) return INTEL_PT_NEED_MORE_BYTES; byte = buf[offs++]; - payload |= (byte >> 1) << shift; + payload |= ((uint64_t)byte >> 1) << shift; } packet->type = INTEL_PT_CYC; diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index d9573c1fa555d0316290301b5c114f465ad64f3e..3b118fa9da89bee1680b5662497854b49320905e 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -784,6 +784,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, unsigned int queue_nr) { struct intel_pt_params params = { .get_trace = 0, }; + struct perf_env *env = pt->machine->env; struct intel_pt_queue *ptq; ptq = zalloc(sizeof(struct intel_pt_queue)); @@ -865,6 +866,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, } } + if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18)) + params.flags |= INTEL_PT_FUP_WITH_NLIP; + ptq->decoder = intel_pt_decoder_new(¶ms); if (!ptq->decoder) goto out_free; @@ -1560,6 +1564,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq) if (intel_pt_is_switch_ip(ptq, state->to_ip)) { switch (ptq->switch_state) { + case INTEL_PT_SS_NOT_TRACING: case INTEL_PT_SS_UNKNOWN: case INTEL_PT_SS_EXPECTING_SWITCH_IP: err = intel_pt_next_tid(pt, ptq); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4e8dd5fd45fd2876c77450e86b11b092fe5bc769..ec40e47aa19873b800bbe0f279ac72de032bfeae 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2093,16 +2093,14 @@ static bool symbol__read_kptr_restrict(void) int symbol__annotation_init(void) { + if (symbol_conf.init_annotation) + return 0; + if (symbol_conf.initialized) { pr_err("Annotation needs to be init before symbol__init()\n"); return -1; } - if (symbol_conf.init_annotation) { - pr_warning("Annotation being initialized multiple times\n"); - return 0; - } - symbol_conf.priv_size += sizeof(struct annotation); symbol_conf.init_annotation = true; return 0; diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index f2019b37370d310cd845b6a42101d77cad77bc6b..6a4982d029bf1174fb6b3b4877ff34368a497da1 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -15,14 +15,29 @@ reset_tracer() { # reset the current tracer echo nop > current_tracer } -reset_trigger() { # reset all current setting triggers - grep -v ^# events/*/*/trigger | +reset_trigger_file() { + # remove action triggers first + grep -H ':on[^:]*(' $@ | + while read line; do + cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "` + file=`echo $line | cut -f1 -d:` + echo "!$cmd" >> $file + done + grep -Hv ^# $@ | while read line; do cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "` - echo "!$cmd" > `echo $line | cut -f1 -d:` + 
file=`echo $line | cut -f1 -d:` + echo "!$cmd" > $file done } +reset_trigger() { # reset all current setting triggers + if [ -d events/synthetic ]; then + reset_trigger_file events/synthetic/*/trigger + fi + reset_trigger_file events/*/*/trigger +} + reset_events_filter() { # reset all current setting filters grep -v ^none events/*/*/filter | while read line; do diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc new file mode 100644 index 0000000000000000000000000000000000000000..c193dce611a23fa0116791b62193641ecaec213c --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc @@ -0,0 +1,44 @@ +#!/bin/sh +# description: event trigger - test multiple actions on hist trigger + + +do_reset() { + reset_trigger + echo > set_event + clear_trace +} + +fail() { #msg + do_reset + echo $1 + exit_fail +} + +if [ ! -f set_event ]; then + echo "event tracing is not supported" + exit_unsupported +fi + +if [ ! -f synthetic_events ]; then + echo "synthetic event is not supported" + exit_unsupported +fi + +clear_synthetic_events +reset_tracer +do_reset + +echo "Test multiple actions on hist trigger" +echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events +TRIGGER1=events/sched/sched_wakeup/trigger +TRIGGER2=events/sched/sched_switch/trigger + +echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1 +echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2 +echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2 +echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2 +echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2 + +do_reset + +exit 0 diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index aa6e2d7f6a1fdc36e397a4cf5d43bc65068b6c13..903980921d9ebc3d6da86210df84f6c12e759db0 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ - protection_keys test_vdso test_vsyscall + protection_keys test_vdso test_vsyscall mov_ss_trap TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c new file mode 100644 index 0000000000000000000000000000000000000000..3c3a022654f36ee52c31a57f342083607a06fcda --- /dev/null +++ b/tools/testing/selftests/x86/mov_ss_trap.c @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS + * + * This does MOV SS from a watchpointed address followed by various + * types of kernel entries. A MOV SS that hits a watchpoint will queue + * up a #DB trap but will not actually deliver that trap. The trap + * will be delivered after the next instruction instead. The CPU's logic + * seems to be: + * + * - Any fault: drop the pending #DB trap. 
+ * - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then + * deliver #DB. + * - ICEBP: enter the kernel but do not deliver the watchpoint trap + * - breakpoint: only one #DB is delivered (phew!) + * + * There are plenty of ways for a kernel to handle this incorrectly. This + * test tries to exercise all the cases. + * + * This should mostly cover CVE-2018-1087 and CVE-2018-8897. + */ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define X86_EFLAGS_RF (1UL << 16) + +#if __x86_64__ +# define REG_IP REG_RIP +#else +# define REG_IP REG_EIP +#endif + +unsigned short ss; +extern unsigned char breakpoint_insn[]; +sigjmp_buf jmpbuf; +static unsigned char altstack_data[SIGSTKSZ]; + +static void enable_watchpoint(void) +{ + pid_t parent = getpid(); + int status; + + pid_t child = fork(); + if (child < 0) + err(1, "fork"); + + if (child) { + if (waitpid(child, &status, 0) != child) + err(1, "waitpid for child"); + } else { + unsigned long dr0, dr1, dr7; + + dr0 = (unsigned long)&ss; + dr1 = (unsigned long)breakpoint_insn; + dr7 = ((1UL << 1) | /* G0 */ + (3UL << 16) | /* RW0 = read or write */ + (1UL << 18) | /* LEN0 = 2 bytes */ + (1UL << 3)); /* G1, RW1 = insn */ + + if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0) + err(1, "PTRACE_ATTACH"); + + if (waitpid(parent, &status, 0) != parent) + err(1, "waitpid for child"); + + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0) + err(1, "PTRACE_POKEUSER DR0"); + + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0) + err(1, "PTRACE_POKEUSER DR1"); + + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0) + err(1, "PTRACE_POKEUSER DR7"); + + printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7); + + if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0) + err(1, "PTRACE_DETACH"); + + exit(0); + } +} + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + int flags) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = handler; + sa.sa_flags = SA_SIGINFO | flags; + sigemptyset(&sa.sa_mask); + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); +} + +static char const * const signames[] = { + [SIGSEGV] = "SIGSEGV", + [SIGBUS] = "SIBGUS", + [SIGTRAP] = "SIGTRAP", + [SIGILL] = "SIGILL", +}; + +static void sigtrap(int sig, siginfo_t *si, void *ctx_void) +{ + ucontext_t *ctx = ctx_void; + + printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n", + (unsigned long)ctx->uc_mcontext.gregs[REG_IP], + !!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF)); +} + +static void handle_and_return(int sig, siginfo_t *si, void *ctx_void) +{ + ucontext_t *ctx = ctx_void; + + printf("\tGot %s with RIP=%lx\n", signames[sig], + (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); +} + +static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void) +{ + ucontext_t *ctx = ctx_void; + + printf("\tGot %s with RIP=%lx\n", signames[sig], + (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); + + siglongjmp(jmpbuf, 1); +} + +int main() +{ + unsigned long nr; + + asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss)); + printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss); + + if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0) + printf("\tPR_SET_PTRACER_ANY succeeded\n"); + + printf("\tSet up a watchpoint\n"); + sethandler(SIGTRAP, sigtrap, 0); + enable_watchpoint(); + + printf("[RUN]\tRead 
from watched memory (should get SIGTRAP)\n"); + asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; INT3\n"); + asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; INT 3\n"); + asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; CS CS INT3\n"); + asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; CSx14 INT3\n"); + asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; INT 4\n"); + sethandler(SIGSEGV, handle_and_return, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss)); + +#ifdef __i386__ + printf("[RUN]\tMOV SS; INTO\n"); + sethandler(SIGSEGV, handle_and_return, SA_RESETHAND); + nr = -1; + asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into" + : [tmp] "+r" (nr) : [ss] "m" (ss)); +#endif + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; ICEBP\n"); + + /* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */ + sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND); + + asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss)); + } + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; CLI\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss)); + } + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; #PF\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]" + : [tmp] "=r" (nr) : [ss] "m" (ss)); + } + + /* + * INT $1: if #DB has DPL=3 and there isn't special handling, + * then the kernel will die. + */ + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; INT 1\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss)); + } + +#ifdef __x86_64__ + /* + * In principle, we should test 32-bit SYSCALL as well, but + * the calling convention is so unpredictable that it's + * not obviously worth the effort. + */ + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; SYSCALL\n"); + sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND); + nr = SYS_getpid; + /* + * Toggle the high bit of RSP to make it noncanonical to + * strengthen this test on non-SMAP systems. + */ + asm volatile ("btc $63, %%rsp\n\t" + "mov %[ss], %%ss; syscall\n\t" + "btc $63, %%rsp" + : "+a" (nr) : [ss] "m" (ss) + : "rcx" +#ifdef __x86_64__ + , "r11" +#endif + ); + } +#endif + + printf("[RUN]\tMOV SS; breakpointed NOP\n"); + asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss)); + + /* + * Invoking SYSENTER directly breaks all the rules. Just handle + * the SIGSEGV. + */ + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; SYSENTER\n"); + stack_t stack = { + .ss_sp = altstack_data, + .ss_size = SIGSTKSZ, + }; + if (sigaltstack(&stack, NULL) != 0) + err(1, "sigaltstack"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK); + nr = SYS_getpid; + asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr) + : [ss] "m" (ss) : "flags", "rcx" +#ifdef __x86_64__ + , "r11" +#endif + ); + + /* We're unreachable here. SYSENTER forgets RIP. 
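+ * Control returns through the SIGSEGV handler installed just
+ * above: handle_and_longjmp() siglongjmp()s to the sigsetjmp()
+ * guarding this block, and the test continues from there.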
*/ + } + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; INT $0x80\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + nr = 20; /* compat getpid */ + asm volatile ("mov %[ss], %%ss; int $0x80" + : "+a" (nr) : [ss] "m" (ss) + : "flags" +#ifdef __x86_64__ + , "r8", "r9", "r10", "r11" +#endif + ); + } + + printf("[OK]\tI aten't dead\n"); + return 0; +} diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c index 9c0325e1ea6844f666bfdcc8204763a8614b9875..50f7e92724813a3525154ede4f2b282af7e5a839 100644 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ b/tools/testing/selftests/x86/mpx-mini-test.c @@ -368,6 +368,11 @@ static int expected_bnd_index = -1; uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */ unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS]; +/* Failed address bound checks: */ +#ifndef SEGV_BNDERR +# define SEGV_BNDERR 3 +#endif + /* * The kernel is supposed to provide some information about the bounds * exception in the siginfo. It should match what we have in the bounds @@ -419,8 +424,6 @@ void handler(int signum, siginfo_t *si, void *vucontext) br_count++; dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count); -#define SEGV_BNDERR 3 /* failed address bound checks */ - dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n", status, ip, br_reason); dprintf2("si_signo: %d\n", si->si_signo); diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index f15aa5a76fe3457e96e438c15e7ad40d3c7fbce0..460b4bdf4c1edff9d5dfa0d451dbaa393d53b80c 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -72,10 +72,9 @@ extern void abort_hooks(void); test_nr, iteration_nr); \ dprintf0("errno at assert: %d", errno); \ abort_hooks(); \ - assert(condition); \ + exit(__LINE__); \ } \ } while (0) -#define raw_assert(cond) assert(cond) void cat_into_file(char *str, char *file) { @@ -87,12 +86,17 @@ void cat_into_file(char *str, char *file) * these need to be raw because they are called under * pkey_assert() */ - raw_assert(fd >= 0); + if (fd < 0) { + fprintf(stderr, "error opening '%s'\n", str); + perror("error: "); + exit(__LINE__); + } + ret = write(fd, str, strlen(str)); if (ret != strlen(str)) { perror("write to file failed"); fprintf(stderr, "filename: '%s' str: '%s'\n", file, str); - raw_assert(0); + exit(__LINE__); } close(fd); } @@ -191,26 +195,30 @@ void lots_o_noops_around_write(int *write_to_me) #ifdef __i386__ #ifndef SYS_mprotect_key -# define SYS_mprotect_key 380 +# define SYS_mprotect_key 380 #endif + #ifndef SYS_pkey_alloc -# define SYS_pkey_alloc 381 -# define SYS_pkey_free 382 +# define SYS_pkey_alloc 381 +# define SYS_pkey_free 382 #endif -#define REG_IP_IDX REG_EIP -#define si_pkey_offset 0x14 + +#define REG_IP_IDX REG_EIP +#define si_pkey_offset 0x14 #else #ifndef SYS_mprotect_key -# define SYS_mprotect_key 329 +# define SYS_mprotect_key 329 #endif + #ifndef SYS_pkey_alloc -# define SYS_pkey_alloc 330 -# define SYS_pkey_free 331 +# define SYS_pkey_alloc 330 +# define SYS_pkey_free 331 #endif -#define REG_IP_IDX REG_RIP -#define si_pkey_offset 0x20 + +#define REG_IP_IDX REG_RIP +#define si_pkey_offset 0x20 #endif @@ -225,8 +233,14 @@ void dump_mem(void *dumpme, int len_bytes) } } -#define SEGV_BNDERR 3 /* failed address bound checks */ -#define SEGV_PKUERR 4 +/* Failed address bound checks: */ +#ifndef SEGV_BNDERR +# define SEGV_BNDERR 3 +#endif + +#ifndef 
SEGV_PKUERR +# define SEGV_PKUERR 4 +#endif static char *si_code_str(int si_code) { @@ -289,13 +303,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) dump_mem(pkru_ptr - 128, 256); pkey_assert(*pkru_ptr); - si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); - dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); - dump_mem(si_pkey_ptr - 8, 24); - siginfo_pkey = *si_pkey_ptr; - pkey_assert(siginfo_pkey < NR_PKEYS); - last_si_pkey = siginfo_pkey; - if ((si->si_code == SEGV_MAPERR) || (si->si_code == SEGV_ACCERR) || (si->si_code == SEGV_BNDERR)) { @@ -303,6 +310,13 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) exit(4); } + si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); + dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); + dump_mem((u8 *)si_pkey_ptr - 8, 24); + siginfo_pkey = *si_pkey_ptr; + pkey_assert(siginfo_pkey < NR_PKEYS); + last_si_pkey = siginfo_pkey; + dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr); /* need __rdpkru() version so we do not do shadow_pkru checking */ dprintf1("signal pkru from pkru: %08x\n", __rdpkru()); @@ -311,22 +325,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n"); pkru_faults++; dprintf1("<<<<==================================================\n"); - return; - if (trapno == 14) { - fprintf(stderr, - "ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n", - trapno, ip); - fprintf(stderr, "si_addr %p\n", si->si_addr); - fprintf(stderr, "REG_ERR: %lx\n", - (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); - exit(1); - } else { - fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip); - fprintf(stderr, "si_addr %p\n", si->si_addr); - fprintf(stderr, "REG_ERR: %lx\n", - (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); - exit(2); - } dprint_in_signal = 0; } @@ -393,10 +391,15 @@ pid_t fork_lazy_child(void) return forkret; } -#define PKEY_DISABLE_ACCESS 0x1 -#define PKEY_DISABLE_WRITE 0x2 +#ifndef PKEY_DISABLE_ACCESS +# define PKEY_DISABLE_ACCESS 0x1 +#endif + +#ifndef PKEY_DISABLE_WRITE +# define PKEY_DISABLE_WRITE 0x2 +#endif -u32 pkey_get(int pkey, unsigned long flags) +static u32 hw_pkey_get(int pkey, unsigned long flags) { u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); u32 pkru = __rdpkru(); @@ -418,7 +421,7 @@ u32 pkey_get(int pkey, unsigned long flags) return masked_pkru; } -int pkey_set(int pkey, unsigned long rights, unsigned long flags) +static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags) { u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); u32 old_pkru = __rdpkru(); @@ -452,15 +455,15 @@ void pkey_disable_set(int pkey, int flags) pkey, flags); pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); - pkey_rights = pkey_get(pkey, syscall_flags); + pkey_rights = hw_pkey_get(pkey, syscall_flags); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); pkey_assert(pkey_rights >= 0); pkey_rights |= flags; - ret = pkey_set(pkey, pkey_rights, syscall_flags); + ret = hw_pkey_set(pkey, pkey_rights, syscall_flags); assert(!ret); /*pkru and flags have the same format */ shadow_pkru |= flags << (pkey * 2); @@ -468,8 +471,8 @@ void pkey_disable_set(int pkey, int flags) pkey_assert(ret >= 0); - pkey_rights = pkey_get(pkey, syscall_flags); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + pkey_rights = hw_pkey_get(pkey, syscall_flags); + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, 
pkey_rights); dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); @@ -483,24 +486,24 @@ void pkey_disable_clear(int pkey, int flags) { unsigned long syscall_flags = 0; int ret; - int pkey_rights = pkey_get(pkey, syscall_flags); + int pkey_rights = hw_pkey_get(pkey, syscall_flags); u32 orig_pkru = rdpkru(); pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); pkey_assert(pkey_rights >= 0); pkey_rights |= flags; - ret = pkey_set(pkey, pkey_rights, 0); + ret = hw_pkey_set(pkey, pkey_rights, 0); /* pkru and flags have the same format */ shadow_pkru &= ~(flags << (pkey * 2)); pkey_assert(ret >= 0); - pkey_rights = pkey_get(pkey, syscall_flags); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + pkey_rights = hw_pkey_get(pkey, syscall_flags); + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); @@ -674,10 +677,12 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, struct pkey_malloc_record { void *ptr; long size; + int prot; }; struct pkey_malloc_record *pkey_malloc_records; +struct pkey_malloc_record *pkey_last_malloc_record; long nr_pkey_malloc_records; -void record_pkey_malloc(void *ptr, long size) +void record_pkey_malloc(void *ptr, long size, int prot) { long i; struct pkey_malloc_record *rec = NULL; @@ -709,6 +714,8 @@ void record_pkey_malloc(void *ptr, long size) (int)(rec - pkey_malloc_records), rec, ptr, size); rec->ptr = ptr; rec->size = size; + rec->prot = prot; + pkey_last_malloc_record = rec; nr_pkey_malloc_records++; } @@ -753,7 +760,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey) pkey_assert(ptr != (void *)-1); ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey); pkey_assert(!ret); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); rdpkru(); dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr); @@ -774,7 +781,7 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey) size = ALIGN_UP(size, HPAGE_SIZE * 2); ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); pkey_assert(ptr != (void *)-1); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); mprotect_pkey(ptr, size, prot, pkey); dprintf1("unaligned ptr: %p\n", ptr); @@ -847,7 +854,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey) pkey_assert(ptr != (void *)-1); mprotect_pkey(ptr, size, prot, pkey); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr); return ptr; @@ -869,7 +876,7 @@ void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey) mprotect_pkey(ptr, size, prot, pkey); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr); close(fd); @@ -918,13 +925,21 @@ void *malloc_pkey(long size, int prot, u16 pkey) } int last_pkru_faults; +#define UNKNOWN_PKEY -2 void expected_pk_fault(int pkey) { dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n", __func__, last_pkru_faults, pkru_faults); dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey); pkey_assert(last_pkru_faults + 1 == pkru_faults); - pkey_assert(last_si_pkey == pkey); + + /* + * For exec-only memory, we do not know the pkey in + * advance, so skip this check. 
+ */ + if (pkey != UNKNOWN_PKEY) + pkey_assert(last_si_pkey == pkey); + /* * The signal handler shold have cleared out PKRU to let the * test program continue. We now have to restore it. @@ -939,10 +954,11 @@ void expected_pk_fault(int pkey) last_si_pkey = -1; } -void do_not_expect_pk_fault(void) -{ - pkey_assert(last_pkru_faults == pkru_faults); -} +#define do_not_expect_pk_fault(msg) do { \ + if (last_pkru_faults != pkru_faults) \ + dprintf0("unexpected PK fault: %s\n", msg); \ + pkey_assert(last_pkru_faults == pkru_faults); \ +} while (0) int test_fds[10] = { -1 }; int nr_test_fds; @@ -1151,12 +1167,15 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) pkey_assert(i < NR_PKEYS*2); /* - * There are 16 pkeys supported in hardware. One is taken - * up for the default (0) and another can be taken up by - * an execute-only mapping. Ensure that we can allocate - * at least 14 (16-2). + * There are 16 pkeys supported in hardware. Three are + * allocated by the time we get here: + * 1. The default key (0) + * 2. One possibly consumed by an execute-only mapping. + * 3. One allocated by the test code and passed in via + * 'pkey' to this function. + * Ensure that we can allocate at least another 13 (16-3). */ - pkey_assert(i >= NR_PKEYS-2); + pkey_assert(i >= NR_PKEYS-3); for (i = 0; i < nr_allocated_pkeys; i++) { err = sys_pkey_free(allocated_pkeys[i]); @@ -1165,6 +1184,35 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) } } +/* + * pkey 0 is special. It is allocated by default, so you do not + * have to call pkey_alloc() to use it first. Make sure that it + * is usable. + */ +void test_mprotect_with_pkey_0(int *ptr, u16 pkey) +{ + long size; + int prot; + + assert(pkey_last_malloc_record); + size = pkey_last_malloc_record->size; + /* + * This is a bit of a hack. But mprotect() requires + * huge-page-aligned sizes when operating on hugetlbfs. + * So, make sure that we use something that's a multiple + * of a huge page when we can. + */ + if (size >= HPAGE_SIZE) + size = HPAGE_SIZE; + prot = pkey_last_malloc_record->prot; + + /* Use pkey 0 */ + mprotect_pkey(ptr, size, prot, 0); + + /* Make sure that we can set it back to the original pkey. 
*/ + mprotect_pkey(ptr, size, prot, pkey); +} + void test_ptrace_of_child(int *ptr, u16 pkey) { __attribute__((__unused__)) int peek_result; @@ -1228,7 +1276,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey) pkey_assert(ret != -1); /* Now access from the current task, and expect NO exception: */ peek_result = read_ptr(plain_ptr); - do_not_expect_pk_fault(); + do_not_expect_pk_fault("read plain pointer after ptrace"); ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0); pkey_assert(ret != -1); @@ -1241,12 +1289,9 @@ void test_ptrace_of_child(int *ptr, u16 pkey) free(plain_ptr_unaligned); } -void test_executing_on_unreadable_memory(int *ptr, u16 pkey) +void *get_pointer_to_instructions(void) { void *p1; - int scratch; - int ptr_contents; - int ret; p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE); dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write); @@ -1256,7 +1301,23 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) /* Point 'p1' at the *second* page of the function: */ p1 += PAGE_SIZE; + /* + * Try to ensure we fault this in on next touch to ensure + * we get an instruction fault as opposed to a data one + */ madvise(p1, PAGE_SIZE, MADV_DONTNEED); + + return p1; +} + +void test_executing_on_unreadable_memory(int *ptr, u16 pkey) +{ + void *p1; + int scratch; + int ptr_contents; + int ret; + + p1 = get_pointer_to_instructions(); lots_o_noops_around_write(&scratch); ptr_contents = read_ptr(p1); dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); @@ -1272,12 +1333,55 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) */ madvise(p1, PAGE_SIZE, MADV_DONTNEED); lots_o_noops_around_write(&scratch); - do_not_expect_pk_fault(); + do_not_expect_pk_fault("executing on PROT_EXEC memory"); ptr_contents = read_ptr(p1); dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); expected_pk_fault(pkey); } +void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey) +{ + void *p1; + int scratch; + int ptr_contents; + int ret; + + dprintf1("%s() start\n", __func__); + + p1 = get_pointer_to_instructions(); + lots_o_noops_around_write(&scratch); + ptr_contents = read_ptr(p1); + dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); + + /* Use a *normal* mprotect(), not mprotect_pkey(): */ + ret = mprotect(p1, PAGE_SIZE, PROT_EXEC); + pkey_assert(!ret); + + dprintf2("pkru: %x\n", rdpkru()); + + /* Make sure this is an *instruction* fault */ + madvise(p1, PAGE_SIZE, MADV_DONTNEED); + lots_o_noops_around_write(&scratch); + do_not_expect_pk_fault("executing on PROT_EXEC memory"); + ptr_contents = read_ptr(p1); + dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); + expected_pk_fault(UNKNOWN_PKEY); + + /* + * Put the memory back to non-PROT_EXEC. Should clear the + * exec-only pkey off the VMA and allow it to be readable + * again. Go to PROT_NONE first to check for a kernel bug + * that did not clear the pkey when doing PROT_NONE. 
+	 */
+	ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
+	pkey_assert(!ret);
+
+	ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
+	pkey_assert(!ret);
+	ptr_contents = read_ptr(p1);
+	do_not_expect_pk_fault("plain read on recently PROT_EXEC area");
+}
+
 void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
 {
 	int size = PAGE_SIZE;
@@ -1302,6 +1406,8 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = {
 	test_kernel_gup_of_access_disabled_region,
 	test_kernel_gup_write_to_write_disabled_region,
 	test_executing_on_unreadable_memory,
+	test_implicit_mprotect_exec_only_memory,
+	test_mprotect_with_pkey_0,
 	test_ptrace_of_child,
 	test_pkey_syscalls_on_non_allocated_pkey,
 	test_pkey_syscalls_bad_args,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index b3d4a10f09a1195dde672a0624257fe220e4c311..af003268bf3ef2536719c8b17f49f68583fa7970 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -14,6 +14,8 @@
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
+
 #include <kvm/iodev.h>
 #include <kvm/arm_vgic.h>
@@ -320,6 +322,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
 		if (n > vgic_v3_max_apr_idx(vcpu))
 			return 0;
+
+		n = array_index_nospec(n, 4);
+
 		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
 		return vgicv3->vgic_ap1r[n];
 	}